mlx5_main.c revision 359545
/*-
 * Copyright (c) 2013-2019, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_core/mlx5_main.c 359545 2020-04-01 22:48:59Z kib $
 */

#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <linux/hardirq.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/cq.h>
#include <dev/mlx5/qp.h>
#include <dev/mlx5/srq.h>
#include <dev/mlx5/mpfs.h>
#include <dev/mlx5/vport.h>
#include <linux/delay.h>
#include <dev/mlx5/mlx5_ifc.h>
#include <dev/mlx5/mlx5_fpga/core.h>
#include <dev/mlx5/mlx5_lib/mlx5.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "fs_core.h"
#ifdef PCI_IOV
#include <sys/nv.h>
#include <dev/pci/pci_iov.h>
#include <sys/iov_schema.h>
#endif

static const char mlx5_version[] = "Mellanox Core driver "
	DRIVER_VERSION " (" DRIVER_RELDATE ")";
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEPEND(mlx5, linuxkpi, 1, 1, 1);
MODULE_DEPEND(mlx5, mlxfw, 1, 1, 1);
MODULE_DEPEND(mlx5, firmware, 1, 1, 1);
MODULE_VERSION(mlx5, 1);

SYSCTL_NODE(_hw, OID_AUTO, mlx5, CTLFLAG_RW, 0, "mlx5 hardware controls");

int mlx5_core_debug_mask;
SYSCTL_INT(_hw_mlx5, OID_AUTO, debug_mask, CTLFLAG_RWTUN,
    &mlx5_core_debug_mask, 0,
    "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");

#define MLX5_DEFAULT_PROF	2
static int mlx5_prof_sel = MLX5_DEFAULT_PROF;
SYSCTL_INT(_hw_mlx5, OID_AUTO, prof_sel, CTLFLAG_RWTUN,
    &mlx5_prof_sel, 0,
    "profile selector. Valid range 0 - 2");

static int mlx5_fast_unload_enabled = 1;
SYSCTL_INT(_hw_mlx5, OID_AUTO, fast_unload_enabled, CTLFLAG_RWTUN,
    &mlx5_fast_unload_enabled, 0,
    "Set to enable fast unload. Clear to disable.");

#define NUMA_NO_NODE -1

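/*
 * Client interfaces (e.g. the Ethernet and RDMA drivers) register on
 * intf_list; every probed core device sits on dev_list.  Whenever either
 * list changes, each interface is paired with each device under
 * intf_mutex via mlx5_add_device()/mlx5_remove_device() below.
 */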
static LIST_HEAD(intf_list);
static LIST_HEAD(dev_list);
static DEFINE_MUTEX(intf_mutex);

struct mlx5_device_context {
	struct list_head	list;
	struct mlx5_interface  *intf;
	void		       *context;
};

enum {
	MLX5_ATOMIC_REQ_MODE_BE = 0x0,
	MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,
};

static struct mlx5_profile profiles[] = {
	[0] = {
		.mask = 0,
	},
	[1] = {
		.mask = MLX5_PROF_MASK_QP_SIZE,
		.log_max_qp = 12,
	},
	[2] = {
		.mask = MLX5_PROF_MASK_QP_SIZE |
			MLX5_PROF_MASK_MR_CACHE,
		.log_max_qp = 17,
		.mr_cache[0] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[1] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[2] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[3] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[4] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[5] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[6] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[7] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[8] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[9] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[10] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[11] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[12] = {
			.size = 64,
			.limit = 32
		},
		.mr_cache[13] = {
			.size = 32,
			.limit = 16
		},
		.mr_cache[14] = {
			.size = 16,
			.limit = 8
		},
	},
	[3] = {
		.mask = MLX5_PROF_MASK_QP_SIZE,
		.log_max_qp = 17,
	},
};

#ifdef PCI_IOV
static const char iov_mac_addr_name[] = "mac-addr";
#endif

static int set_dma_caps(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		mlx5_core_warn(dev, "couldn't set 64-bit PCI DMA mask\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			mlx5_core_err(dev, "Can't set PCI DMA mask, aborting\n");
			return err;
		}
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		mlx5_core_warn(dev, "couldn't set 64-bit consistent PCI DMA mask\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			mlx5_core_err(dev, "Can't set consistent PCI DMA mask, aborting\n");
			return err;
		}
	}

	dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
	return err;
}

int mlx5_pci_read_power_status(struct mlx5_core_dev *dev,
			       u16 *p_power, u8 *p_status)
{
	u32 in[MLX5_ST_SZ_DW(mpein_reg)] = {};
	u32 out[MLX5_ST_SZ_DW(mpein_reg)] = {};
	int err;

	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
	    MLX5_ACCESS_REG_SUMMARY_CTRL_ID_MPEIN, 0, 0);

	*p_status = MLX5_GET(mpein_reg, out, pwr_status);
	*p_power = MLX5_GET(mpein_reg, out, pci_power);
	return err;
}

static int mlx5_pci_enable_device(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;
	int err = 0;

	mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) {
		err = pci_enable_device(pdev);
		if (!err)
			dev->pci_status = MLX5_PCI_STATUS_ENABLED;
	}
	mutex_unlock(&dev->pci_status_mutex);

	return err;
}

static void mlx5_pci_disable_device(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;

	mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) {
		pci_disable_device(pdev);
		dev->pci_status = MLX5_PCI_STATUS_DISABLED;
	}
	mutex_unlock(&dev->pci_status_mutex);
}

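/*
 * The driver only uses BAR 0: it holds the initialization segment that
 * is mapped at dev->iseg below, as well as the UAR and blue flame
 * doorbell pages.
 */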
static int request_bar(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err = 0;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		mlx5_core_err(dev, "Missing registers BAR, aborting\n");
		return -ENODEV;
	}

	err = pci_request_regions(pdev, DRIVER_NAME);
	if (err)
		mlx5_core_err(dev, "Couldn't get PCI resources, aborting\n");

	return err;
}

static void release_bar(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
}

static int mlx5_enable_msix(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_eq_table *table = &priv->eq_table;
	int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
	int limit = dev->msix_eqvec;
	int nvec = MLX5_EQ_VEC_COMP_BASE;
	int i;

	if (limit > 0)
		nvec += limit;
	else
		nvec += MLX5_CAP_GEN(dev, num_ports) * num_online_cpus();

	if (nvec > num_eqs)
		nvec = num_eqs;
	if (nvec > 256)
		nvec = 256;	/* limit of firmware API */
	if (nvec <= MLX5_EQ_VEC_COMP_BASE)
		return -ENOMEM;

	priv->msix_arr = kzalloc(nvec * sizeof(*priv->msix_arr), GFP_KERNEL);
	if (!priv->msix_arr)
		return -ENOMEM;

	for (i = 0; i < nvec; i++)
		priv->msix_arr[i].entry = i;

	nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr,
				     MLX5_EQ_VEC_COMP_BASE + 1, nvec);
	if (nvec < 0)
		return nvec;

	table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
	return 0;
}

static void mlx5_disable_msix(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	pci_disable_msix(dev->pdev);
	kfree(priv->msix_arr);
}

struct mlx5_reg_host_endianess {
	u8	he;
	u8	rsvd[15];
};

#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))

enum {
	MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
				MLX5_DEV_CAP_FLAG_DCT |
				MLX5_DEV_CAP_FLAG_DRAIN_SIGERR,
};

static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size)
{
	switch (size) {
	case 128:
		return 0;
	case 256:
		return 1;
	case 512:
		return 2;
	case 1024:
		return 3;
	case 2048:
		return 4;
	case 4096:
		return 5;
	default:
		mlx5_core_warn(dev, "invalid pkey table size %d\n", size);
		return 0;
	}
}

static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
				   enum mlx5_cap_type cap_type,
				   enum mlx5_cap_mode cap_mode)
{
	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *out, *hca_caps;
	u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
	int err;

	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err) {
		mlx5_core_warn(dev,
		    "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
		    cap_type, cap_mode, err);
		goto query_ex;
	}

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);

	switch (cap_mode) {
	case HCA_CAP_OPMOD_GET_MAX:
		memcpy(dev->hca_caps_max[cap_type], hca_caps,
		       MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	case HCA_CAP_OPMOD_GET_CUR:
		memcpy(dev->hca_caps_cur[cap_type], hca_caps,
		       MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	default:
		mlx5_core_warn(dev,
		    "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
		    cap_type, cap_mode);
		err = -EINVAL;
		break;
	}
query_ex:
	kfree(out);
	return err;
}

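/*
 * QUERY_HCA_CAP encodes the capability group and the current/maximum
 * selector in op_mod ((cap_type << 1) | cap_mode); both flavors are
 * cached so the sysctl tree built in init_one() can expose them side
 * by side.
 */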
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)
{
	int ret;

	ret = mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_CUR);
	if (ret)
		return ret;

	return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX);
}

static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz)
{
	u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)] = {0};

	MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);

	return mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
}

static int handle_hca_cap(struct mlx5_core_dev *dev)
{
	void *set_ctx = NULL;
	struct mlx5_profile *prof = dev->profile;
	int err = -ENOMEM;
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	void *set_hca_cap;

	set_ctx = kzalloc(set_sz, GFP_KERNEL);
	if (!set_ctx)
		return err;

	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
	if (err)
		goto query_ex;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
				   capability);
	memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL],
	       MLX5_ST_SZ_BYTES(cmd_hca_cap));

	mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
		      mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
		      128);
	/* we limit the size of the pkey table to 128 entries for now */
	MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
		 to_fw_pkey_sz(dev, 128));

	if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
		MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
			 prof->log_max_qp);

	/* disable cmdif checksum */
	MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);

	/* enable drain sigerr */
	MLX5_SET(cmd_hca_cap, set_hca_cap, drain_sigerr, 1);

	MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);

	err = set_caps(dev, set_ctx, set_sz);

query_ex:
	kfree(set_ctx);
	return err;
}

static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
{
	void *set_ctx;
	void *set_hca_cap;
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	int req_endianness;
	int err;

	if (MLX5_CAP_GEN(dev, atomic)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
		if (err)
			return err;
	} else {
		return 0;
	}

	req_endianness =
		MLX5_CAP_ATOMIC(dev,
				supported_atomic_req_8B_endianess_mode_1);

	if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS)
		return 0;

	set_ctx = kzalloc(set_sz, GFP_KERNEL);
	if (!set_ctx)
		return -ENOMEM;

	MLX5_SET(set_hca_cap_in, set_ctx, op_mod,
		 MLX5_SET_HCA_CAP_OP_MOD_ATOMIC << 1);
	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);

	/* Set requestor to host endianness */
	MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianess_mode,
		 MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS);

	err = set_caps(dev, set_ctx, set_sz);

	kfree(set_ctx);
	return err;
}

static int set_hca_ctrl(struct mlx5_core_dev *dev)
{
	struct mlx5_reg_host_endianess he_in;
	struct mlx5_reg_host_endianess he_out;
	int err;

	if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
	    !MLX5_CAP_GEN(dev, roce))
		return 0;

	memset(&he_in, 0, sizeof(he_in));
	he_in.he = MLX5_SET_HOST_ENDIANNESS;
	err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in),
				   &he_out, sizeof(he_out),
				   MLX5_REG_HOST_ENDIANNESS, 0, 1);
	return err;
}

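/*
 * ENABLE_HCA takes a function id: 0 enables the physical function
 * itself, while the SR-IOV path below passes vfnum + 1 to enable
 * individual virtual functions.
 */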
static int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0};

	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	MLX5_SET(enable_hca_in, in, function_id, func_id);
	return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
}

static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
{
	u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {0};

	MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
{
	u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {0};
	u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {0};
	u32 sup_issi;
	int err;

	MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);

	err = mlx5_cmd_exec(dev, query_in, sizeof(query_in), query_out,
	    sizeof(query_out));
	if (err) {
		u32 syndrome;
		u8 status;

		mlx5_cmd_mbox_status(query_out, &status, &syndrome);
		if (status == MLX5_CMD_STAT_BAD_OP_ERR) {
			mlx5_core_dbg(dev, "Only ISSI 0 is supported\n");
			return 0;
		}

		mlx5_core_err(dev, "failed to query ISSI\n");
		return err;
	}

	sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);

	if (sup_issi & (1 << 1)) {
		u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {0};
		u32 set_out[MLX5_ST_SZ_DW(set_issi_out)] = {0};

		MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
		MLX5_SET(set_issi_in, set_in, current_issi, 1);

		err = mlx5_cmd_exec(dev, set_in, sizeof(set_in), set_out,
		    sizeof(set_out));
		if (err) {
			mlx5_core_err(dev, "failed to set ISSI=1 err(%d)\n", err);
			return err;
		}

		dev->issi = 1;

		return 0;
	} else if (sup_issi & (1 << 0)) {
		return 0;
	}

	return -ENOTSUPP;
}

int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq;
	int err = -ENOENT;

	spin_lock(&table->lock);
	list_for_each_entry(eq, &table->comp_eqs_list, list) {
		if (eq->index == vector) {
			*eqn = eq->eqn;
			*irqn = eq->irqn;
			err = 0;
			break;
		}
	}
	spin_unlock(&table->lock);

	return err;
}
EXPORT_SYMBOL(mlx5_vector2eqn);

static void free_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq, *n;

	spin_lock(&table->lock);
	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
		list_del(&eq->list);
		spin_unlock(&table->lock);
		if (mlx5_destroy_unmap_eq(dev, eq))
			mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n",
				       eq->eqn);
		kfree(eq);
		spin_lock(&table->lock);
	}
	spin_unlock(&table->lock);
}

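/*
 * One completion EQ is created per MSI-X vector above
 * MLX5_EQ_VEC_COMP_BASE; mlx5_vector2eqn() above maps a vector index
 * back to the EQ/IRQ pair that is set up here.
 */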
static int alloc_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq;
	int ncomp_vec;
	int nent;
	int err;
	int i;

	INIT_LIST_HEAD(&table->comp_eqs_list);
	ncomp_vec = table->num_comp_vectors;
	nent = MLX5_COMP_EQ_SIZE;
	for (i = 0; i < ncomp_vec; i++) {
		eq = kzalloc(sizeof(*eq), GFP_KERNEL);
		if (!eq) {
			err = -ENOMEM;
			goto clean;
		}

		err = mlx5_create_map_eq(dev, eq,
					 i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
					 &dev->priv.uuari.uars[0]);
		if (err) {
			kfree(eq);
			goto clean;
		}
		mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
		eq->index = i;
		spin_lock(&table->lock);
		list_add_tail(&eq->list, &table->comp_eqs_list);
		spin_unlock(&table->lock);
	}

	return 0;

clean:
	free_comp_eqs(dev);
	return err;
}

static int map_bf_area(struct mlx5_core_dev *dev)
{
	resource_size_t bf_start = pci_resource_start(dev->pdev, 0);
	resource_size_t bf_len = pci_resource_len(dev->pdev, 0);

	dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len);

	return dev->priv.bf_mapping ? 0 : -ENOMEM;
}

static void unmap_bf_area(struct mlx5_core_dev *dev)
{
	if (dev->priv.bf_mapping)
		io_mapping_free(dev->priv.bf_mapping);
}

static inline int fw_initializing(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->initializing) >> 31;
}

static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
			u32 warn_time_mili)
{
	int warn = jiffies + msecs_to_jiffies(warn_time_mili);
	int end = jiffies + msecs_to_jiffies(max_wait_mili);
	int err = 0;

	MPASS(max_wait_mili > warn_time_mili);

	while (fw_initializing(dev) == 1) {
		if (time_after(jiffies, end)) {
			err = -EBUSY;
			break;
		}
		if (warn_time_mili && time_after(jiffies, warn)) {
			mlx5_core_warn(dev,
			    "Waiting for FW initialization, timeout abort in %u s\n",
			    (unsigned int)(jiffies_to_msecs(end - warn) / 1000));
			warn = jiffies + msecs_to_jiffies(warn_time_mili);
		}
		msleep(FW_INIT_WAIT_MS);
	}

	if (err != 0)
		mlx5_core_dbg(dev, "Full initializing bit dword = 0x%x\n",
		    ioread32be(&dev->iseg->initializing));

	return err;
}

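/*
 * A mlx5_device_context records the result of pairing one registered
 * interface with one device; the add() callback runs with curvnet set
 * to vnet0 so that any network objects it creates are attached to the
 * default vnet.
 */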
static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	dev_ctx = kzalloc(sizeof(*dev_ctx), GFP_KERNEL);
	if (!dev_ctx)
		return;

	dev_ctx->intf = intf;
	CURVNET_SET_QUIET(vnet0);
	dev_ctx->context = intf->add(dev);
	CURVNET_RESTORE();

	if (dev_ctx->context) {
		spin_lock_irq(&priv->ctx_lock);
		list_add_tail(&dev_ctx->list, &priv->ctx_list);
		spin_unlock_irq(&priv->ctx_lock);
	} else {
		kfree(dev_ctx);
	}
}

static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf == intf) {
			spin_lock_irq(&priv->ctx_lock);
			list_del(&dev_ctx->list);
			spin_unlock_irq(&priv->ctx_lock);

			intf->remove(dev, dev_ctx->context);
			kfree(dev_ctx);
			return;
		}
}

int
mlx5_register_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&intf_mutex);
	list_add_tail(&priv->dev_list, &dev_list);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&intf_mutex);

	return 0;
}

void
mlx5_unregister_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&intf_mutex);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_remove_device(intf, priv);
	list_del(&priv->dev_list);
	mutex_unlock(&intf_mutex);
}

int mlx5_register_interface(struct mlx5_interface *intf)
{
	struct mlx5_priv *priv;

	if (!intf->add || !intf->remove)
		return -EINVAL;

	mutex_lock(&intf_mutex);
	list_add_tail(&intf->list, &intf_list);
	list_for_each_entry(priv, &dev_list, dev_list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&intf_mutex);

	return 0;
}
EXPORT_SYMBOL(mlx5_register_interface);

void mlx5_unregister_interface(struct mlx5_interface *intf)
{
	struct mlx5_priv *priv;

	mutex_lock(&intf_mutex);
	list_for_each_entry(priv, &dev_list, dev_list)
		mlx5_remove_device(intf, priv);
	list_del(&intf->list);
	mutex_unlock(&intf_mutex);
}
EXPORT_SYMBOL(mlx5_unregister_interface);

void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
{
	struct mlx5_priv *priv = &mdev->priv;
	struct mlx5_device_context *dev_ctx;
	unsigned long flags;
	void *result = NULL;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
		if ((dev_ctx->intf->protocol == protocol) &&
		    dev_ctx->intf->get_dev) {
			result = dev_ctx->intf->get_dev(dev_ctx->context);
			break;
		}

	spin_unlock_irqrestore(&priv->ctx_lock, flags);

	return result;
}
EXPORT_SYMBOL(mlx5_get_protocol_dev);

static int mlx5_auto_fw_update;
SYSCTL_INT(_hw_mlx5, OID_AUTO, auto_fw_update, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &mlx5_auto_fw_update, 0,
    "Allow automatic firmware update on driver start");
static int
mlx5_firmware_update(struct mlx5_core_dev *dev)
{
	const struct firmware *fw;
	int err;

	TUNABLE_INT_FETCH("hw.mlx5.auto_fw_update", &mlx5_auto_fw_update);
	if (!mlx5_auto_fw_update)
		return (0);
	fw = firmware_get("mlx5fw_mfa");
	if (fw) {
		err = mlx5_firmware_flash(dev, fw);
		firmware_put(fw, FIRMWARE_UNLOAD);
	} else
		return (-ENOENT);

	return err;
}

static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
	struct pci_dev *pdev = dev->pdev;
	device_t bsddev;
	int err;

	bsddev = pdev->dev.bsddev;
	pci_set_drvdata(dev->pdev, dev);
	strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
	priv->name[MLX5_MAX_NAME_LEN - 1] = 0;

	mutex_init(&priv->pgdir_mutex);
	INIT_LIST_HEAD(&priv->pgdir_list);
	spin_lock_init(&priv->mkey_lock);

	priv->numa_node = NUMA_NO_NODE;

	err = mlx5_pci_enable_device(dev);
	if (err) {
		mlx5_core_err(dev, "Cannot enable PCI device, aborting\n");
		goto err_dbg;
	}

	err = request_bar(pdev);
	if (err) {
		mlx5_core_err(dev, "error requesting BARs, aborting\n");
		goto err_disable;
	}

	pci_set_master(pdev);

	err = set_dma_caps(pdev);
	if (err) {
		mlx5_core_err(dev, "Failed setting DMA capabilities mask, aborting\n");
		goto err_clr_master;
	}

	dev->iseg_base = pci_resource_start(dev->pdev, 0);
	dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
	if (!dev->iseg) {
		err = -ENOMEM;
		mlx5_core_err(dev, "Failed mapping initialization segment, aborting\n");
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	release_bar(dev->pdev);
err_disable:
	mlx5_pci_disable_device(dev);
err_dbg:
	return err;
}

static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
#ifdef PCI_IOV
	if (MLX5_CAP_GEN(dev, eswitch_flow_table))
		pci_iov_detach(dev->pdev->dev.bsddev);
#endif
	iounmap(dev->iseg);
	release_bar(dev->pdev);
	mlx5_pci_disable_device(dev);
}

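/*
 * mlx5_init_once() builds the software objects (EQ/CQ/QP/SRQ/MR tables,
 * reserved GIDs, FPGA state) exactly once per device; mlx5_load_one()
 * and mlx5_unload_one() below use their boot/cleanup arguments to skip
 * this work on recovery reloads.
 */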
static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
	int err;

	err = mlx5_vsc_find_cap(dev);
	if (err)
		mlx5_core_err(dev, "Unable to find vendor specific capabilities\n");

	err = mlx5_query_hca_caps(dev);
	if (err) {
		mlx5_core_err(dev, "query hca failed\n");
		goto out;
	}

	err = mlx5_query_board_id(dev);
	if (err) {
		mlx5_core_err(dev, "query board id failed\n");
		goto out;
	}

	err = mlx5_eq_init(dev);
	if (err) {
		mlx5_core_err(dev, "failed to initialize eq\n");
		goto out;
	}

	MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);

	err = mlx5_init_cq_table(dev);
	if (err) {
		mlx5_core_err(dev, "failed to initialize cq table\n");
		goto err_eq_cleanup;
	}

	mlx5_init_qp_table(dev);
	mlx5_init_srq_table(dev);
	mlx5_init_mr_table(dev);

	mlx5_init_reserved_gids(dev);
	mlx5_fpga_init(dev);

	return 0;

err_eq_cleanup:
	mlx5_eq_cleanup(dev);

out:
	return err;
}

static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
{
	mlx5_fpga_cleanup(dev);
	mlx5_cleanup_reserved_gids(dev);
	mlx5_cleanup_mr_table(dev);
	mlx5_cleanup_srq_table(dev);
	mlx5_cleanup_qp_table(dev);
	mlx5_cleanup_cq_table(dev);
	mlx5_eq_cleanup(dev);
}

static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
			 bool boot)
{
	int err;

	mutex_lock(&dev->intf_state_mutex);
	if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		mlx5_core_warn(dev, "interface is up, NOP\n");
		goto out;
	}

	mlx5_core_dbg(dev, "firmware version: %d.%d.%d\n",
	    fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));

	/*
	 * On load, clear any previous indication of internal error;
	 * the device is up.
	 */
	dev->state = MLX5_DEVICE_STATE_UP;

	/* Wait for firmware to accept the initialization segment configuration. */
	err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI,
	    FW_INIT_WARN_MESSAGE_INTERVAL);
	if (err) {
		dev_err(&dev->pdev->dev,
		    "Firmware over %d MS in pre-initializing state, aborting\n",
		    FW_PRE_INIT_TIMEOUT_MILI);
		goto out_err;
	}

	err = mlx5_cmd_init(dev);
	if (err) {
		mlx5_core_err(dev,
		    "Failed initializing command interface, aborting\n");
		goto out_err;
	}

	err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI, 0);
	if (err) {
		mlx5_core_err(dev,
		    "Firmware over %d MS in initializing state, aborting\n",
		    FW_INIT_TIMEOUT_MILI);
		goto err_cmd_cleanup;
	}

	err = mlx5_core_enable_hca(dev, 0);
	if (err) {
		mlx5_core_err(dev, "enable hca failed\n");
		goto err_cmd_cleanup;
	}

	err = mlx5_core_set_issi(dev);
	if (err) {
		mlx5_core_err(dev, "failed to set issi\n");
		goto err_disable_hca;
	}

	err = mlx5_pagealloc_start(dev);
	if (err) {
		mlx5_core_err(dev, "mlx5_pagealloc_start failed\n");
		goto err_disable_hca;
	}

	err = mlx5_satisfy_startup_pages(dev, 1);
	if (err) {
		mlx5_core_err(dev, "failed to allocate boot pages\n");
		goto err_pagealloc_stop;
	}

	err = set_hca_ctrl(dev);
	if (err) {
		mlx5_core_err(dev, "set_hca_ctrl failed\n");
		goto reclaim_boot_pages;
	}

	err = handle_hca_cap(dev);
	if (err) {
		mlx5_core_err(dev, "handle_hca_cap failed\n");
		goto reclaim_boot_pages;
	}

	err = handle_hca_cap_atomic(dev);
	if (err) {
		mlx5_core_err(dev, "handle_hca_cap_atomic failed\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_satisfy_startup_pages(dev, 0);
	if (err) {
		mlx5_core_err(dev, "failed to allocate init pages\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_cmd_init_hca(dev);
	if (err) {
		mlx5_core_err(dev, "init hca failed\n");
		goto reclaim_boot_pages;
	}

	mlx5_start_health_poll(dev);

	if (boot && mlx5_init_once(dev, priv)) {
		mlx5_core_err(dev, "sw objs init failed\n");
		goto err_stop_poll;
	}

	err = mlx5_enable_msix(dev);
	if (err) {
		mlx5_core_err(dev, "enable msix failed\n");
		goto err_cleanup_once;
	}

	err = mlx5_alloc_uuars(dev, &priv->uuari);
	if (err) {
		mlx5_core_err(dev, "Failed allocating uar, aborting\n");
		goto err_disable_msix;
	}

	err = mlx5_start_eqs(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to start pages and async EQs\n");
		goto err_free_uar;
	}

	err = alloc_comp_eqs(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to alloc completion EQs\n");
		goto err_stop_eqs;
	}

	if (map_bf_area(dev))
		mlx5_core_err(dev, "Failed to map blue flame area\n");

	err = mlx5_init_fs(dev);
	if (err) {
		mlx5_core_err(dev, "flow steering init %d\n", err);
		goto err_free_comp_eqs;
	}

	err = mlx5_mpfs_init(dev);
	if (err) {
		mlx5_core_err(dev, "mpfs init failed %d\n", err);
		goto err_fs;
	}

	err = mlx5_fpga_device_start(dev);
	if (err) {
		mlx5_core_err(dev, "fpga device start failed %d\n", err);
		goto err_mpfs;
	}

	err = mlx5_register_device(dev);
	if (err) {
		mlx5_core_err(dev, "mlx5_register_device failed %d\n", err);
		goto err_fpga;
	}

	set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);

out:
	mutex_unlock(&dev->intf_state_mutex);
	return 0;

err_fpga:
	mlx5_fpga_device_stop(dev);

err_mpfs:
	mlx5_mpfs_destroy(dev);

err_fs:
	mlx5_cleanup_fs(dev);

err_free_comp_eqs:
	free_comp_eqs(dev);
	unmap_bf_area(dev);

err_stop_eqs:
	mlx5_stop_eqs(dev);

err_free_uar:
	mlx5_free_uuars(dev, &priv->uuari);

err_disable_msix:
	mlx5_disable_msix(dev);

err_cleanup_once:
	if (boot)
		mlx5_cleanup_once(dev);

err_stop_poll:
	mlx5_stop_health_poll(dev, boot);
	if (mlx5_cmd_teardown_hca(dev)) {
		mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n");
		goto out_err;
	}

reclaim_boot_pages:
	mlx5_reclaim_startup_pages(dev);

err_pagealloc_stop:
	mlx5_pagealloc_stop(dev);

err_disable_hca:
	mlx5_core_disable_hca(dev);

err_cmd_cleanup:
	mlx5_cmd_cleanup(dev);

out_err:
	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
	mutex_unlock(&dev->intf_state_mutex);

	return err;
}

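/*
 * Teardown runs in the reverse order of mlx5_load_one(); with cleanup
 * set, the once-per-device software objects are destroyed as well.
 */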
static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
			   bool cleanup)
{
	int err = 0;

	if (cleanup)
		mlx5_drain_health_recovery(dev);

	mutex_lock(&dev->intf_state_mutex);
	if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		mlx5_core_warn(dev, "%s: interface is down, NOP\n", __func__);
		if (cleanup)
			mlx5_cleanup_once(dev);
		goto out;
	}

	mlx5_unregister_device(dev);

	mlx5_eswitch_cleanup(dev->priv.eswitch);
	mlx5_fpga_device_stop(dev);
	mlx5_mpfs_destroy(dev);
	mlx5_cleanup_fs(dev);
	unmap_bf_area(dev);
	mlx5_wait_for_reclaim_vfs_pages(dev);
	free_comp_eqs(dev);
	mlx5_stop_eqs(dev);
	mlx5_free_uuars(dev, &priv->uuari);
	mlx5_disable_msix(dev);
	if (cleanup)
		mlx5_cleanup_once(dev);
	mlx5_stop_health_poll(dev, cleanup);
	err = mlx5_cmd_teardown_hca(dev);
	if (err) {
		mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n");
		goto out;
	}
	mlx5_pagealloc_stop(dev);
	mlx5_reclaim_startup_pages(dev);
	mlx5_core_disable_hca(dev);
	mlx5_cmd_cleanup(dev);

out:
	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
	mutex_unlock(&dev->intf_state_mutex);
	return err;
}

void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
		     unsigned long param)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_device_context *dev_ctx;
	unsigned long flags;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf->event)
			dev_ctx->intf->event(dev, dev_ctx->context, event, param);

	spin_unlock_irqrestore(&priv->ctx_lock, flags);
}

struct mlx5_core_event_handler {
	void (*event)(struct mlx5_core_dev *dev,
		      enum mlx5_dev_event event,
		      void *data);
};

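/*
 * MLX5_PORT_MODULE_ERROR_STATS is an X-macro: expanding it with
 * MLX5_STATS_DESC keeps only the sysctl name and description of each
 * entry, producing the flat name/description string pairs that the
 * pme_stats error sysctls in init_one() consume.
 */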
#define MLX5_STATS_DESC(a, b, c, d, e, ...) d, e,

#define MLX5_PORT_MODULE_ERROR_STATS(m)				\
m(+1, u64, power_budget_exceeded, "power_budget", "Module Power Budget Exceeded") \
m(+1, u64, long_range, "long_range", "Module Long Range for non MLNX cable/module") \
m(+1, u64, bus_stuck, "bus_stuck", "Module Bus stuck (I2C or data shorted)") \
m(+1, u64, no_eeprom, "no_eeprom", "No EEPROM/retry timeout") \
m(+1, u64, enforce_part_number, "enforce_part_number", "Module Enforce part number list") \
m(+1, u64, unknown_id, "unknown_id", "Module Unknown identifier") \
m(+1, u64, high_temp, "high_temp", "Module High Temperature") \
m(+1, u64, cable_shorted, "cable_shorted", "Module Cable is shorted")

static const char *mlx5_pme_err_desc[] = {
	MLX5_PORT_MODULE_ERROR_STATS(MLX5_STATS_DESC)
};

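/*
 * Probe entry point: allocates the core device, publishes the sysctl
 * tree (PME statistics, power status, raw capabilities), then brings
 * the function up via mlx5_pci_init()/mlx5_load_one() and finally
 * attaches SR-IOV when the firmware advertises vport_group_manager.
 */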
static int init_one(struct pci_dev *pdev,
		    const struct pci_device_id *id)
{
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;
	device_t bsddev = pdev->dev.bsddev;
#ifdef PCI_IOV
	nvlist_t *pf_schema, *vf_schema;
	int num_vfs, sriov_pos;
#endif
	int i, err;
	struct sysctl_oid *pme_sysctl_node;
	struct sysctl_oid *pme_err_sysctl_node;
	struct sysctl_oid *cap_sysctl_node;
	struct sysctl_oid *current_cap_sysctl_node;
	struct sysctl_oid *max_cap_sysctl_node;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	priv = &dev->priv;
	if (id)
		priv->pci_dev_data = id->driver_data;

	if (mlx5_prof_sel < 0 || mlx5_prof_sel >= ARRAY_SIZE(profiles)) {
		device_printf(bsddev,
		    "WARN: selected profile out of range, selecting default (%d)\n",
		    MLX5_DEFAULT_PROF);
		mlx5_prof_sel = MLX5_DEFAULT_PROF;
	}
	dev->profile = &profiles[mlx5_prof_sel];
	dev->pdev = pdev;
	dev->event = mlx5_core_event;

	/* Set desc */
	device_set_desc(bsddev, mlx5_version);

	sysctl_ctx_init(&dev->sysctl_ctx);
	SYSCTL_ADD_INT(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
	    OID_AUTO, "msix_eqvec", CTLFLAG_RDTUN, &dev->msix_eqvec, 0,
	    "Maximum number of MSIX event queue vectors, if set");
	SYSCTL_ADD_INT(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
	    OID_AUTO, "power_status", CTLFLAG_RD, &dev->pwr_status, 0,
	    "0:Invalid 1:Sufficient 2:Insufficient");
	SYSCTL_ADD_INT(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
	    OID_AUTO, "power_value", CTLFLAG_RD, &dev->pwr_value, 0,
	    "Current power value in Watts");

	pme_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
	    OID_AUTO, "pme_stats", CTLFLAG_RD, NULL,
	    "Port module event statistics");
	if (pme_sysctl_node == NULL) {
		err = -ENOMEM;
		goto clean_sysctl_ctx;
	}
	pme_err_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(pme_sysctl_node),
	    OID_AUTO, "errors", CTLFLAG_RD, NULL,
	    "Port module event error statistics");
	if (pme_err_sysctl_node == NULL) {
		err = -ENOMEM;
		goto clean_sysctl_ctx;
	}
	SYSCTL_ADD_U64(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(pme_sysctl_node), OID_AUTO,
	    "module_plug", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->priv.pme_stats.status_counters[MLX5_MODULE_STATUS_PLUGGED_ENABLED],
	    0, "Number of times a module was plugged");
	SYSCTL_ADD_U64(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(pme_sysctl_node), OID_AUTO,
	    "module_unplug", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->priv.pme_stats.status_counters[MLX5_MODULE_STATUS_UNPLUGGED],
	    0, "Number of times a module was unplugged");
	for (i = 0; i < MLX5_MODULE_EVENT_ERROR_NUM; i++) {
		SYSCTL_ADD_U64(&dev->sysctl_ctx,
		    SYSCTL_CHILDREN(pme_err_sysctl_node), OID_AUTO,
		    mlx5_pme_err_desc[2 * i], CTLFLAG_RD | CTLFLAG_MPSAFE,
		    &dev->priv.pme_stats.error_counters[i],
		    0, mlx5_pme_err_desc[2 * i + 1]);
	}

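	/*
	 * Expose the raw capability bitstrings under the per-device
	 * "caps" sysctl node; "current" holds the values now in effect
	 * and "max" the ceiling the firmware reports.
	 */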
	cap_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
	    OID_AUTO, "caps", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
	    "hardware capabilities raw bitstrings");
	if (cap_sysctl_node == NULL) {
		err = -ENOMEM;
		goto clean_sysctl_ctx;
	}
	current_cap_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(cap_sysctl_node),
	    OID_AUTO, "current", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
	    "");
	if (current_cap_sysctl_node == NULL) {
		err = -ENOMEM;
		goto clean_sysctl_ctx;
	}
	max_cap_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(cap_sysctl_node),
	    OID_AUTO, "max", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
	    "");
	if (max_cap_sysctl_node == NULL) {
		err = -ENOMEM;
		goto clean_sysctl_ctx;
	}
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "general", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_GENERAL],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "general", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_GENERAL],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "ether", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "ether", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "odp", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_ODP],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "odp", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_ODP],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "atomic", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_ATOMIC],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "atomic", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_ATOMIC],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "roce", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_ROCE],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "roce", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_ROCE],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "ipoib", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_IPOIB_OFFLOADS],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "ipoib", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_IPOIB_OFFLOADS],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "eoib", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_EOIB_OFFLOADS],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "eoib", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_EOIB_OFFLOADS],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "flow_table", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_FLOW_TABLE],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "flow_table", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_FLOW_TABLE],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "eswitch_flow_table", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "eswitch_flow_table", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "eswitch", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_ESWITCH],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "eswitch", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_ESWITCH],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "snapshot", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_SNAPSHOT],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "snapshot", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_SNAPSHOT],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "vector_calc", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_VECTOR_CALC],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "vector_calc", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_VECTOR_CALC],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "qos", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_QOS],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "qos", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_QOS],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_DEBUG],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_DEBUG],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(cap_sysctl_node),
	    OID_AUTO, "pcam", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->caps.pcam, sizeof(dev->caps.pcam), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(cap_sysctl_node),
	    OID_AUTO, "mcam", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->caps.mcam, sizeof(dev->caps.mcam), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(cap_sysctl_node),
	    OID_AUTO, "qcam", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->caps.qcam, sizeof(dev->caps.qcam), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(cap_sysctl_node),
	    OID_AUTO, "fpga", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->caps.fpga, sizeof(dev->caps.fpga), "IU", "");

	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);
	mutex_init(&dev->pci_status_mutex);
	mutex_init(&dev->intf_state_mutex);
	mtx_init(&dev->dump_lock, "mlx5dmp", NULL, MTX_DEF | MTX_NEW);
	err = mlx5_pci_init(dev, priv);
	if (err) {
		mlx5_core_err(dev, "mlx5_pci_init failed %d\n", err);
		goto clean_dev;
	}

	err = mlx5_health_init(dev);
	if (err) {
		mlx5_core_err(dev, "mlx5_health_init failed %d\n", err);
		goto close_pci;
	}

	mlx5_pagealloc_init(dev);

	err = mlx5_load_one(dev, priv, true);
	if (err) {
		mlx5_core_err(dev, "mlx5_load_one failed %d\n", err);
		goto clean_health;
	}

	mlx5_fwdump_prep(dev);

	mlx5_firmware_update(dev);

#ifdef PCI_IOV
	if (MLX5_CAP_GEN(dev, vport_group_manager)) {
		if (pci_find_extcap(bsddev, PCIZ_SRIOV, &sriov_pos) == 0) {
			num_vfs = pci_read_config(bsddev, sriov_pos +
			    PCIR_SRIOV_TOTAL_VFS, 2);
		} else {
			mlx5_core_err(dev, "cannot find SR-IOV PCIe cap\n");
			num_vfs = 0;
		}
		err = mlx5_eswitch_init(dev, 1 + num_vfs);
		if (err == 0) {
			pf_schema = pci_iov_schema_alloc_node();
			vf_schema = pci_iov_schema_alloc_node();
			pci_iov_schema_add_unicast_mac(vf_schema,
			    iov_mac_addr_name, 0, NULL);
			err = pci_iov_attach(bsddev, pf_schema, vf_schema);
			if (err != 0) {
				device_printf(bsddev,
				    "Failed to initialize SR-IOV support, error %d\n",
				    err);
			}
		} else {
			mlx5_core_err(dev, "eswitch init failed, error %d\n",
			    err);
		}
	}
#endif

	pci_save_state(bsddev);
	return 0;

clean_health:
	mlx5_pagealloc_cleanup(dev);
	mlx5_health_cleanup(dev);
close_pci:
	mlx5_pci_close(dev, priv);
clean_dev:
	mtx_destroy(&dev->dump_lock);
clean_sysctl_ctx:
	sysctl_ctx_free(&dev->sysctl_ctx);
	kfree(dev);
	return err;
}

static void remove_one(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;

	if (mlx5_unload_one(dev, priv, true)) {
		mlx5_core_err(dev, "mlx5_unload_one failed\n");
		mlx5_health_cleanup(dev);
		return;
	}

	mlx5_pagealloc_cleanup(dev);
	mlx5_health_cleanup(dev);
	mlx5_fwdump_clean(dev);
	mlx5_pci_close(dev, priv);
	mtx_destroy(&dev->dump_lock);
	pci_set_drvdata(pdev, NULL);
	sysctl_ctx_free(&dev->sysctl_ctx);
	kfree(dev);
}

static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;

	mlx5_core_info(dev, "%s was called\n", __func__);
	mlx5_enter_error_state(dev, false);
	mlx5_unload_one(dev, priv, false);

	if (state) {
		mlx5_drain_health_wq(dev);
		mlx5_pci_disable_device(dev);
	}

	return state == pci_channel_io_perm_failure ?
	    PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err = 0;

	mlx5_core_info(dev, "%s was called\n", __func__);

	err = mlx5_pci_enable_device(dev);
	if (err) {
		mlx5_core_err(dev,
		    "mlx5_pci_enable_device failed with error code: %d\n", err);
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_set_powerstate(pdev->dev.bsddev, PCI_POWERSTATE_D0);
	pci_restore_state(pdev->dev.bsddev);
	pci_save_state(pdev->dev.bsddev);

	return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}

/*
 * Wait for the device to show vital signs.  For now we check that we
 * can read the device ID and that the health buffer shows a non-zero
 * value different from 0xffffffff.
 */
static void wait_vital(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_core_health *health = &dev->priv.health;
	const int niter = 100;
	u32 count;
	u16 did;
	int i;

	/* Wait for firmware to be ready after reset */
	msleep(1000);
	for (i = 0; i < niter; i++) {
		if (pci_read_config_word(pdev, 2, &did)) {
			mlx5_core_warn(dev, "failed reading config word\n");
			break;
		}
		if (did == pdev->device) {
			mlx5_core_info(dev,
			    "device ID correctly read after %d iterations\n", i);
			break;
		}
		msleep(50);
	}
	if (i == niter)
		mlx5_core_warn(dev, "could not read device ID\n");

	for (i = 0; i < niter; i++) {
		count = ioread32be(health->health_counter);
		if (count && count != 0xffffffff) {
			mlx5_core_info(dev,
			    "Counter value 0x%x after %d iterations\n", count, i);
			break;
		}
		msleep(50);
	}

	if (i == niter)
		mlx5_core_warn(dev, "could not read health counter\n");
}

static void mlx5_pci_resume(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;
	int err;

	mlx5_core_info(dev, "%s was called\n", __func__);

	wait_vital(pdev);

	err = mlx5_load_one(dev, priv, false);
	if (err)
		mlx5_core_err(dev,
		    "mlx5_load_one failed with error code: %d\n", err);
	else
		mlx5_core_info(dev, "device recovered\n");
}

static const struct pci_error_handlers mlx5_err_handler = {
	.error_detected = mlx5_pci_err_detected,
	.slot_reset	= mlx5_pci_slot_reset,
	.resume		= mlx5_pci_resume
};

#ifdef PCI_IOV
static int
mlx5_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config)
{
	struct pci_dev *pdev;
	struct mlx5_core_dev *core_dev;
	struct mlx5_priv *priv;
	int err;

	pdev = device_get_softc(dev);
	core_dev = pci_get_drvdata(pdev);
	priv = &core_dev->priv;

	if (priv->eswitch == NULL)
		return (ENXIO);
	if (priv->eswitch->total_vports < num_vfs + 1)
		num_vfs = priv->eswitch->total_vports - 1;
	err = mlx5_eswitch_enable_sriov(priv->eswitch, num_vfs);
	return (-err);
}

static void
mlx5_iov_uninit(device_t dev)
{
	struct pci_dev *pdev;
	struct mlx5_core_dev *core_dev;
	struct mlx5_priv *priv;

	pdev = device_get_softc(dev);
	core_dev = pci_get_drvdata(pdev);
	priv = &core_dev->priv;

	mlx5_eswitch_disable_sriov(priv->eswitch);
}

static int
mlx5_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config)
{
	struct pci_dev *pdev;
	struct mlx5_core_dev *core_dev;
	struct mlx5_priv *priv;
	const void *mac;
	size_t mac_size;
	int error;

	pdev = device_get_softc(dev);
	core_dev = pci_get_drvdata(pdev);
	priv = &core_dev->priv;

	if (vfnum + 1 >= priv->eswitch->total_vports)
		return (ENXIO);

	if (nvlist_exists_binary(vf_config, iov_mac_addr_name)) {
		mac = nvlist_get_binary(vf_config, iov_mac_addr_name,
		    &mac_size);
		error = -mlx5_eswitch_set_vport_mac(priv->eswitch,
		    vfnum + 1, __DECONST(u8 *, mac));
	}

	error = -mlx5_eswitch_set_vport_state(priv->eswitch, vfnum + 1,
	    VPORT_STATE_FOLLOW);
	if (error != 0) {
		mlx5_core_err(core_dev,
		    "upping vport for VF %d failed, error %d\n",
		    vfnum + 1, error);
	}
	error = -mlx5_core_enable_hca(core_dev, vfnum + 1);
	if (error != 0) {
		mlx5_core_err(core_dev, "enabling VF %d failed, error %d\n",
		    vfnum + 1, error);
	}
	return (error);
}
#endif

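/*
 * Fast unload skips the orderly teardown: if the firmware supports the
 * FAST_TEARDOWN (or, failing that, FORCE_TEARDOWN) command, the HCA is
 * shut down in one step and the device is moved into the error state
 * instead of being unloaded piece by piece.
 */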
static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
{
	bool fast_teardown, force_teardown;
	int err;

	if (!mlx5_fast_unload_enabled) {
		mlx5_core_dbg(dev, "fast unload is disabled by user\n");
		return -EOPNOTSUPP;
	}

	fast_teardown = MLX5_CAP_GEN(dev, fast_teardown);
	force_teardown = MLX5_CAP_GEN(dev, force_teardown);

	mlx5_core_dbg(dev, "force teardown firmware support=%d\n", force_teardown);
	mlx5_core_dbg(dev, "fast teardown firmware support=%d\n", fast_teardown);

	if (!fast_teardown && !force_teardown)
		return -EOPNOTSUPP;

	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5_core_dbg(dev, "Device in internal error state, giving up\n");
		return -EAGAIN;
	}

	/* Panic tear down fw command will stop the PCI bus communication
	 * with the HCA, so the health poll is no longer needed.
	 */
	mlx5_drain_health_wq(dev);
	mlx5_stop_health_poll(dev, false);

	err = mlx5_cmd_fast_teardown_hca(dev);
	if (!err)
		goto done;

	err = mlx5_cmd_force_teardown_hca(dev);
	if (!err)
		goto done;

	mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", err);
	mlx5_start_health_poll(dev);
	return err;
done:
	mlx5_enter_error_state(dev, true);
	return 0;
}

static void mlx5_shutdown_disable_interrupts(struct mlx5_core_dev *mdev)
{
	int nvec = mdev->priv.eq_table.num_comp_vectors + MLX5_EQ_VEC_COMP_BASE;
	int x;

	mdev->priv.disable_irqs = 1;

	/* wait for all IRQ handlers to finish processing */
	for (x = 0; x != nvec; x++)
		synchronize_irq(mdev->priv.msix_arr[x].vector);
}

static void shutdown_one(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;
	int err;

	/* enter polling mode */
	mlx5_cmd_use_polling(dev);

	set_bit(MLX5_INTERFACE_STATE_TEARDOWN, &dev->intf_state);

	/* disable all interrupts */
	mlx5_shutdown_disable_interrupts(dev);

	err = mlx5_try_fast_unload(dev);
	if (err)
		mlx5_unload_one(dev, priv, false);
	mlx5_pci_disable_device(dev);
}

static const struct pci_device_id mlx5_core_pci_table[] = {
	{ PCI_VDEVICE(MELLANOX, 4113) }, /* Connect-IB */
	{ PCI_VDEVICE(MELLANOX, 4114) }, /* Connect-IB VF */
	{ PCI_VDEVICE(MELLANOX, 4115) }, /* ConnectX-4 */
	{ PCI_VDEVICE(MELLANOX, 4116) }, /* ConnectX-4 VF */
	{ PCI_VDEVICE(MELLANOX, 4117) }, /* ConnectX-4LX */
	{ PCI_VDEVICE(MELLANOX, 4118) }, /* ConnectX-4LX VF */
	{ PCI_VDEVICE(MELLANOX, 4119) }, /* ConnectX-5 */
	{ PCI_VDEVICE(MELLANOX, 4120) }, /* ConnectX-5 VF */
	{ PCI_VDEVICE(MELLANOX, 4121) },
	{ PCI_VDEVICE(MELLANOX, 4122) },
	{ PCI_VDEVICE(MELLANOX, 4123) },
	{ PCI_VDEVICE(MELLANOX, 4124) },
	{ PCI_VDEVICE(MELLANOX, 4125) },
	{ PCI_VDEVICE(MELLANOX, 4126) },
	{ PCI_VDEVICE(MELLANOX, 4127) },
	{ PCI_VDEVICE(MELLANOX, 4128) },
	{ PCI_VDEVICE(MELLANOX, 4129) },
	{ PCI_VDEVICE(MELLANOX, 4130) },
	{ PCI_VDEVICE(MELLANOX, 4131) },
	{ PCI_VDEVICE(MELLANOX, 4132) },
	{ PCI_VDEVICE(MELLANOX, 4133) },
	{ PCI_VDEVICE(MELLANOX, 4134) },
	{ PCI_VDEVICE(MELLANOX, 4135) },
	{ PCI_VDEVICE(MELLANOX, 4136) },
	{ PCI_VDEVICE(MELLANOX, 4137) },
	{ PCI_VDEVICE(MELLANOX, 4138) },
	{ PCI_VDEVICE(MELLANOX, 4139) },
	{ PCI_VDEVICE(MELLANOX, 4140) },
	{ PCI_VDEVICE(MELLANOX, 4141) },
	{ PCI_VDEVICE(MELLANOX, 4142) },
	{ PCI_VDEVICE(MELLANOX, 4143) },
	{ PCI_VDEVICE(MELLANOX, 4144) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);

void mlx5_disable_device(struct mlx5_core_dev *dev)
{
	mlx5_pci_err_detected(dev->pdev, 0);
}

void mlx5_recover_device(struct mlx5_core_dev *dev)
{
	mlx5_pci_disable_device(dev);
	if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED)
		mlx5_pci_resume(dev->pdev);
}

struct pci_driver mlx5_core_driver = {
	.name		= DRIVER_NAME,
	.id_table	= mlx5_core_pci_table,
	.shutdown	= shutdown_one,
	.probe		= init_one,
	.remove		= remove_one,
	.err_handler	= &mlx5_err_handler,
#ifdef PCI_IOV
	.bsd_iov_init	= mlx5_iov_init,
	.bsd_iov_uninit = mlx5_iov_uninit,
	.bsd_iov_add_vf = mlx5_iov_add_vf,
#endif
};

static int __init init(void)
{
	int err;

	err = pci_register_driver(&mlx5_core_driver);
	if (err)
		goto err_debug;

	err = mlx5_ctl_init();
	if (err)
		goto err_ctl;

	return 0;

err_ctl:
	pci_unregister_driver(&mlx5_core_driver);

err_debug:
	return err;
}

static void __exit cleanup(void)
{
	mlx5_ctl_fini();
	pci_unregister_driver(&mlx5_core_driver);
}

module_init(init);
module_exit(cleanup);