/* mlx5_main.c — FreeBSD stable/11, revision 353197 */
1228753Smm/*- 2228753Smm * Copyright (c) 2013-2019, Mellanox Technologies, Ltd. All rights reserved. 3228753Smm * 4228753Smm * Redistribution and use in source and binary forms, with or without 5228753Smm * modification, are permitted provided that the following conditions 6228753Smm * are met: 7228753Smm * 1. Redistributions of source code must retain the above copyright 8228753Smm * notice, this list of conditions and the following disclaimer. 9228753Smm * 2. Redistributions in binary form must reproduce the above copyright 10228753Smm * notice, this list of conditions and the following disclaimer in the 11228753Smm * documentation and/or other materials provided with the distribution. 12228753Smm * 13228753Smm * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND 14228753Smm * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15228753Smm * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16228753Smm * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 17228753Smm * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18228753Smm * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19228753Smm * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20228753Smm * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21228753Smm * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22228753Smm * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23228753Smm * SUCH DAMAGE. 
24228753Smm * 25228753Smm * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_core/mlx5_main.c 353197 2019-10-07 08:47:08Z hselasky $ 26228753Smm */ 27248616Smm 28238856Smm#include <linux/kmod.h> 29238856Smm#include <linux/module.h> 30238856Smm#include <linux/errno.h> 31232153Smm#include <linux/pci.h> 32232153Smm#include <linux/dma-mapping.h> 33232153Smm#include <linux/slab.h> 34228753Smm#include <linux/io-mapping.h> 35232153Smm#include <linux/interrupt.h> 36232153Smm#include <linux/hardirq.h> 37232153Smm#include <dev/mlx5/driver.h> 38238856Smm#include <dev/mlx5/cq.h> 39238856Smm#include <dev/mlx5/qp.h> 40238856Smm#include <dev/mlx5/srq.h> 41238856Smm#include <dev/mlx5/mpfs.h> 42238856Smm#include <linux/delay.h> 43238856Smm#include <dev/mlx5/mlx5_ifc.h> 44238856Smm#include <dev/mlx5/mlx5_fpga/core.h> 45238856Smm#include <dev/mlx5/mlx5_lib/mlx5.h> 46238856Smm#include "mlx5_core.h" 47238856Smm#include "fs_core.h" 48232153Smm 49228753Smmstatic const char mlx5_version[] = "Mellanox Core driver " 50232153Smm DRIVER_VERSION " (" DRIVER_RELDATE ")"; 51232153SmmMODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); 52232153SmmMODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver"); 53228753SmmMODULE_LICENSE("Dual BSD/GPL"); 54228753SmmMODULE_DEPEND(mlx5, linuxkpi, 1, 1, 1); 55228753SmmMODULE_DEPEND(mlx5, mlxfw, 1, 1, 1); 56228753SmmMODULE_DEPEND(mlx5, firmware, 1, 1, 1); 57228753SmmMODULE_VERSION(mlx5, 1); 58228753Smm 59228753SmmSYSCTL_NODE(_hw, OID_AUTO, mlx5, CTLFLAG_RW, 0, "mlx5 hardware controls"); 60228753Smm 61228753Smmint mlx5_core_debug_mask; 62228753SmmSYSCTL_INT(_hw_mlx5, OID_AUTO, debug_mask, CTLFLAG_RWTUN, 63228763Smm &mlx5_core_debug_mask, 0, 64228753Smm "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0"); 65228753Smm 66228753Smm#define MLX5_DEFAULT_PROF 2 67232153Smmstatic int mlx5_prof_sel = MLX5_DEFAULT_PROF; 68232153SmmSYSCTL_INT(_hw_mlx5, OID_AUTO, prof_sel, CTLFLAG_RWTUN, 69232153Smm &mlx5_prof_sel, 0, 70232153Smm "profile selector. 
Valid range 0 - 2"); 71228753Smm 72228753Smmstatic int mlx5_fast_unload_enabled = 1; 73228753SmmSYSCTL_INT(_hw_mlx5, OID_AUTO, fast_unload_enabled, CTLFLAG_RWTUN, 74228753Smm &mlx5_fast_unload_enabled, 0, 75228753Smm "Set to enable fast unload. Clear to disable."); 76228753Smm 77228753Smm#define NUMA_NO_NODE -1 78228753Smm 79228753Smmstatic LIST_HEAD(intf_list); 80228753Smmstatic LIST_HEAD(dev_list); 81228753Smmstatic DEFINE_MUTEX(intf_mutex); 82228753Smm 83228753Smmstruct mlx5_device_context { 84228753Smm struct list_head list; 85228753Smm struct mlx5_interface *intf; 86228753Smm void *context; 87228753Smm}; 88228753Smm 89228753Smmenum { 90228753Smm MLX5_ATOMIC_REQ_MODE_BE = 0x0, 91228753Smm MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1, 92228753Smm}; 93228753Smm 94228753Smmstatic struct mlx5_profile profiles[] = { 95248616Smm [0] = { 96228753Smm .mask = 0, 97228753Smm }, 98228753Smm [1] = { 99228753Smm .mask = MLX5_PROF_MASK_QP_SIZE, 100228753Smm .log_max_qp = 12, 101228753Smm }, 102228753Smm [2] = { 103228753Smm .mask = MLX5_PROF_MASK_QP_SIZE | 104228753Smm MLX5_PROF_MASK_MR_CACHE, 105228753Smm .log_max_qp = 17, 106228753Smm .mr_cache[0] = { 107228753Smm .size = 500, 108228753Smm .limit = 250 109228753Smm }, 110228753Smm .mr_cache[1] = { 111228753Smm .size = 500, 112228753Smm .limit = 250 113228753Smm }, 114228753Smm .mr_cache[2] = { 115228753Smm .size = 500, 116228753Smm .limit = 250 117228753Smm }, 118228753Smm .mr_cache[3] = { 119228753Smm .size = 500, 120228753Smm .limit = 250 121228753Smm }, 122228753Smm .mr_cache[4] = { 123228753Smm .size = 500, 124228753Smm .limit = 250 125228753Smm }, 126228753Smm .mr_cache[5] = { 127228753Smm .size = 500, 128228753Smm .limit = 250 129228753Smm }, 130228753Smm .mr_cache[6] = { 131228753Smm .size = 500, 132228753Smm .limit = 250 133228753Smm }, 134238856Smm .mr_cache[7] = { 135238856Smm .size = 500, 136238856Smm .limit = 250 137238856Smm }, 138238856Smm .mr_cache[8] = { 139238856Smm .size = 500, 140238856Smm .limit = 250 
141238856Smm }, 142228753Smm .mr_cache[9] = { 143228753Smm .size = 500, 144228753Smm .limit = 250 145228753Smm }, 146228753Smm .mr_cache[10] = { 147228753Smm .size = 500, 148228753Smm .limit = 250 149228753Smm }, 150228753Smm .mr_cache[11] = { 151228753Smm .size = 500, 152228753Smm .limit = 250 153228753Smm }, 154228753Smm .mr_cache[12] = { 155228753Smm .size = 64, 156228753Smm .limit = 32 157228753Smm }, 158228753Smm .mr_cache[13] = { 159228753Smm .size = 32, 160228753Smm .limit = 16 161228753Smm }, 162228753Smm .mr_cache[14] = { 163228753Smm .size = 16, 164228753Smm .limit = 8 165228753Smm }, 166228753Smm }, 167228753Smm [3] = { 168228753Smm .mask = MLX5_PROF_MASK_QP_SIZE, 169228753Smm .log_max_qp = 17, 170228753Smm }, 171228753Smm}; 172228753Smm 173228753Smmstatic int set_dma_caps(struct pci_dev *pdev) 174228753Smm{ 175228753Smm int err; 176228753Smm 177228753Smm err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 178228753Smm if (err) { 179228753Smm device_printf((&pdev->dev)->bsddev, "WARN: ""Warning: couldn't set 64-bit PCI DMA mask\n"); 180238856Smm err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 181228753Smm if (err) { 182228753Smm device_printf((&pdev->dev)->bsddev, "ERR: ""Can't set PCI DMA mask, aborting\n"); 183228753Smm return err; 184228753Smm } 185228753Smm } 186228753Smm 187228753Smm err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 188232153Smm if (err) { 189228753Smm device_printf((&pdev->dev)->bsddev, "WARN: ""Warning: couldn't set 64-bit consistent PCI DMA mask\n"); 190228753Smm err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 191228753Smm if (err) { 192228753Smm device_printf((&pdev->dev)->bsddev, "ERR: ""Can't set consistent PCI DMA mask, aborting\n"); 193228753Smm return err; 194228753Smm } 195228753Smm } 196228753Smm 197228753Smm dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024); 198228753Smm return err; 199228753Smm} 200228753Smm 201228753Smmint mlx5_pci_read_power_status(struct mlx5_core_dev *dev, 202228753Smm u16 
*p_power, u8 *p_status) 203228753Smm{ 204228753Smm u32 in[MLX5_ST_SZ_DW(mpein_reg)] = {}; 205228753Smm u32 out[MLX5_ST_SZ_DW(mpein_reg)] = {}; 206228753Smm int err; 207228753Smm 208228753Smm err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), 209228753Smm MLX5_ACCESS_REG_SUMMARY_CTRL_ID_MPEIN, 0, 0); 210228753Smm 211228753Smm *p_status = MLX5_GET(mpein_reg, out, pwr_status); 212228753Smm *p_power = MLX5_GET(mpein_reg, out, pci_power); 213228753Smm return err; 214228753Smm} 215228753Smm 216228753Smmstatic int mlx5_pci_enable_device(struct mlx5_core_dev *dev) 217232153Smm{ 218232153Smm struct pci_dev *pdev = dev->pdev; 219228753Smm int err = 0; 220228753Smm 221228753Smm mutex_lock(&dev->pci_status_mutex); 222228753Smm if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) { 223228753Smm err = pci_enable_device(pdev); 224228753Smm if (!err) 225228753Smm dev->pci_status = MLX5_PCI_STATUS_ENABLED; 226228753Smm } 227228753Smm mutex_unlock(&dev->pci_status_mutex); 228228753Smm 229228753Smm return err; 230228753Smm} 231228753Smm 232228753Smmstatic void mlx5_pci_disable_device(struct mlx5_core_dev *dev) 233228753Smm{ 234228753Smm struct pci_dev *pdev = dev->pdev; 235228753Smm 236228753Smm mutex_lock(&dev->pci_status_mutex); 237228753Smm if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) { 238228753Smm pci_disable_device(pdev); 239228753Smm dev->pci_status = MLX5_PCI_STATUS_DISABLED; 240228753Smm } 241228753Smm mutex_unlock(&dev->pci_status_mutex); 242228753Smm} 243228753Smm 244228753Smmstatic int request_bar(struct pci_dev *pdev) 245228753Smm{ 246228753Smm int err = 0; 247228753Smm 248228753Smm if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 249228753Smm device_printf((&pdev->dev)->bsddev, "ERR: ""Missing registers BAR, aborting\n"); 250228753Smm return -ENODEV; 251228753Smm } 252228753Smm 253228753Smm err = pci_request_regions(pdev, DRIVER_NAME); 254228753Smm if (err) 255228753Smm device_printf((&pdev->dev)->bsddev, "ERR: ""Couldn't get PCI resources, 
aborting\n"); 256228753Smm 257228753Smm return err; 258228753Smm} 259228753Smm 260228753Smmstatic void release_bar(struct pci_dev *pdev) 261228753Smm{ 262228753Smm pci_release_regions(pdev); 263228753Smm} 264228753Smm 265228753Smmstatic int mlx5_enable_msix(struct mlx5_core_dev *dev) 266228753Smm{ 267228753Smm struct mlx5_priv *priv = &dev->priv; 268228753Smm struct mlx5_eq_table *table = &priv->eq_table; 269228753Smm int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq); 270228753Smm int limit = dev->msix_eqvec; 271228753Smm int nvec = MLX5_EQ_VEC_COMP_BASE; 272228753Smm int i; 273228753Smm 274228753Smm if (limit > 0) 275228753Smm nvec += limit; 276232153Smm else 277232153Smm nvec += MLX5_CAP_GEN(dev, num_ports) * num_online_cpus(); 278232153Smm 279232153Smm if (nvec > num_eqs) 280232153Smm nvec = num_eqs; 281232153Smm if (nvec > 256) 282232153Smm nvec = 256; /* limit of firmware API */ 283232153Smm if (nvec <= MLX5_EQ_VEC_COMP_BASE) 284228753Smm return -ENOMEM; 285228753Smm 286228753Smm priv->msix_arr = kzalloc(nvec * sizeof(*priv->msix_arr), GFP_KERNEL); 287228753Smm 288228753Smm priv->irq_info = kzalloc(nvec * sizeof(*priv->irq_info), GFP_KERNEL); 289228753Smm 290228753Smm for (i = 0; i < nvec; i++) 291228753Smm priv->msix_arr[i].entry = i; 292228753Smm 293228753Smm nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr, 294228753Smm MLX5_EQ_VEC_COMP_BASE + 1, nvec); 295232153Smm if (nvec < 0) 296228753Smm return nvec; 297232153Smm 298232153Smm table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE; 299232153Smm 300228753Smm return 0; 301232153Smm 302232153Smm} 303228753Smm 304228753Smmstatic void mlx5_disable_msix(struct mlx5_core_dev *dev) 305228753Smm{ 306228753Smm struct mlx5_priv *priv = &dev->priv; 307228753Smm 308228753Smm pci_disable_msix(dev->pdev); 309228753Smm kfree(priv->irq_info); 310228753Smm kfree(priv->msix_arr); 311228753Smm} 312228753Smm 313228753Smmstruct mlx5_reg_host_endianess { 314228753Smm u8 he; 315228753Smm u8 rsvd[15]; 316228753Smm}; 
317228753Smm 318228753Smm 319228753Smm#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos)) 320228753Smm 321228753Smmenum { 322228753Smm MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) | 323228753Smm MLX5_DEV_CAP_FLAG_DCT | 324228753Smm MLX5_DEV_CAP_FLAG_DRAIN_SIGERR, 325228753Smm}; 326228753Smm 327228753Smmstatic u16 to_fw_pkey_sz(u32 size) 328228753Smm{ 329232153Smm switch (size) { 330228753Smm case 128: 331228753Smm return 0; 332228753Smm case 256: 333228753Smm return 1; 334228753Smm case 512: 335228753Smm return 2; 336228753Smm case 1024: 337228753Smm return 3; 338228753Smm case 2048: 339232153Smm return 4; 340228753Smm case 4096: 341228753Smm return 5; 342228753Smm default: 343228753Smm printf("mlx5_core: WARN: ""invalid pkey table size %d\n", size); 344228753Smm return 0; 345228753Smm } 346228753Smm} 347228753Smm 348232153Smmstatic int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, 349228753Smm enum mlx5_cap_type cap_type, 350228753Smm enum mlx5_cap_mode cap_mode) 351228753Smm{ 352228753Smm u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)]; 353228753Smm int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); 354228753Smm void *out, *hca_caps; 355228753Smm u16 opmod = (cap_type << 1) | (cap_mode & 0x01); 356228753Smm int err; 357228753Smm 358228753Smm memset(in, 0, sizeof(in)); 359228753Smm out = kzalloc(out_sz, GFP_KERNEL); 360228753Smm 361228753Smm MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); 362228753Smm MLX5_SET(query_hca_cap_in, in, op_mod, opmod); 363228753Smm err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz); 364228753Smm if (err) { 365228753Smm mlx5_core_warn(dev, 366228753Smm "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n", 367228753Smm cap_type, cap_mode, err); 368228753Smm goto query_ex; 369228753Smm } 370228753Smm 371228753Smm hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability); 372228753Smm 373228753Smm switch (cap_mode) { 374228753Smm case HCA_CAP_OPMOD_GET_MAX: 375228753Smm 
memcpy(dev->hca_caps_max[cap_type], hca_caps, 376228753Smm MLX5_UN_SZ_BYTES(hca_cap_union)); 377228753Smm break; 378228753Smm case HCA_CAP_OPMOD_GET_CUR: 379228753Smm memcpy(dev->hca_caps_cur[cap_type], hca_caps, 380228753Smm MLX5_UN_SZ_BYTES(hca_cap_union)); 381228753Smm break; 382228753Smm default: 383228753Smm mlx5_core_warn(dev, 384232153Smm "Tried to query dev cap type(%x) with wrong opmode(%x)\n", 385232153Smm cap_type, cap_mode); 386228753Smm err = -EINVAL; 387232153Smm break; 388228753Smm } 389228753Smmquery_ex: 390228753Smm kfree(out); 391228753Smm return err; 392228753Smm} 393232153Smm 394228753Smmint mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type) 395228753Smm{ 396228753Smm int ret; 397228753Smm 398228753Smm ret = mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_CUR); 399228753Smm if (ret) 400228753Smm return ret; 401228753Smm 402228753Smm return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX); 403228753Smm} 404228753Smm 405228753Smmstatic int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz) 406228753Smm{ 407232153Smm u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)] = {0}; 408232153Smm 409232153Smm MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP); 410228753Smm 411232153Smm return mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out)); 412228753Smm} 413228753Smm 414228753Smmstatic int handle_hca_cap(struct mlx5_core_dev *dev) 415232153Smm{ 416228753Smm void *set_ctx = NULL; 417228753Smm struct mlx5_profile *prof = dev->profile; 418228753Smm int err = -ENOMEM; 419228753Smm int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in); 420228753Smm void *set_hca_cap; 421228753Smm 422228753Smm set_ctx = kzalloc(set_sz, GFP_KERNEL); 423228753Smm 424228753Smm err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL); 425228753Smm if (err) 426228753Smm goto query_ex; 427228753Smm 428228753Smm set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, 429228753Smm capability); 430228753Smm memcpy(set_hca_cap, 
dev->hca_caps_cur[MLX5_CAP_GENERAL], 431228753Smm MLX5_ST_SZ_BYTES(cmd_hca_cap)); 432228753Smm 433228753Smm mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n", 434228753Smm mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)), 435228753Smm 128); 436228753Smm /* we limit the size of the pkey table to 128 entries for now */ 437228753Smm MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size, 438228753Smm to_fw_pkey_sz(128)); 439228753Smm 440228753Smm if (prof->mask & MLX5_PROF_MASK_QP_SIZE) 441228753Smm MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp, 442228753Smm prof->log_max_qp); 443228753Smm 444228753Smm /* disable cmdif checksum */ 445228753Smm MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0); 446228753Smm 447228753Smm /* enable drain sigerr */ 448228753Smm MLX5_SET(cmd_hca_cap, set_hca_cap, drain_sigerr, 1); 449228753Smm 450228753Smm MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12); 451228753Smm 452228753Smm err = set_caps(dev, set_ctx, set_sz); 453228753Smm 454228753Smmquery_ex: 455228753Smm kfree(set_ctx); 456228753Smm return err; 457228753Smm} 458228753Smm 459228753Smmstatic int handle_hca_cap_atomic(struct mlx5_core_dev *dev) 460228753Smm{ 461228753Smm void *set_ctx; 462228753Smm void *set_hca_cap; 463228753Smm int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in); 464228753Smm int req_endianness; 465228753Smm int err; 466232153Smm 467232153Smm if (MLX5_CAP_GEN(dev, atomic)) { 468232153Smm err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC); 469232153Smm if (err) 470232153Smm return err; 471228753Smm } else { 472232153Smm return 0; 473232153Smm } 474232153Smm 475232153Smm req_endianness = 476232153Smm MLX5_CAP_ATOMIC(dev, 477232153Smm supported_atomic_req_8B_endianess_mode_1); 478232153Smm 479232153Smm if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS) 480232153Smm return 0; 481232153Smm 482232153Smm set_ctx = kzalloc(set_sz, GFP_KERNEL); 483232153Smm if (!set_ctx) 484232153Smm return -ENOMEM; 485232153Smm 486232153Smm 
MLX5_SET(set_hca_cap_in, set_ctx, op_mod, 487232153Smm MLX5_SET_HCA_CAP_OP_MOD_ATOMIC << 1); 488232153Smm set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability); 489232153Smm 490232153Smm /* Set requestor to host endianness */ 491232153Smm MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianess_mode, 492232153Smm MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS); 493232153Smm 494232153Smm err = set_caps(dev, set_ctx, set_sz); 495232153Smm 496232153Smm kfree(set_ctx); 497232153Smm return err; 498232153Smm} 499232153Smm 500232153Smmstatic int set_hca_ctrl(struct mlx5_core_dev *dev) 501232153Smm{ 502232153Smm struct mlx5_reg_host_endianess he_in; 503232153Smm struct mlx5_reg_host_endianess he_out; 504232153Smm int err; 505232153Smm 506232153Smm if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH && 507232153Smm !MLX5_CAP_GEN(dev, roce)) 508232153Smm return 0; 509232153Smm 510232153Smm memset(&he_in, 0, sizeof(he_in)); 511232153Smm he_in.he = MLX5_SET_HOST_ENDIANNESS; 512232153Smm err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in), 513232153Smm &he_out, sizeof(he_out), 514232153Smm MLX5_REG_HOST_ENDIANNESS, 0, 1); 515232153Smm return err; 516232153Smm} 517232153Smm 518232153Smmstatic int mlx5_core_enable_hca(struct mlx5_core_dev *dev) 519232153Smm{ 520232153Smm u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0}; 521232153Smm u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0}; 522232153Smm 523232153Smm MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA); 524232153Smm return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); 525232153Smm} 526232153Smm 527232153Smmstatic int mlx5_core_disable_hca(struct mlx5_core_dev *dev) 528232153Smm{ 529232153Smm u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {0}; 530232153Smm u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {0}; 531232153Smm 532232153Smm MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA); 533232153Smm return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); 534232153Smm} 535232153Smm 536232153Smmstatic int 
mlx5_core_set_issi(struct mlx5_core_dev *dev) 537232153Smm{ 538232153Smm u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {0}; 539232153Smm u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {0}; 540232153Smm u32 sup_issi; 541232153Smm int err; 542232153Smm 543232153Smm MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI); 544232153Smm 545232153Smm err = mlx5_cmd_exec(dev, query_in, sizeof(query_in), query_out, sizeof(query_out)); 546232153Smm if (err) { 547232153Smm u32 syndrome; 548232153Smm u8 status; 549232153Smm 550232153Smm mlx5_cmd_mbox_status(query_out, &status, &syndrome); 551232153Smm if (status == MLX5_CMD_STAT_BAD_OP_ERR) { 552232153Smm pr_debug("Only ISSI 0 is supported\n"); 553232153Smm return 0; 554232153Smm } 555232153Smm 556232153Smm printf("mlx5_core: ERR: ""failed to query ISSI\n"); 557228753Smm return err; 558228753Smm } 559232153Smm 560228753Smm sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0); 561232153Smm 562228753Smm if (sup_issi & (1 << 1)) { 563228753Smm u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {0}; 564228753Smm u32 set_out[MLX5_ST_SZ_DW(set_issi_out)] = {0}; 565228753Smm 566228753Smm MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI); 567228753Smm MLX5_SET(set_issi_in, set_in, current_issi, 1); 568228753Smm 569228753Smm err = mlx5_cmd_exec(dev, set_in, sizeof(set_in), set_out, sizeof(set_out)); 570228753Smm if (err) { 571228753Smm printf("mlx5_core: ERR: ""failed to set ISSI=1 err(%d)\n", err); 572228753Smm return err; 573228753Smm } 574228753Smm 575228753Smm dev->issi = 1; 576228753Smm 577228753Smm return 0; 578228753Smm } else if (sup_issi & (1 << 0)) { 579228753Smm return 0; 580232153Smm } 581232153Smm 582232153Smm return -ENOTSUPP; 583232153Smm} 584232153Smm 585232153Smm 586232153Smmint mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn) 587232153Smm{ 588232153Smm struct mlx5_eq_table *table = &dev->priv.eq_table; 589232153Smm struct mlx5_eq *eq; 590232153Smm int err = -ENOENT; 
591232153Smm 592232153Smm spin_lock(&table->lock); 593232153Smm list_for_each_entry(eq, &table->comp_eqs_list, list) { 594232153Smm if (eq->index == vector) { 595232153Smm *eqn = eq->eqn; 596232153Smm *irqn = eq->irqn; 597232153Smm err = 0; 598232153Smm break; 599232153Smm } 600232153Smm } 601232153Smm spin_unlock(&table->lock); 602232153Smm 603232153Smm return err; 604232153Smm} 605232153SmmEXPORT_SYMBOL(mlx5_vector2eqn); 606232153Smm 607232153Smmint mlx5_rename_eq(struct mlx5_core_dev *dev, int eq_ix, char *name) 608232153Smm{ 609232153Smm struct mlx5_priv *priv = &dev->priv; 610232153Smm struct mlx5_eq_table *table = &priv->eq_table; 611228753Smm struct mlx5_eq *eq; 612228753Smm int err = -ENOENT; 613228753Smm 614228753Smm spin_lock(&table->lock); 615228753Smm list_for_each_entry(eq, &table->comp_eqs_list, list) { 616228753Smm if (eq->index == eq_ix) { 617228753Smm int irq_ix = eq_ix + MLX5_EQ_VEC_COMP_BASE; 618232153Smm 619228753Smm snprintf(priv->irq_info[irq_ix].name, MLX5_MAX_IRQ_NAME, 620232153Smm "%s-%d", name, eq_ix); 621232153Smm 622228753Smm err = 0; 623228753Smm break; 624228753Smm } 625228753Smm } 626248616Smm spin_unlock(&table->lock); 627248616Smm 628232153Smm return err; 629232153Smm} 630232153Smm 631232153Smmstatic void free_comp_eqs(struct mlx5_core_dev *dev) 632228753Smm{ 633228753Smm struct mlx5_eq_table *table = &dev->priv.eq_table; 634228753Smm struct mlx5_eq *eq, *n; 635228753Smm 636228753Smm spin_lock(&table->lock); 637228753Smm list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) { 638228753Smm list_del(&eq->list); 639228753Smm spin_unlock(&table->lock); 640228753Smm if (mlx5_destroy_unmap_eq(dev, eq)) 641228753Smm mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n", 642228753Smm eq->eqn); 643228753Smm kfree(eq); 644228753Smm spin_lock(&table->lock); 645228753Smm } 646228753Smm spin_unlock(&table->lock); 647228753Smm} 648228753Smm 649228753Smmstatic int alloc_comp_eqs(struct mlx5_core_dev *dev) 650228753Smm{ 651228753Smm struct 
mlx5_eq_table *table = &dev->priv.eq_table; 652228753Smm char name[MLX5_MAX_IRQ_NAME]; 653228753Smm struct mlx5_eq *eq; 654228753Smm int ncomp_vec; 655228753Smm int nent; 656228753Smm int err; 657228753Smm int i; 658228753Smm 659228753Smm INIT_LIST_HEAD(&table->comp_eqs_list); 660228753Smm ncomp_vec = table->num_comp_vectors; 661228753Smm nent = MLX5_COMP_EQ_SIZE; 662228753Smm for (i = 0; i < ncomp_vec; i++) { 663228753Smm eq = kzalloc(sizeof(*eq), GFP_KERNEL); 664228753Smm 665228753Smm snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i); 666228753Smm err = mlx5_create_map_eq(dev, eq, 667228753Smm i + MLX5_EQ_VEC_COMP_BASE, nent, 0, 668228753Smm name, &dev->priv.uuari.uars[0]); 669228753Smm if (err) { 670228753Smm kfree(eq); 671228753Smm goto clean; 672228753Smm } 673228753Smm mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn); 674228753Smm eq->index = i; 675228753Smm spin_lock(&table->lock); 676228753Smm list_add_tail(&eq->list, &table->comp_eqs_list); 677228753Smm spin_unlock(&table->lock); 678228753Smm } 679228753Smm 680228753Smm return 0; 681228753Smm 682228753Smmclean: 683232153Smm free_comp_eqs(dev); 684228753Smm return err; 685232153Smm} 686232153Smm 687228753Smmstatic int map_bf_area(struct mlx5_core_dev *dev) 688228753Smm{ 689228753Smm resource_size_t bf_start = pci_resource_start(dev->pdev, 0); 690228753Smm resource_size_t bf_len = pci_resource_len(dev->pdev, 0); 691228753Smm 692228753Smm dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len); 693228753Smm 694228753Smm return dev->priv.bf_mapping ? 
0 : -ENOMEM; 695228753Smm} 696228753Smm 697228753Smmstatic void unmap_bf_area(struct mlx5_core_dev *dev) 698228753Smm{ 699228753Smm if (dev->priv.bf_mapping) 700228753Smm io_mapping_free(dev->priv.bf_mapping); 701228753Smm} 702228753Smm 703228753Smmstatic inline int fw_initializing(struct mlx5_core_dev *dev) 704228753Smm{ 705228753Smm return ioread32be(&dev->iseg->initializing) >> 31; 706228753Smm} 707228753Smm 708228753Smmstatic int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili) 709228753Smm{ 710228753Smm u64 end = jiffies + msecs_to_jiffies(max_wait_mili); 711228753Smm int err = 0; 712228753Smm 713228753Smm while (fw_initializing(dev)) { 714228753Smm if (time_after(jiffies, end)) { 715228753Smm err = -EBUSY; 716228753Smm break; 717228753Smm } 718228753Smm msleep(FW_INIT_WAIT_MS); 719228753Smm } 720228753Smm 721228753Smm return err; 722228753Smm} 723228753Smm 724228753Smmstatic void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv) 725228753Smm{ 726228753Smm struct mlx5_device_context *dev_ctx; 727228753Smm struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv); 728228753Smm 729228753Smm dev_ctx = kzalloc(sizeof(*dev_ctx), GFP_KERNEL); 730228753Smm if (!dev_ctx) 731228753Smm return; 732228753Smm 733228753Smm dev_ctx->intf = intf; 734228753Smm CURVNET_SET_QUIET(vnet0); 735228753Smm dev_ctx->context = intf->add(dev); 736228753Smm CURVNET_RESTORE(); 737228753Smm 738228753Smm if (dev_ctx->context) { 739228753Smm spin_lock_irq(&priv->ctx_lock); 740228753Smm list_add_tail(&dev_ctx->list, &priv->ctx_list); 741228753Smm spin_unlock_irq(&priv->ctx_lock); 742228753Smm } else { 743228753Smm kfree(dev_ctx); 744228753Smm } 745228753Smm} 746228753Smm 747228753Smmstatic void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv) 748228753Smm{ 749228753Smm struct mlx5_device_context *dev_ctx; 750248616Smm struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv); 751248616Smm 752228753Smm 
list_for_each_entry(dev_ctx, &priv->ctx_list, list) 753228753Smm if (dev_ctx->intf == intf) { 754228753Smm spin_lock_irq(&priv->ctx_lock); 755228753Smm list_del(&dev_ctx->list); 756228753Smm spin_unlock_irq(&priv->ctx_lock); 757228753Smm 758228753Smm intf->remove(dev, dev_ctx->context); 759228753Smm kfree(dev_ctx); 760228753Smm return; 761228753Smm } 762228753Smm} 763228753Smm 764228753Smmint 765228753Smmmlx5_register_device(struct mlx5_core_dev *dev) 766232153Smm{ 767228753Smm struct mlx5_priv *priv = &dev->priv; 768232153Smm struct mlx5_interface *intf; 769228753Smm 770228753Smm mutex_lock(&intf_mutex); 771228753Smm list_add_tail(&priv->dev_list, &dev_list); 772228753Smm list_for_each_entry(intf, &intf_list, list) 773228753Smm mlx5_add_device(intf, priv); 774228753Smm mutex_unlock(&intf_mutex); 775228753Smm 776232153Smm return 0; 777228753Smm} 778228753Smm 779228753Smmvoid 780228753Smmmlx5_unregister_device(struct mlx5_core_dev *dev) 781228753Smm{ 782228753Smm struct mlx5_priv *priv = &dev->priv; 783232153Smm struct mlx5_interface *intf; 784228753Smm 785228753Smm mutex_lock(&intf_mutex); 786232153Smm list_for_each_entry(intf, &intf_list, list) 787228753Smm mlx5_remove_device(intf, priv); 788228753Smm list_del(&priv->dev_list); 789228753Smm mutex_unlock(&intf_mutex); 790228753Smm} 791228753Smm 792228753Smmint mlx5_register_interface(struct mlx5_interface *intf) 793232153Smm{ 794228753Smm struct mlx5_priv *priv; 795228753Smm 796228753Smm if (!intf->add || !intf->remove) 797228753Smm return -EINVAL; 798228753Smm 799228753Smm mutex_lock(&intf_mutex); 800228753Smm list_add_tail(&intf->list, &intf_list); 801228753Smm list_for_each_entry(priv, &dev_list, dev_list) 802228753Smm mlx5_add_device(intf, priv); 803228753Smm mutex_unlock(&intf_mutex); 804228753Smm 805228753Smm return 0; 806228753Smm} 807228753SmmEXPORT_SYMBOL(mlx5_register_interface); 808228753Smm 809228753Smmvoid mlx5_unregister_interface(struct mlx5_interface *intf) 810228753Smm{ 811228753Smm struct 
mlx5_priv *priv; 812232153Smm 813228753Smm mutex_lock(&intf_mutex); 814228753Smm list_for_each_entry(priv, &dev_list, dev_list) 815228753Smm mlx5_remove_device(intf, priv); 816232153Smm list_del(&intf->list); 817228753Smm mutex_unlock(&intf_mutex); 818228753Smm} 819232153SmmEXPORT_SYMBOL(mlx5_unregister_interface); 820228753Smm 821228753Smmvoid *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol) 822228753Smm{ 823228753Smm struct mlx5_priv *priv = &mdev->priv; 824232153Smm struct mlx5_device_context *dev_ctx; 825228753Smm unsigned long flags; 826228753Smm void *result = NULL; 827228753Smm 828228753Smm spin_lock_irqsave(&priv->ctx_lock, flags); 829228753Smm 830228753Smm list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list) 831228753Smm if ((dev_ctx->intf->protocol == protocol) && 832228753Smm dev_ctx->intf->get_dev) { 833228753Smm result = dev_ctx->intf->get_dev(dev_ctx->context); 834232153Smm break; 835228753Smm } 836228753Smm 837228753Smm spin_unlock_irqrestore(&priv->ctx_lock, flags); 838228753Smm 839228753Smm return result; 840228753Smm} 841232153SmmEXPORT_SYMBOL(mlx5_get_protocol_dev); 842228753Smm 843228753Smmstatic int mlx5_auto_fw_update; 844228753SmmSYSCTL_INT(_hw_mlx5, OID_AUTO, auto_fw_update, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, 845248616Smm &mlx5_auto_fw_update, 0, 846248616Smm "Allow automatic firmware update on driver start"); 847248616Smmstatic int 848248616Smmmlx5_firmware_update(struct mlx5_core_dev *dev) 849248616Smm{ 850228753Smm const struct firmware *fw; 851248616Smm int err; 852248616Smm 853228753Smm TUNABLE_INT_FETCH("hw.mlx5.auto_fw_update", &mlx5_auto_fw_update); 854228753Smm if (!mlx5_auto_fw_update) 855228753Smm return (0); 856228753Smm fw = firmware_get("mlx5fw_mfa"); 857228753Smm if (fw) { 858228753Smm err = mlx5_firmware_flash(dev, fw); 859228753Smm firmware_put(fw, FIRMWARE_UNLOAD); 860228753Smm } 861228753Smm else 862228753Smm return (-ENOENT); 863228753Smm 864228753Smm return err; 865232153Smm} 866228753Smm 
867228753Smmstatic int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv) 868232153Smm{ 869228753Smm struct pci_dev *pdev = dev->pdev; 870228753Smm int err = 0; 871228753Smm 872228753Smm pci_set_drvdata(dev->pdev, dev); 873228753Smm strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN); 874232153Smm priv->name[MLX5_MAX_NAME_LEN - 1] = 0; 875228753Smm 876232153Smm mutex_init(&priv->pgdir_mutex); 877228753Smm INIT_LIST_HEAD(&priv->pgdir_list); 878228753Smm spin_lock_init(&priv->mkey_lock); 879228753Smm 880228753Smm priv->numa_node = NUMA_NO_NODE; 881228753Smm 882228753Smm err = mlx5_pci_enable_device(dev); 883228753Smm if (err) { 884228753Smm device_printf((&pdev->dev)->bsddev, "ERR: ""Cannot enable PCI device, aborting\n"); 885232153Smm goto err_dbg; 886232153Smm } 887228753Smm 888228753Smm err = request_bar(pdev); 889228753Smm if (err) { 890228753Smm device_printf((&pdev->dev)->bsddev, "ERR: ""error requesting BARs, aborting\n"); 891228753Smm goto err_disable; 892232153Smm } 893228753Smm 894232153Smm pci_set_master(pdev); 895228753Smm 896228753Smm err = set_dma_caps(pdev); 897228753Smm if (err) { 898228753Smm device_printf((&pdev->dev)->bsddev, "ERR: ""Failed setting DMA capabilities mask, aborting\n"); 899228753Smm goto err_clr_master; 900228753Smm } 901228753Smm 902228753Smm dev->iseg_base = pci_resource_start(dev->pdev, 0); 903232153Smm dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg)); 904232153Smm if (!dev->iseg) { 905228753Smm err = -ENOMEM; 906228753Smm device_printf((&pdev->dev)->bsddev, "ERR: ""Failed mapping initialization segment, aborting\n"); 907228753Smm goto err_clr_master; 908228753Smm } 909228753Smm 910232153Smm return 0; 911228753Smm 912228753Smmerr_clr_master: 913228753Smm release_bar(dev->pdev); 914228753Smmerr_disable: 915228753Smm mlx5_pci_disable_device(dev); 916232153Smmerr_dbg: 917228753Smm return err; 918228753Smm} 919228753Smm 920232153Smmstatic void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv 
*priv) 921228753Smm{ 922232153Smm iounmap(dev->iseg); 923228753Smm release_bar(dev->pdev); 924228753Smm mlx5_pci_disable_device(dev); 925228753Smm} 926248616Smm 927228753Smmstatic int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv) 928228753Smm{ 929228753Smm struct pci_dev *pdev = dev->pdev; 930228753Smm int err; 931228753Smm 932232153Smm err = mlx5_vsc_find_cap(dev); 933228753Smm if (err) 934228753Smm dev_err(&pdev->dev, "Unable to find vendor specific capabilities\n"); 935228753Smm 936228753Smm err = mlx5_query_hca_caps(dev); 937228753Smm if (err) { 938228753Smm dev_err(&pdev->dev, "query hca failed\n"); 939228753Smm goto out; 940232153Smm } 941228753Smm 942228753Smm err = mlx5_query_board_id(dev); 943228753Smm if (err) { 944228753Smm dev_err(&pdev->dev, "query board id failed\n"); 945228753Smm goto out; 946228753Smm } 947232153Smm 948228753Smm err = mlx5_eq_init(dev); 949228753Smm if (err) { 950228753Smm dev_err(&pdev->dev, "failed to initialize eq\n"); 951228753Smm goto out; 952228753Smm } 953228753Smm 954232153Smm MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock); 955228753Smm 956232153Smm err = mlx5_init_cq_table(dev); 957232153Smm if (err) { 958232153Smm dev_err(&pdev->dev, "failed to initialize cq table\n"); 959232153Smm goto err_eq_cleanup; 960232153Smm } 961232153Smm 962248616Smm mlx5_init_qp_table(dev); 963228753Smm mlx5_init_srq_table(dev); 964248616Smm mlx5_init_mr_table(dev); 965228753Smm 966228753Smm mlx5_init_reserved_gids(dev); 967228753Smm mlx5_fpga_init(dev); 968228753Smm 969228753Smm return 0; 970228753Smm 971228753Smmerr_eq_cleanup: 972228753Smm mlx5_eq_cleanup(dev); 973228753Smm 974228753Smmout: 975228753Smm return err; 976228753Smm} 977228753Smm 978228753Smmstatic void mlx5_cleanup_once(struct mlx5_core_dev *dev) 979228753Smm{ 980228753Smm mlx5_fpga_cleanup(dev); 981228753Smm mlx5_cleanup_reserved_gids(dev); 982228753Smm mlx5_cleanup_mr_table(dev); 983228753Smm mlx5_cleanup_srq_table(dev); 984228753Smm mlx5_cleanup_qp_table(dev); 
985228753Smm mlx5_cleanup_cq_table(dev); 986228753Smm mlx5_eq_cleanup(dev); 987228753Smm} 988228753Smm 989232153Smmstatic int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, 990228753Smm bool boot) 991232153Smm{ 992228753Smm struct pci_dev *pdev = dev->pdev; 993232153Smm int err; 994232153Smm 995232153Smm mutex_lock(&dev->intf_state_mutex); 996228753Smm if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { 997228753Smm dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n", 998228753Smm __func__); 999232153Smm goto out; 1000228753Smm } 1001228753Smm 1002228753Smm device_printf((&pdev->dev)->bsddev, "INFO: ""firmware version: %d.%d.%d\n", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev)); 1003228753Smm 1004228753Smm /* 1005228753Smm * On load removing any previous indication of internal error, 1006228753Smm * device is up 1007228753Smm */ 1008228753Smm dev->state = MLX5_DEVICE_STATE_UP; 1009228753Smm 1010228753Smm err = mlx5_cmd_init(dev); 1011228753Smm if (err) { 1012228753Smm device_printf((&pdev->dev)->bsddev, "ERR: ""Failed initializing command interface, aborting\n"); 1013228753Smm goto out_err; 1014228753Smm } 1015248616Smm 1016248616Smm err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI); 1017228753Smm if (err) { 1018228753Smm device_printf((&dev->pdev->dev)->bsddev, "ERR: ""Firmware over %d MS in initializing state, aborting\n", FW_INIT_TIMEOUT_MILI); 1019228753Smm goto err_cmd_cleanup; 1020228753Smm } 1021228753Smm 1022228753Smm err = mlx5_core_enable_hca(dev); 1023228753Smm if (err) { 1024228753Smm device_printf((&pdev->dev)->bsddev, "ERR: ""enable hca failed\n"); 1025228753Smm goto err_cmd_cleanup; 1026228753Smm } 1027228753Smm 1028228753Smm err = mlx5_core_set_issi(dev); 1029248616Smm if (err) { 1030248616Smm device_printf((&pdev->dev)->bsddev, "ERR: ""failed to set issi\n"); 1031228753Smm goto err_disable_hca; 1032228753Smm } 1033228753Smm 1034248616Smm err = mlx5_pagealloc_start(dev); 1035248616Smm if (err) { 1036248616Smm 
device_printf((&pdev->dev)->bsddev, "ERR: ""mlx5_pagealloc_start failed\n"); 1037248616Smm goto err_disable_hca; 1038248616Smm } 1039248616Smm 1040248616Smm err = mlx5_satisfy_startup_pages(dev, 1); 1041248616Smm if (err) { 1042248616Smm device_printf((&pdev->dev)->bsddev, "ERR: ""failed to allocate boot pages\n"); 1043248616Smm goto err_pagealloc_stop; 1044248616Smm } 1045228753Smm 1046228753Smm err = set_hca_ctrl(dev); 1047232153Smm if (err) { 1048228753Smm device_printf((&pdev->dev)->bsddev, "ERR: ""set_hca_ctrl failed\n"); 1049228753Smm goto reclaim_boot_pages; 1050228753Smm } 1051228753Smm 1052228753Smm err = handle_hca_cap(dev); 1053228753Smm if (err) { 1054228753Smm device_printf((&pdev->dev)->bsddev, "ERR: ""handle_hca_cap failed\n"); 1055228753Smm goto reclaim_boot_pages; 1056248616Smm } 1057248616Smm 1058248616Smm err = handle_hca_cap_atomic(dev); 1059248616Smm if (err) { 1060248616Smm device_printf((&pdev->dev)->bsddev, "ERR: ""handle_hca_cap_atomic failed\n"); 1061248616Smm goto reclaim_boot_pages; 1062248616Smm } 1063228753Smm 1064248616Smm err = mlx5_satisfy_startup_pages(dev, 0); 1065248616Smm if (err) { 1066248616Smm device_printf((&pdev->dev)->bsddev, "ERR: ""failed to allocate init pages\n"); 1067248616Smm goto reclaim_boot_pages; 1068248616Smm } 1069248616Smm 1070228753Smm err = mlx5_cmd_init_hca(dev); 1071228753Smm if (err) { 1072232153Smm device_printf((&pdev->dev)->bsddev, "ERR: ""init hca failed\n"); 1073228753Smm goto reclaim_boot_pages; 1074228753Smm } 1075228753Smm 1076228753Smm mlx5_start_health_poll(dev); 1077228753Smm 1078228753Smm if (boot && mlx5_init_once(dev, priv)) { 1079228753Smm dev_err(&pdev->dev, "sw objs init failed\n"); 1080228753Smm goto err_stop_poll; 1081228753Smm } 1082228753Smm 1083228753Smm err = mlx5_enable_msix(dev); 1084228753Smm if (err) { 1085228753Smm device_printf((&pdev->dev)->bsddev, "ERR: ""enable msix failed\n"); 1086228753Smm goto err_cleanup_once; 1087228753Smm } 1088232153Smm 1089228753Smm err = 
mlx5_alloc_uuars(dev, &priv->uuari); 1090228753Smm if (err) { 1091228753Smm device_printf((&pdev->dev)->bsddev, "ERR: ""Failed allocating uar, aborting\n"); 1092228753Smm goto err_disable_msix; 1093228753Smm } 1094228753Smm 1095228753Smm err = mlx5_start_eqs(dev); 1096228753Smm if (err) { 1097228753Smm device_printf((&pdev->dev)->bsddev, "ERR: ""Failed to start pages and async EQs\n"); 1098228753Smm goto err_free_uar; 1099228753Smm } 1100228753Smm 1101228753Smm err = alloc_comp_eqs(dev); 1102228753Smm if (err) { 1103228753Smm device_printf((&pdev->dev)->bsddev, "ERR: ""Failed to alloc completion EQs\n"); 1104228753Smm goto err_stop_eqs; 1105228753Smm } 1106228753Smm 1107228753Smm if (map_bf_area(dev)) 1108228753Smm device_printf((&pdev->dev)->bsddev, "ERR: ""Failed to map blue flame area\n"); 1109228753Smm 1110228753Smm err = mlx5_init_fs(dev); 1111228753Smm if (err) { 1112228753Smm mlx5_core_err(dev, "flow steering init %d\n", err); 1113228753Smm goto err_free_comp_eqs; 1114228753Smm } 1115228753Smm 1116228753Smm err = mlx5_mpfs_init(dev); 1117228753Smm if (err) { 1118228753Smm mlx5_core_err(dev, "mpfs init failed %d\n", err); 1119228753Smm goto err_fs; 1120228753Smm } 1121228753Smm 1122228753Smm err = mlx5_fpga_device_start(dev); 1123228753Smm if (err) { 1124228753Smm dev_err(&pdev->dev, "fpga device start failed %d\n", err); 1125228753Smm goto err_mpfs; 1126228753Smm } 1127228753Smm 1128228753Smm err = mlx5_register_device(dev); 1129228753Smm if (err) { 1130228753Smm dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err); 1131228753Smm goto err_fpga; 1132228753Smm } 1133228753Smm 1134228753Smm set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); 1135228753Smm 1136228753Smmout: 1137228753Smm mutex_unlock(&dev->intf_state_mutex); 1138228753Smm return 0; 1139228753Smm 1140228753Smmerr_fpga: 1141228753Smm mlx5_fpga_device_stop(dev); 1142228753Smm 1143228753Smmerr_mpfs: 1144228753Smm mlx5_mpfs_destroy(dev); 1145228753Smm 1146228753Smmerr_fs: 1147228753Smm 
mlx5_cleanup_fs(dev); 1148228753Smm 1149228753Smmerr_free_comp_eqs: 1150228753Smm free_comp_eqs(dev); 1151228753Smm unmap_bf_area(dev); 1152228753Smm 1153228753Smmerr_stop_eqs: 1154228753Smm mlx5_stop_eqs(dev); 1155228753Smm 1156228753Smmerr_free_uar: 1157228753Smm mlx5_free_uuars(dev, &priv->uuari); 1158228753Smm 1159228753Smmerr_disable_msix: 1160228753Smm mlx5_disable_msix(dev); 1161228753Smm 1162228753Smmerr_cleanup_once: 1163228753Smm if (boot) 1164228753Smm mlx5_cleanup_once(dev); 1165228753Smm 1166228753Smmerr_stop_poll: 1167228753Smm mlx5_stop_health_poll(dev, boot); 1168228753Smm if (mlx5_cmd_teardown_hca(dev)) { 1169228753Smm device_printf((&dev->pdev->dev)->bsddev, "ERR: ""tear_down_hca failed, skip cleanup\n"); 1170228753Smm goto out_err; 1171228753Smm } 1172228753Smm 1173228753Smmreclaim_boot_pages: 1174228753Smm mlx5_reclaim_startup_pages(dev); 1175228753Smm 1176228753Smmerr_pagealloc_stop: 1177228753Smm mlx5_pagealloc_stop(dev); 1178228753Smm 1179228753Smmerr_disable_hca: 1180228753Smm mlx5_core_disable_hca(dev); 1181228753Smm 1182228753Smmerr_cmd_cleanup: 1183228753Smm mlx5_cmd_cleanup(dev); 1184228753Smm 1185228753Smmout_err: 1186228753Smm dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; 1187228753Smm mutex_unlock(&dev->intf_state_mutex); 1188228753Smm 1189228753Smm return err; 1190228753Smm} 1191228753Smm 1192228753Smmstatic int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, 1193228753Smm bool cleanup) 1194228753Smm{ 1195228753Smm int err = 0; 1196228753Smm 1197228753Smm if (cleanup) 1198228753Smm mlx5_drain_health_recovery(dev); 1199228753Smm 1200228753Smm mutex_lock(&dev->intf_state_mutex); 1201228753Smm if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { 1202228753Smm dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n", __func__); 1203248616Smm if (cleanup) 1204228753Smm mlx5_cleanup_once(dev); 1205228753Smm goto out; 1206248616Smm } 1207248616Smm 1208228753Smm mlx5_unregister_device(dev); 1209228753Smm 
1210232153Smm mlx5_fpga_device_stop(dev); 1211232153Smm mlx5_mpfs_destroy(dev); 1212232153Smm mlx5_cleanup_fs(dev); 1213228753Smm unmap_bf_area(dev); 1214232153Smm mlx5_wait_for_reclaim_vfs_pages(dev); 1215228753Smm free_comp_eqs(dev); 1216228753Smm mlx5_stop_eqs(dev); 1217228753Smm mlx5_free_uuars(dev, &priv->uuari); 1218228753Smm mlx5_disable_msix(dev); 1219228753Smm if (cleanup) 1220228753Smm mlx5_cleanup_once(dev); 1221228753Smm mlx5_stop_health_poll(dev, cleanup); 1222248616Smm err = mlx5_cmd_teardown_hca(dev); 1223248616Smm if (err) { 1224248616Smm device_printf((&dev->pdev->dev)->bsddev, "ERR: ""tear_down_hca failed, skip cleanup\n"); 1225228753Smm goto out; 1226228753Smm } 1227228753Smm mlx5_pagealloc_stop(dev); 1228228753Smm mlx5_reclaim_startup_pages(dev); 1229228753Smm mlx5_core_disable_hca(dev); 1230228753Smm mlx5_cmd_cleanup(dev); 1231228753Smm 1232248616Smmout: 1233248616Smm clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); 1234228753Smm mutex_unlock(&dev->intf_state_mutex); 1235228753Smm return err; 1236228753Smm} 1237228753Smm 1238228753Smmvoid mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, 1239228753Smm unsigned long param) 1240228753Smm{ 1241228753Smm struct mlx5_priv *priv = &dev->priv; 1242228753Smm struct mlx5_device_context *dev_ctx; 1243228753Smm unsigned long flags; 1244228753Smm 1245228753Smm spin_lock_irqsave(&priv->ctx_lock, flags); 1246228753Smm 1247228753Smm list_for_each_entry(dev_ctx, &priv->ctx_list, list) 1248228753Smm if (dev_ctx->intf->event) 1249228753Smm dev_ctx->intf->event(dev, dev_ctx->context, event, param); 1250228753Smm 1251228753Smm spin_unlock_irqrestore(&priv->ctx_lock, flags); 1252228753Smm} 1253228753Smm 1254228753Smmstruct mlx5_core_event_handler { 1255228753Smm void (*event)(struct mlx5_core_dev *dev, 1256228753Smm enum mlx5_dev_event event, 1257228753Smm void *data); 1258228753Smm}; 1259228753Smm 1260228753Smmstatic int init_one(struct pci_dev *pdev, 1261228753Smm const struct 
pci_device_id *id) 1262228753Smm{ 1263228753Smm struct mlx5_core_dev *dev; 1264228753Smm struct mlx5_priv *priv; 1265228753Smm device_t bsddev = pdev->dev.bsddev; 1266228753Smm int err; 1267228753Smm 1268228753Smm dev = kzalloc(sizeof(*dev), GFP_KERNEL); 1269228753Smm priv = &dev->priv; 1270228753Smm if (id) 1271228753Smm priv->pci_dev_data = id->driver_data; 1272228753Smm 1273228753Smm if (mlx5_prof_sel < 0 || mlx5_prof_sel >= ARRAY_SIZE(profiles)) { 1274228753Smm device_printf(bsddev, "WARN: selected profile out of range, selecting default (%d)\n", MLX5_DEFAULT_PROF); 1275228753Smm mlx5_prof_sel = MLX5_DEFAULT_PROF; 1276228753Smm } 1277228753Smm dev->profile = &profiles[mlx5_prof_sel]; 1278228753Smm dev->pdev = pdev; 1279232153Smm dev->event = mlx5_core_event; 1280228753Smm 1281228753Smm /* Set desc */ 1282228753Smm device_set_desc(bsddev, mlx5_version); 1283228753Smm 1284228753Smm sysctl_ctx_init(&dev->sysctl_ctx); 1285228753Smm SYSCTL_ADD_INT(&dev->sysctl_ctx, 1286232153Smm SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)), 1287228753Smm OID_AUTO, "msix_eqvec", CTLFLAG_RDTUN, &dev->msix_eqvec, 0, 1288228753Smm "Maximum number of MSIX event queue vectors, if set"); 1289228753Smm SYSCTL_ADD_INT(&dev->sysctl_ctx, 1290228753Smm SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)), 1291228753Smm OID_AUTO, "power_status", CTLFLAG_RD, &dev->pwr_status, 0, 1292228753Smm "0:Invalid 1:Sufficient 2:Insufficient"); 1293228753Smm SYSCTL_ADD_INT(&dev->sysctl_ctx, 1294228753Smm SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)), 1295228753Smm OID_AUTO, "power_value", CTLFLAG_RD, &dev->pwr_value, 0, 1296228753Smm "Current power value in Watts"); 1297228753Smm 1298228753Smm INIT_LIST_HEAD(&priv->ctx_list); 1299228753Smm spin_lock_init(&priv->ctx_lock); 1300228753Smm mutex_init(&dev->pci_status_mutex); 1301228753Smm mutex_init(&dev->intf_state_mutex); 1302228753Smm mtx_init(&dev->dump_lock, "mlx5dmp", NULL, MTX_DEF | MTX_NEW); 1303228753Smm err = mlx5_pci_init(dev, priv); 1304228753Smm if 
(err) { 1305228753Smm device_printf(bsddev, "ERR: mlx5_pci_init failed %d\n", err); 1306228753Smm goto clean_dev; 1307228753Smm } 1308228753Smm 1309228753Smm err = mlx5_health_init(dev); 1310228753Smm if (err) { 1311228753Smm device_printf(bsddev, "ERR: mlx5_health_init failed %d\n", err); 1312228753Smm goto close_pci; 1313228753Smm } 1314228753Smm 1315228753Smm mlx5_pagealloc_init(dev); 1316228753Smm 1317228753Smm err = mlx5_load_one(dev, priv, true); 1318228753Smm if (err) { 1319228753Smm device_printf(bsddev, "ERR: mlx5_load_one failed %d\n", err); 1320228753Smm goto clean_health; 1321228753Smm } 1322228753Smm 1323228753Smm mlx5_fwdump_prep(dev); 1324228753Smm 1325228753Smm mlx5_firmware_update(dev); 1326228753Smm 1327228753Smm pci_save_state(bsddev); 1328228753Smm return 0; 1329228753Smm 1330228753Smmclean_health: 1331228753Smm mlx5_pagealloc_cleanup(dev); 1332228753Smm mlx5_health_cleanup(dev); 1333228753Smmclose_pci: 1334228753Smm mlx5_pci_close(dev, priv); 1335228753Smmclean_dev: 1336228753Smm sysctl_ctx_free(&dev->sysctl_ctx); 1337228753Smm mtx_destroy(&dev->dump_lock); 1338228753Smm kfree(dev); 1339228753Smm return err; 1340228753Smm} 1341228753Smm 1342228753Smmstatic void remove_one(struct pci_dev *pdev) 1343228753Smm{ 1344228753Smm struct mlx5_core_dev *dev = pci_get_drvdata(pdev); 1345228753Smm struct mlx5_priv *priv = &dev->priv; 1346228753Smm 1347228753Smm if (mlx5_unload_one(dev, priv, true)) { 1348228753Smm dev_err(&dev->pdev->dev, "mlx5_unload_one failed\n"); 1349228753Smm mlx5_health_cleanup(dev); 1350228753Smm return; 1351228753Smm } 1352228753Smm 1353228753Smm mlx5_pagealloc_cleanup(dev); 1354228753Smm mlx5_health_cleanup(dev); 1355228753Smm mlx5_fwdump_clean(dev); 1356228753Smm mlx5_pci_close(dev, priv); 1357228753Smm mtx_destroy(&dev->dump_lock); 1358228753Smm pci_set_drvdata(pdev, NULL); 1359228753Smm sysctl_ctx_free(&dev->sysctl_ctx); 1360228753Smm kfree(dev); 1361228753Smm} 1362228753Smm 1363232153Smmstatic pci_ers_result_t 
mlx5_pci_err_detected(struct pci_dev *pdev, 1364228753Smm pci_channel_state_t state) 1365228753Smm{ 1366228753Smm struct mlx5_core_dev *dev = pci_get_drvdata(pdev); 1367228753Smm struct mlx5_priv *priv = &dev->priv; 1368228753Smm 1369228753Smm dev_info(&pdev->dev, "%s was called\n", __func__); 1370228753Smm mlx5_enter_error_state(dev, false); 1371228753Smm mlx5_unload_one(dev, priv, false); 1372228753Smm 1373228753Smm if (state) { 1374228753Smm mlx5_drain_health_wq(dev); 1375228753Smm mlx5_pci_disable_device(dev); 1376228753Smm } 1377228753Smm 1378228753Smm return state == pci_channel_io_perm_failure ? 1379228753Smm PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; 1380228753Smm} 1381228753Smm 1382228753Smmstatic pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev) 1383228753Smm{ 1384228753Smm struct mlx5_core_dev *dev = pci_get_drvdata(pdev); 1385228753Smm int err = 0; 1386228753Smm 1387228753Smm dev_info(&pdev->dev, "%s was called\n", __func__); 1388228753Smm 1389228753Smm err = mlx5_pci_enable_device(dev); 1390228753Smm if (err) { 1391228753Smm dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n" 1392228753Smm , __func__, err); 1393228753Smm return PCI_ERS_RESULT_DISCONNECT; 1394228753Smm } 1395228753Smm pci_set_master(pdev); 1396228753Smm pci_set_powerstate(pdev->dev.bsddev, PCI_POWERSTATE_D0); 1397228753Smm pci_restore_state(pdev->dev.bsddev); 1398228753Smm pci_save_state(pdev->dev.bsddev); 1399228753Smm 1400228753Smm return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; 1401228753Smm} 1402228753Smm 1403228753Smm/* wait for the device to show vital signs. 
For now we check 1404228753Smm * that we can read the device ID and that the health buffer 1405228753Smm * shows a non zero value which is different than 0xffffffff 1406228753Smm */ 1407228753Smmstatic void wait_vital(struct pci_dev *pdev) 1408228753Smm{ 1409228753Smm struct mlx5_core_dev *dev = pci_get_drvdata(pdev); 1410228753Smm struct mlx5_core_health *health = &dev->priv.health; 1411228753Smm const int niter = 100; 1412228753Smm u32 count; 1413228753Smm u16 did; 1414228753Smm int i; 1415228753Smm 1416228753Smm /* Wait for firmware to be ready after reset */ 1417228753Smm msleep(1000); 1418228753Smm for (i = 0; i < niter; i++) { 1419228753Smm if (pci_read_config_word(pdev, 2, &did)) { 1420228753Smm dev_warn(&pdev->dev, "failed reading config word\n"); 1421228753Smm break; 1422228753Smm } 1423228753Smm if (did == pdev->device) { 1424228753Smm dev_info(&pdev->dev, "device ID correctly read after %d iterations\n", i); 1425228753Smm break; 1426228753Smm } 1427232153Smm msleep(50); 1428228753Smm } 1429228753Smm if (i == niter) 1430228753Smm dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__); 1431228753Smm 1432228753Smm for (i = 0; i < niter; i++) { 1433228753Smm count = ioread32be(health->health_counter); 1434228753Smm if (count && count != 0xffffffff) { 1435228753Smm dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i); 1436228753Smm break; 1437228753Smm } 1438228753Smm msleep(50); 1439228753Smm } 1440228753Smm 1441228753Smm if (i == niter) 1442228753Smm dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__); 1443228753Smm} 1444228753Smm 1445228753Smmstatic void mlx5_pci_resume(struct pci_dev *pdev) 1446228753Smm{ 1447228753Smm struct mlx5_core_dev *dev = pci_get_drvdata(pdev); 1448228753Smm struct mlx5_priv *priv = &dev->priv; 1449228753Smm int err; 1450228753Smm 1451228753Smm dev_info(&pdev->dev, "%s was called\n", __func__); 1452228753Smm 1453228753Smm wait_vital(pdev); 1454228753Smm 
1455228753Smm err = mlx5_load_one(dev, priv, false); 1456228753Smm if (err) 1457228753Smm dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n" 1458228753Smm , __func__, err); 1459228753Smm else 1460232153Smm dev_info(&pdev->dev, "%s: device recovered\n", __func__); 1461228753Smm} 1462228753Smm 1463228753Smmstatic const struct pci_error_handlers mlx5_err_handler = { 1464228753Smm .error_detected = mlx5_pci_err_detected, 1465228753Smm .slot_reset = mlx5_pci_slot_reset, 1466228753Smm .resume = mlx5_pci_resume 1467228753Smm}; 1468228753Smm 1469228753Smmstatic int mlx5_try_fast_unload(struct mlx5_core_dev *dev) 1470228753Smm{ 1471228753Smm bool fast_teardown, force_teardown; 1472228753Smm int err; 1473228753Smm 1474228753Smm if (!mlx5_fast_unload_enabled) { 1475228753Smm mlx5_core_dbg(dev, "fast unload is disabled by user\n"); 1476228753Smm return -EOPNOTSUPP; 1477228753Smm } 1478228753Smm 1479228753Smm fast_teardown = MLX5_CAP_GEN(dev, fast_teardown); 1480228753Smm force_teardown = MLX5_CAP_GEN(dev, force_teardown); 1481228753Smm 1482228753Smm mlx5_core_dbg(dev, "force teardown firmware support=%d\n", force_teardown); 1483228753Smm mlx5_core_dbg(dev, "fast teardown firmware support=%d\n", fast_teardown); 1484228753Smm 1485228753Smm if (!fast_teardown && !force_teardown) 1486228753Smm return -EOPNOTSUPP; 1487228753Smm 1488228753Smm if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { 1489228753Smm mlx5_core_dbg(dev, "Device in internal error state, giving up\n"); 1490228753Smm return -EAGAIN; 1491228753Smm } 1492228753Smm 1493228753Smm /* Panic tear down fw command will stop the PCI bus communication 1494228753Smm * with the HCA, so the health polll is no longer needed. 
1495228753Smm */ 1496228753Smm mlx5_drain_health_wq(dev); 1497228753Smm mlx5_stop_health_poll(dev, false); 1498228753Smm 1499228753Smm err = mlx5_cmd_fast_teardown_hca(dev); 1500228753Smm if (!err) 1501228753Smm goto done; 1502228753Smm 1503228753Smm err = mlx5_cmd_force_teardown_hca(dev); 1504228753Smm if (!err) 1505228753Smm goto done; 1506228753Smm 1507228753Smm mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", err); 1508228753Smm mlx5_start_health_poll(dev); 1509228753Smm return err; 1510228753Smmdone: 1511228753Smm mlx5_enter_error_state(dev, true); 1512228753Smm return 0; 1513228753Smm} 1514228753Smm 1515228753Smmstatic void mlx5_disable_interrupts(struct mlx5_core_dev *mdev) 1516228753Smm{ 1517228753Smm int nvec = mdev->priv.eq_table.num_comp_vectors + MLX5_EQ_VEC_COMP_BASE; 1518228753Smm int x; 1519228753Smm 1520228753Smm mdev->priv.disable_irqs = 1; 1521228753Smm 1522228753Smm /* wait for all IRQ handlers to finish processing */ 1523228753Smm for (x = 0; x != nvec; x++) 1524228753Smm synchronize_irq(mdev->priv.msix_arr[x].vector); 1525228753Smm} 1526228753Smm 1527228753Smmstatic void shutdown_one(struct pci_dev *pdev) 1528228753Smm{ 1529228753Smm struct mlx5_core_dev *dev = pci_get_drvdata(pdev); 1530228753Smm struct mlx5_priv *priv = &dev->priv; 1531228753Smm int err; 1532228753Smm 1533228753Smm /* enter polling mode */ 1534228753Smm mlx5_cmd_use_polling(dev); 1535228753Smm 1536228753Smm /* disable all interrupts */ 1537228753Smm mlx5_disable_interrupts(dev); 1538228753Smm 1539228753Smm err = mlx5_try_fast_unload(dev); 1540228753Smm if (err) 1541228753Smm mlx5_unload_one(dev, priv, false); 1542228753Smm mlx5_pci_disable_device(dev); 1543228753Smm} 1544228753Smm 1545228753Smmstatic const struct pci_device_id mlx5_core_pci_table[] = { 1546228753Smm { PCI_VDEVICE(MELLANOX, 4113) }, /* Connect-IB */ 1547228753Smm { PCI_VDEVICE(MELLANOX, 4114) }, /* Connect-IB VF */ 1548228753Smm { PCI_VDEVICE(MELLANOX, 4115) }, /* ConnectX-4 */ 1549228753Smm 
{ PCI_VDEVICE(MELLANOX, 4116) }, /* ConnectX-4 VF */ 1550228753Smm { PCI_VDEVICE(MELLANOX, 4117) }, /* ConnectX-4LX */ 1551228753Smm { PCI_VDEVICE(MELLANOX, 4118) }, /* ConnectX-4LX VF */ 1552228753Smm { PCI_VDEVICE(MELLANOX, 4119) }, /* ConnectX-5 */ 1553228753Smm { PCI_VDEVICE(MELLANOX, 4120) }, /* ConnectX-5 VF */ 1554238856Smm { PCI_VDEVICE(MELLANOX, 4121) }, 1555228753Smm { PCI_VDEVICE(MELLANOX, 4122) }, 1556228753Smm { PCI_VDEVICE(MELLANOX, 4123) }, 1557228753Smm { PCI_VDEVICE(MELLANOX, 4124) }, 1558228753Smm { PCI_VDEVICE(MELLANOX, 4125) }, 1559228753Smm { PCI_VDEVICE(MELLANOX, 4126) }, 1560228753Smm { PCI_VDEVICE(MELLANOX, 4127) }, 1561228753Smm { PCI_VDEVICE(MELLANOX, 4128) }, 1562228753Smm { PCI_VDEVICE(MELLANOX, 4129) }, 1563228753Smm { PCI_VDEVICE(MELLANOX, 4130) }, 1564228753Smm { PCI_VDEVICE(MELLANOX, 4131) }, 1565228753Smm { PCI_VDEVICE(MELLANOX, 4132) }, 1566228753Smm { PCI_VDEVICE(MELLANOX, 4133) }, 1567228753Smm { PCI_VDEVICE(MELLANOX, 4134) }, 1568238856Smm { PCI_VDEVICE(MELLANOX, 4135) }, 1569238856Smm { PCI_VDEVICE(MELLANOX, 4136) }, 1570238856Smm { PCI_VDEVICE(MELLANOX, 4137) }, 1571238856Smm { PCI_VDEVICE(MELLANOX, 4138) }, 1572238856Smm { PCI_VDEVICE(MELLANOX, 4139) }, 1573238856Smm { PCI_VDEVICE(MELLANOX, 4140) }, 1574238856Smm { PCI_VDEVICE(MELLANOX, 4141) }, 1575228753Smm { PCI_VDEVICE(MELLANOX, 4142) }, 1576228753Smm { PCI_VDEVICE(MELLANOX, 4143) }, 1577228753Smm { PCI_VDEVICE(MELLANOX, 4144) }, 1578228753Smm { 0, } 1579228753Smm}; 1580228753Smm 1581228753SmmMODULE_DEVICE_TABLE(pci, mlx5_core_pci_table); 1582228753Smm 1583228753Smmvoid mlx5_disable_device(struct mlx5_core_dev *dev) 1584228753Smm{ 1585228753Smm mlx5_pci_err_detected(dev->pdev, 0); 1586228753Smm} 1587228753Smm 1588228753Smmvoid mlx5_recover_device(struct mlx5_core_dev *dev) 1589228753Smm{ 1590228753Smm mlx5_pci_disable_device(dev); 1591228753Smm if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED) 1592228753Smm mlx5_pci_resume(dev->pdev); 1593228753Smm} 
1594238856Smm 1595238856Smmstruct pci_driver mlx5_core_driver = { 1596238856Smm .name = DRIVER_NAME, 1597238856Smm .id_table = mlx5_core_pci_table, 1598238856Smm .shutdown = shutdown_one, 1599238856Smm .probe = init_one, 1600238856Smm .remove = remove_one, 1601228753Smm .err_handler = &mlx5_err_handler 1602238856Smm}; 1603238856Smm 1604228753Smmstatic int __init init(void) 1605228753Smm{ 1606228753Smm int err; 1607228753Smm 1608228753Smm err = pci_register_driver(&mlx5_core_driver); 1609228753Smm if (err) 1610228753Smm goto err_debug; 1611228753Smm 1612228753Smm err = mlx5_ctl_init(); 1613228753Smm if (err) 1614228753Smm goto err_ctl; 1615228753Smm 1616228753Smm return 0; 1617228753Smm 1618228753Smmerr_ctl: 1619228753Smm pci_unregister_driver(&mlx5_core_driver); 1620228753Smm 1621228753Smmerr_debug: 1622228753Smm return err; 1623228753Smm} 1624228753Smm 1625228753Smmstatic void __exit cleanup(void) 1626228753Smm{ 1627228753Smm mlx5_ctl_fini(); 1628228753Smm pci_unregister_driver(&mlx5_core_driver); 1629228753Smm} 1630228753Smm 1631228753Smmmodule_init(init); 1632228753Smmmodule_exit(cleanup); 1633228753Smm