1/* 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 4 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. 5 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * OpenIB.org BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or 14 * without modification, are permitted provided that the following 15 * conditions are met: 16 * 17 * - Redistributions of source code must retain the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer. 20 * 21 * - Redistributions in binary form must reproduce the above 22 * copyright notice, this list of conditions and the following 23 * disclaimer in the documentation and/or other materials 24 * provided with the distribution. 25 * 26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 33 * SOFTWARE. 34 */ 35 36#include <linux/module.h> 37#include <linux/init.h> 38#include <linux/errno.h> 39#include <linux/pci.h> 40#include <linux/dma-mapping.h> 41#include <linux/slab.h> 42 43#include <linux/mlx4/device.h> 44#include <linux/mlx4/doorbell.h> 45 46#include "mlx4.h" 47#include "fw.h" 48#include "icm.h" 49 50MODULE_AUTHOR("Roland Dreier"); 51MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver"); 52MODULE_LICENSE("Dual BSD/GPL"); 53MODULE_VERSION(DRV_VERSION); 54 55struct workqueue_struct *mlx4_wq; 56 57#ifdef CONFIG_MLX4_DEBUG 58 59int mlx4_debug_level = 0; 60module_param_named(debug_level, mlx4_debug_level, int, 0644); 61MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0"); 62 63#endif /* CONFIG_MLX4_DEBUG */ 64 65#ifdef CONFIG_PCI_MSI 66 67static int msi_x = 1; 68module_param(msi_x, int, 0444); 69MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero"); 70 71#else /* CONFIG_PCI_MSI */ 72 73#define msi_x (0) 74 75#endif /* CONFIG_PCI_MSI */ 76 77static char mlx4_version[] __devinitdata = 78 DRV_NAME ": Mellanox ConnectX core driver v" 79 DRV_VERSION " (" DRV_RELDATE ")\n"; 80 81static struct mlx4_profile default_profile = { 82 .num_qp = 1 << 17, 83 .num_srq = 1 << 16, 84 .rdmarc_per_qp = 1 << 4, 85 .num_cq = 1 << 16, 86 .num_mcg = 1 << 13, 87 .num_mpt = 1 << 17, 88 .num_mtt = 1 << 20, 89}; 90 91static int log_num_mac = 2; 92module_param_named(log_num_mac, log_num_mac, int, 0444); 93MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)"); 94 95static int log_num_vlan; 96module_param_named(log_num_vlan, log_num_vlan, int, 0444); 97MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)"); 98 99static int use_prio; 100module_param_named(use_prio, use_prio, bool, 0444); 101MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports " 102 "(0/1, default 0)"); 103 104static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG); 105module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444); 106MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)"); 107 108int mlx4_check_port_params(struct mlx4_dev *dev, 109 enum mlx4_port_type *port_type) 110{ 111 int i; 112 113 for (i = 0; i < dev->caps.num_ports - 1; i++) { 114 if (port_type[i] != port_type[i + 1]) { 115 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) { 116 mlx4_err(dev, "Only same port types supported " 117 "on this HCA, aborting.\n"); 118 return -EINVAL; 119 } 120 if (port_type[i] == MLX4_PORT_TYPE_ETH && 121 port_type[i + 1] == MLX4_PORT_TYPE_IB) 122 return -EINVAL; 123 } 124 } 125 126 for (i = 0; i < dev->caps.num_ports; i++) { 127 if (!(port_type[i] & dev->caps.supported_type[i+1])) { 128 mlx4_err(dev, "Requested port type for port %d is not " 129 "supported on this HCA\n", i + 1); 130 return -EINVAL; 131 } 132 } 133 return 0; 134} 135 136static void mlx4_set_port_mask(struct mlx4_dev *dev) 137{ 138 int i; 139 140 dev->caps.port_mask = 0; 141 for (i = 1; i <= dev->caps.num_ports; ++i) 142 if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB) 143 dev->caps.port_mask |= 1 << (i - 1); 144} 145static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) 146{ 147 int err; 148 int i; 149 150 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 151 if (err) { 152 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 153 return err; 154 } 155 156 if (dev_cap->min_page_sz > PAGE_SIZE) { 157 mlx4_err(dev, "HCA minimum page size of %d bigger than " 158 "kernel PAGE_SIZE of %ld, aborting.\n", 159 dev_cap->min_page_sz, PAGE_SIZE); 160 return -ENODEV; 161 } 162 if (dev_cap->num_ports > MLX4_MAX_PORTS) { 163 mlx4_err(dev, "HCA has %d ports, but we only support %d, " 164 "aborting.\n", 165 dev_cap->num_ports, MLX4_MAX_PORTS); 166 return -ENODEV; 167 } 168 169 if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) { 170 mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than " 171 "PCI resource 2 size of 0x%llx, aborting.\n", 172 dev_cap->uar_size, 173 (unsigned long long) pci_resource_len(dev->pdev, 2)); 174 return -ENODEV; 175 } 176 177 dev->caps.num_ports = dev_cap->num_ports; 178 for (i = 1; i <= dev->caps.num_ports; ++i) { 179 dev->caps.vl_cap[i] = dev_cap->max_vl[i]; 180 dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i]; 181 dev->caps.gid_table_len[i] = dev_cap->max_gids[i]; 182 dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i]; 183 dev->caps.port_width_cap[i] = dev_cap->max_port_width[i]; 184 dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i]; 185 dev->caps.def_mac[i] = dev_cap->def_mac[i]; 186 dev->caps.supported_type[i] = dev_cap->supported_port_types[i]; 187 } 188 189 dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE; 190 dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay; 191 dev->caps.bf_reg_size = dev_cap->bf_reg_size; 192 dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page; 193 dev->caps.max_sq_sg = dev_cap->max_sq_sg; 194 dev->caps.max_rq_sg = dev_cap->max_rq_sg; 195 dev->caps.max_wqes = dev_cap->max_qp_sz; 196 dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp; 197 dev->caps.max_srq_wqes = dev_cap->max_srq_sz; 198 dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1; 199 dev->caps.reserved_srqs = dev_cap->reserved_srqs; 200 dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz; 201 dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz; 202 dev->caps.num_qp_per_mgm = MLX4_QP_PER_MGM; 203 /* 204 * Subtract 1 from the limit because we need to allocate a 205 * spare CQE so the HCA HW can tell the difference between an 206 * empty CQ and a full CQ. 207 */ 208 dev->caps.max_cqes = dev_cap->max_cq_sz - 1; 209 dev->caps.reserved_cqs = dev_cap->reserved_cqs; 210 dev->caps.reserved_eqs = dev_cap->reserved_eqs; 211 dev->caps.mtts_per_seg = 1 << log_mtts_per_seg; 212 dev->caps.reserved_mtts = DIV_ROUND_UP(dev_cap->reserved_mtts, 213 dev->caps.mtts_per_seg); 214 dev->caps.reserved_mrws = dev_cap->reserved_mrws; 215 dev->caps.reserved_uars = dev_cap->reserved_uars; 216 dev->caps.reserved_pds = dev_cap->reserved_pds; 217 dev->caps.mtt_entry_sz = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz; 218 dev->caps.max_msg_sz = dev_cap->max_msg_sz; 219 dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); 220 dev->caps.flags = dev_cap->flags; 221 dev->caps.bmme_flags = dev_cap->bmme_flags; 222 dev->caps.reserved_lkey = dev_cap->reserved_lkey; 223 dev->caps.stat_rate_support = dev_cap->stat_rate_support; 224 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 225 226 dev->caps.log_num_macs = log_num_mac; 227 dev->caps.log_num_vlans = log_num_vlan; 228 dev->caps.log_num_prios = use_prio ? 3 : 0; 229 230 for (i = 1; i <= dev->caps.num_ports; ++i) { 231 if (dev->caps.supported_type[i] != MLX4_PORT_TYPE_ETH) 232 dev->caps.port_type[i] = MLX4_PORT_TYPE_IB; 233 else 234 dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH; 235 dev->caps.possible_type[i] = dev->caps.port_type[i]; 236 mlx4_priv(dev)->sense.sense_allowed[i] = 237 dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO; 238 239 if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) { 240 dev->caps.log_num_macs = dev_cap->log_max_macs[i]; 241 mlx4_warn(dev, "Requested number of MACs is too much " 242 "for port %d, reducing to %d.\n", 243 i, 1 << dev->caps.log_num_macs); 244 } 245 if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) { 246 dev->caps.log_num_vlans = dev_cap->log_max_vlans[i]; 247 mlx4_warn(dev, "Requested number of VLANs is too much " 248 "for port %d, reducing to %d.\n", 249 i, 1 << dev->caps.log_num_vlans); 250 } 251 } 252 253 mlx4_set_port_mask(dev); 254 255 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps; 256 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] = 257 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] = 258 (1 << dev->caps.log_num_macs) * 259 (1 << dev->caps.log_num_vlans) * 260 (1 << dev->caps.log_num_prios) * 261 dev->caps.num_ports; 262 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH; 263 264 dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] + 265 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] + 266 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] + 267 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH]; 268 269 return 0; 270} 271 272/* 273 * Change the port configuration of the device. 274 * Every user of this function must hold the port mutex. 275 */ 276int mlx4_change_port_types(struct mlx4_dev *dev, 277 enum mlx4_port_type *port_types) 278{ 279 int err = 0; 280 int change = 0; 281 int port; 282 283 for (port = 0; port < dev->caps.num_ports; port++) { 284 /* Change the port type only if the new type is different 285 * from the current, and not set to Auto */ 286 if (port_types[port] != dev->caps.port_type[port + 1]) { 287 change = 1; 288 dev->caps.port_type[port + 1] = port_types[port]; 289 } 290 } 291 if (change) { 292 mlx4_unregister_device(dev); 293 for (port = 1; port <= dev->caps.num_ports; port++) { 294 mlx4_CLOSE_PORT(dev, port); 295 err = mlx4_SET_PORT(dev, port); 296 if (err) { 297 mlx4_err(dev, "Failed to set port %d, " 298 "aborting\n", port); 299 goto out; 300 } 301 } 302 mlx4_set_port_mask(dev); 303 err = mlx4_register_device(dev); 304 } 305 306out: 307 return err; 308} 309 310static ssize_t show_port_type(struct device *dev, 311 struct device_attribute *attr, 312 char *buf) 313{ 314 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 315 port_attr); 316 struct mlx4_dev *mdev = info->dev; 317 char type[8]; 318 319 sprintf(type, "%s", 320 (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ? 321 "ib" : "eth"); 322 if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO) 323 sprintf(buf, "auto (%s)\n", type); 324 else 325 sprintf(buf, "%s\n", type); 326 327 return strlen(buf); 328} 329 330static ssize_t set_port_type(struct device *dev, 331 struct device_attribute *attr, 332 const char *buf, size_t count) 333{ 334 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 335 port_attr); 336 struct mlx4_dev *mdev = info->dev; 337 struct mlx4_priv *priv = mlx4_priv(mdev); 338 enum mlx4_port_type types[MLX4_MAX_PORTS]; 339 enum mlx4_port_type new_types[MLX4_MAX_PORTS]; 340 int i; 341 int err = 0; 342 343 if (!strcmp(buf, "ib\n")) 344 info->tmp_type = MLX4_PORT_TYPE_IB; 345 else if (!strcmp(buf, "eth\n")) 346 info->tmp_type = MLX4_PORT_TYPE_ETH; 347 else if (!strcmp(buf, "auto\n")) 348 info->tmp_type = MLX4_PORT_TYPE_AUTO; 349 else { 350 mlx4_err(mdev, "%s is not supported port type\n", buf); 351 return -EINVAL; 352 } 353 354 mlx4_stop_sense(mdev); 355 mutex_lock(&priv->port_mutex); 356 /* Possible type is always the one that was delivered */ 357 mdev->caps.possible_type[info->port] = info->tmp_type; 358 359 for (i = 0; i < mdev->caps.num_ports; i++) { 360 types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type : 361 mdev->caps.possible_type[i+1]; 362 if (types[i] == MLX4_PORT_TYPE_AUTO) 363 types[i] = mdev->caps.port_type[i+1]; 364 } 365 366 if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) { 367 for (i = 1; i <= mdev->caps.num_ports; i++) { 368 if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) { 369 mdev->caps.possible_type[i] = mdev->caps.port_type[i]; 370 err = -EINVAL; 371 } 372 } 373 } 374 if (err) { 375 mlx4_err(mdev, "Auto sensing is not supported on this HCA. " 376 "Set only 'eth' or 'ib' for both ports " 377 "(should be the same)\n"); 378 goto out; 379 } 380 381 mlx4_do_sense_ports(mdev, new_types, types); 382 383 err = mlx4_check_port_params(mdev, new_types); 384 if (err) 385 goto out; 386 387 /* We are about to apply the changes after the configuration 388 * was verified, no need to remember the temporary types 389 * any more */ 390 for (i = 0; i < mdev->caps.num_ports; i++) 391 priv->port[i + 1].tmp_type = 0; 392 393 err = mlx4_change_port_types(mdev, new_types); 394 395out: 396 mlx4_start_sense(mdev); 397 mutex_unlock(&priv->port_mutex); 398 return err ? err : count; 399} 400 401static int mlx4_load_fw(struct mlx4_dev *dev) 402{ 403 struct mlx4_priv *priv = mlx4_priv(dev); 404 int err; 405 406 priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages, 407 GFP_HIGHUSER | __GFP_NOWARN, 0); 408 if (!priv->fw.fw_icm) { 409 mlx4_err(dev, "Couldn't allocate FW area, aborting.\n"); 410 return -ENOMEM; 411 } 412 413 err = mlx4_MAP_FA(dev, priv->fw.fw_icm); 414 if (err) { 415 mlx4_err(dev, "MAP_FA command failed, aborting.\n"); 416 goto err_free; 417 } 418 419 err = mlx4_RUN_FW(dev); 420 if (err) { 421 mlx4_err(dev, "RUN_FW command failed, aborting.\n"); 422 goto err_unmap_fa; 423 } 424 425 return 0; 426 427err_unmap_fa: 428 mlx4_UNMAP_FA(dev); 429 430err_free: 431 mlx4_free_icm(dev, priv->fw.fw_icm, 0); 432 return err; 433} 434 435static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, 436 int cmpt_entry_sz) 437{ 438 struct mlx4_priv *priv = mlx4_priv(dev); 439 int err; 440 441 err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table, 442 cmpt_base + 443 ((u64) (MLX4_CMPT_TYPE_QP * 444 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 445 cmpt_entry_sz, dev->caps.num_qps, 446 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 447 0, 0); 448 if (err) 449 goto err; 450 451 err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table, 452 cmpt_base + 453 ((u64) (MLX4_CMPT_TYPE_SRQ * 454 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 455 cmpt_entry_sz, dev->caps.num_srqs, 456 dev->caps.reserved_srqs, 0, 0); 457 if (err) 458 goto err_qp; 459 460 err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table, 461 cmpt_base + 462 ((u64) (MLX4_CMPT_TYPE_CQ * 463 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 464 cmpt_entry_sz, dev->caps.num_cqs, 465 dev->caps.reserved_cqs, 0, 0); 466 if (err) 467 goto err_srq; 468 469 err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table, 470 cmpt_base + 471 ((u64) (MLX4_CMPT_TYPE_EQ * 472 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 473 cmpt_entry_sz, 474 dev->caps.num_eqs, dev->caps.num_eqs, 0, 0); 475 if (err) 476 goto err_cq; 477 478 return 0; 479 480err_cq: 481 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 482 483err_srq: 484 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 485 486err_qp: 487 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 488 489err: 490 return err; 491} 492 493static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, 494 struct mlx4_init_hca_param *init_hca, u64 icm_size) 495{ 496 struct mlx4_priv *priv = mlx4_priv(dev); 497 u64 aux_pages; 498 int err; 499 500 err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages); 501 if (err) { 502 mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n"); 503 return err; 504 } 505 506 mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n", 507 (unsigned long long) icm_size >> 10, 508 (unsigned long long) aux_pages << 2); 509 510 priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages, 511 GFP_HIGHUSER | __GFP_NOWARN, 0); 512 if (!priv->fw.aux_icm) { 513 mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n"); 514 return -ENOMEM; 515 } 516 517 err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm); 518 if (err) { 519 mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n"); 520 goto err_free_aux; 521 } 522 523 err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz); 524 if (err) { 525 mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n"); 526 goto err_unmap_aux; 527 } 528 529 err = mlx4_init_icm_table(dev, &priv->eq_table.table, 530 init_hca->eqc_base, dev_cap->eqc_entry_sz, 531 dev->caps.num_eqs, dev->caps.num_eqs, 532 0, 0); 533 if (err) { 534 mlx4_err(dev, "Failed to map EQ context memory, aborting.\n"); 535 goto err_unmap_cmpt; 536 } 537 538 /* 539 * Reserved MTT entries must be aligned up to a cacheline 540 * boundary, since the FW will write to them, while the driver 541 * writes to all other MTT entries. (The variable 542 * dev->caps.mtt_entry_sz below is really the MTT segment 543 * size, not the raw entry size) 544 */ 545 dev->caps.reserved_mtts = 546 ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz, 547 dma_get_cache_alignment()) / dev->caps.mtt_entry_sz; 548 549 err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table, 550 init_hca->mtt_base, 551 dev->caps.mtt_entry_sz, 552 dev->caps.num_mtt_segs, 553 dev->caps.reserved_mtts, 1, 0); 554 if (err) { 555 mlx4_err(dev, "Failed to map MTT context memory, aborting.\n"); 556 goto err_unmap_eq; 557 } 558 559 err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table, 560 init_hca->dmpt_base, 561 dev_cap->dmpt_entry_sz, 562 dev->caps.num_mpts, 563 dev->caps.reserved_mrws, 1, 1); 564 if (err) { 565 mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n"); 566 goto err_unmap_mtt; 567 } 568 569 err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table, 570 init_hca->qpc_base, 571 dev_cap->qpc_entry_sz, 572 dev->caps.num_qps, 573 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 574 0, 0); 575 if (err) { 576 mlx4_err(dev, "Failed to map QP context memory, aborting.\n"); 577 goto err_unmap_dmpt; 578 } 579 580 err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table, 581 init_hca->auxc_base, 582 dev_cap->aux_entry_sz, 583 dev->caps.num_qps, 584 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 585 0, 0); 586 if (err) { 587 mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n"); 588 goto err_unmap_qp; 589 } 590 591 err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table, 592 init_hca->altc_base, 593 dev_cap->altc_entry_sz, 594 dev->caps.num_qps, 595 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 596 0, 0); 597 if (err) { 598 mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n"); 599 goto err_unmap_auxc; 600 } 601 602 err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table, 603 init_hca->rdmarc_base, 604 dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift, 605 dev->caps.num_qps, 606 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 607 0, 0); 608 if (err) { 609 mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n"); 610 goto err_unmap_altc; 611 } 612 613 err = mlx4_init_icm_table(dev, &priv->cq_table.table, 614 init_hca->cqc_base, 615 dev_cap->cqc_entry_sz, 616 dev->caps.num_cqs, 617 dev->caps.reserved_cqs, 0, 0); 618 if (err) { 619 mlx4_err(dev, "Failed to map CQ context memory, aborting.\n"); 620 goto err_unmap_rdmarc; 621 } 622 623 err = mlx4_init_icm_table(dev, &priv->srq_table.table, 624 init_hca->srqc_base, 625 dev_cap->srq_entry_sz, 626 dev->caps.num_srqs, 627 dev->caps.reserved_srqs, 0, 0); 628 if (err) { 629 mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n"); 630 goto err_unmap_cq; 631 } 632 633 /* 634 * It's not strictly required, but for simplicity just map the 635 * whole multicast group table now. The table isn't very big 636 * and it's a lot easier than trying to track ref counts. 637 */ 638 err = mlx4_init_icm_table(dev, &priv->mcg_table.table, 639 init_hca->mc_base, MLX4_MGM_ENTRY_SIZE, 640 dev->caps.num_mgms + dev->caps.num_amgms, 641 dev->caps.num_mgms + dev->caps.num_amgms, 642 0, 0); 643 if (err) { 644 mlx4_err(dev, "Failed to map MCG context memory, aborting.\n"); 645 goto err_unmap_srq; 646 } 647 648 return 0; 649 650err_unmap_srq: 651 mlx4_cleanup_icm_table(dev, &priv->srq_table.table); 652 653err_unmap_cq: 654 mlx4_cleanup_icm_table(dev, &priv->cq_table.table); 655 656err_unmap_rdmarc: 657 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); 658 659err_unmap_altc: 660 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); 661 662err_unmap_auxc: 663 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); 664 665err_unmap_qp: 666 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); 667 668err_unmap_dmpt: 669 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); 670 671err_unmap_mtt: 672 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); 673 674err_unmap_eq: 675 mlx4_cleanup_icm_table(dev, &priv->eq_table.table); 676 677err_unmap_cmpt: 678 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); 679 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 680 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 681 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 682 683err_unmap_aux: 684 mlx4_UNMAP_ICM_AUX(dev); 685 686err_free_aux: 687 mlx4_free_icm(dev, priv->fw.aux_icm, 0); 688 689 return err; 690} 691 692static void mlx4_free_icms(struct mlx4_dev *dev) 693{ 694 struct mlx4_priv *priv = mlx4_priv(dev); 695 696 mlx4_cleanup_icm_table(dev, &priv->mcg_table.table); 697 mlx4_cleanup_icm_table(dev, &priv->srq_table.table); 698 mlx4_cleanup_icm_table(dev, &priv->cq_table.table); 699 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); 700 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); 701 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); 702 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); 703 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); 704 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); 705 mlx4_cleanup_icm_table(dev, &priv->eq_table.table); 706 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); 707 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 708 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 709 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 710 711 mlx4_UNMAP_ICM_AUX(dev); 712 mlx4_free_icm(dev, priv->fw.aux_icm, 0); 713} 714 715static void mlx4_close_hca(struct mlx4_dev *dev) 716{ 717 mlx4_CLOSE_HCA(dev, 0); 718 mlx4_free_icms(dev); 719 mlx4_UNMAP_FA(dev); 720 mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0); 721} 722 723static int mlx4_init_hca(struct mlx4_dev *dev) 724{ 725 struct mlx4_priv *priv = mlx4_priv(dev); 726 struct mlx4_adapter adapter; 727 struct mlx4_dev_cap dev_cap; 728 struct mlx4_mod_stat_cfg mlx4_cfg; 729 struct mlx4_profile profile; 730 struct mlx4_init_hca_param init_hca; 731 u64 icm_size; 732 int err; 733 734 err = mlx4_QUERY_FW(dev); 735 if (err) { 736 if (err == -EACCES) 737 mlx4_info(dev, "non-primary physical function, skipping.\n"); 738 else 739 mlx4_err(dev, "QUERY_FW command failed, aborting.\n"); 740 return err; 741 } 742 743 err = mlx4_load_fw(dev); 744 if (err) { 745 mlx4_err(dev, "Failed to start FW, aborting.\n"); 746 return err; 747 } 748 749 mlx4_cfg.log_pg_sz_m = 1; 750 mlx4_cfg.log_pg_sz = 0; 751 err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg); 752 if (err) 753 mlx4_warn(dev, "Failed to override log_pg_sz parameter\n"); 754 755 err = mlx4_dev_cap(dev, &dev_cap); 756 if (err) { 757 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 758 goto err_stop_fw; 759 } 760 761 profile = default_profile; 762 763 icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca); 764 if ((long long) icm_size < 0) { 765 err = icm_size; 766 goto err_stop_fw; 767 } 768 769 init_hca.log_uar_sz = ilog2(dev->caps.num_uars); 770 771 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); 772 if (err) 773 goto err_stop_fw; 774 775 err = mlx4_INIT_HCA(dev, &init_hca); 776 if (err) { 777 mlx4_err(dev, "INIT_HCA command failed, aborting.\n"); 778 goto err_free_icm; 779 } 780 781 err = mlx4_QUERY_ADAPTER(dev, &adapter); 782 if (err) { 783 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n"); 784 goto err_close; 785 } 786 787 priv->eq_table.inta_pin = adapter.inta_pin; 788 memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id); 789 790 return 0; 791 792err_close: 793 mlx4_CLOSE_HCA(dev, 0); 794 795err_free_icm: 796 mlx4_free_icms(dev); 797 798err_stop_fw: 799 mlx4_UNMAP_FA(dev); 800 mlx4_free_icm(dev, priv->fw.fw_icm, 0); 801 802 return err; 803} 804 805static int mlx4_setup_hca(struct mlx4_dev *dev) 806{ 807 struct mlx4_priv *priv = mlx4_priv(dev); 808 int err; 809 int port; 810 __be32 ib_port_default_caps; 811 812 err = mlx4_init_uar_table(dev); 813 if (err) { 814 mlx4_err(dev, "Failed to initialize " 815 "user access region table, aborting.\n"); 816 return err; 817 } 818 819 err = mlx4_uar_alloc(dev, &priv->driver_uar); 820 if (err) { 821 mlx4_err(dev, "Failed to allocate driver access region, " 822 "aborting.\n"); 823 goto err_uar_table_free; 824 } 825 826 priv->kar = ioremap(priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); 827 if (!priv->kar) { 828 mlx4_err(dev, "Couldn't map kernel access region, " 829 "aborting.\n"); 830 err = -ENOMEM; 831 goto err_uar_free; 832 } 833 834 err = mlx4_init_pd_table(dev); 835 if (err) { 836 mlx4_err(dev, "Failed to initialize " 837 "protection domain table, aborting.\n"); 838 goto err_kar_unmap; 839 } 840 841 err = mlx4_init_mr_table(dev); 842 if (err) { 843 mlx4_err(dev, "Failed to initialize " 844 "memory region table, aborting.\n"); 845 goto err_pd_table_free; 846 } 847 848 err = mlx4_init_eq_table(dev); 849 if (err) { 850 mlx4_err(dev, "Failed to initialize " 851 "event queue table, aborting.\n"); 852 goto err_mr_table_free; 853 } 854 855 err = mlx4_cmd_use_events(dev); 856 if (err) { 857 mlx4_err(dev, "Failed to switch to event-driven " 858 "firmware commands, aborting.\n"); 859 goto err_eq_table_free; 860 } 861 862 err = mlx4_NOP(dev); 863 if (err) { 864 if (dev->flags & MLX4_FLAG_MSI_X) { 865 mlx4_warn(dev, "NOP command failed to generate MSI-X " 866 "interrupt IRQ %d).\n", 867 priv->eq_table.eq[dev->caps.num_comp_vectors].irq); 868 mlx4_warn(dev, "Trying again without MSI-X.\n"); 869 } else { 870 mlx4_err(dev, "NOP command failed to generate interrupt " 871 "(IRQ %d), aborting.\n", 872 priv->eq_table.eq[dev->caps.num_comp_vectors].irq); 873 mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n"); 874 } 875 876 goto err_cmd_poll; 877 } 878 879 mlx4_dbg(dev, "NOP command IRQ test passed\n"); 880 881 err = mlx4_init_cq_table(dev); 882 if (err) { 883 mlx4_err(dev, "Failed to initialize " 884 "completion queue table, aborting.\n"); 885 goto err_cmd_poll; 886 } 887 888 err = mlx4_init_srq_table(dev); 889 if (err) { 890 mlx4_err(dev, "Failed to initialize " 891 "shared receive queue table, aborting.\n"); 892 goto err_cq_table_free; 893 } 894 895 err = mlx4_init_qp_table(dev); 896 if (err) { 897 mlx4_err(dev, "Failed to initialize " 898 "queue pair table, aborting.\n"); 899 goto err_srq_table_free; 900 } 901 902 err = mlx4_init_mcg_table(dev); 903 if (err) { 904 mlx4_err(dev, "Failed to initialize " 905 "multicast group table, aborting.\n"); 906 goto err_qp_table_free; 907 } 908 909 for (port = 1; port <= dev->caps.num_ports; port++) { 910 ib_port_default_caps = 0; 911 err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps); 912 if (err) 913 mlx4_warn(dev, "failed to get port %d default " 914 "ib capabilities (%d). Continuing with " 915 "caps = 0\n", port, err); 916 dev->caps.ib_port_def_cap[port] = ib_port_default_caps; 917 err = mlx4_SET_PORT(dev, port); 918 if (err) { 919 mlx4_err(dev, "Failed to set port %d, aborting\n", 920 port); 921 goto err_mcg_table_free; 922 } 923 } 924 925 return 0; 926 927err_mcg_table_free: 928 mlx4_cleanup_mcg_table(dev); 929 930err_qp_table_free: 931 mlx4_cleanup_qp_table(dev); 932 933err_srq_table_free: 934 mlx4_cleanup_srq_table(dev); 935 936err_cq_table_free: 937 mlx4_cleanup_cq_table(dev); 938 939err_cmd_poll: 940 mlx4_cmd_use_polling(dev); 941 942err_eq_table_free: 943 mlx4_cleanup_eq_table(dev); 944 945err_mr_table_free: 946 mlx4_cleanup_mr_table(dev); 947 948err_pd_table_free: 949 mlx4_cleanup_pd_table(dev); 950 951err_kar_unmap: 952 iounmap(priv->kar); 953 954err_uar_free: 955 mlx4_uar_free(dev, &priv->driver_uar); 956 957err_uar_table_free: 958 mlx4_cleanup_uar_table(dev); 959 return err; 960} 961 962static void mlx4_enable_msi_x(struct mlx4_dev *dev) 963{ 964 struct mlx4_priv *priv = mlx4_priv(dev); 965 struct msix_entry *entries; 966 int nreq; 967 int err; 968 int i; 969 970 if (msi_x) { 971 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, 972 num_possible_cpus() + 1); 973 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); 974 if (!entries) 975 goto no_msi; 976 977 for (i = 0; i < nreq; ++i) 978 entries[i].entry = i; 979 980 retry: 981 err = pci_enable_msix(dev->pdev, entries, nreq); 982 if (err) { 983 /* Try again if at least 2 vectors are available */ 984 if (err > 1) { 985 mlx4_info(dev, "Requested %d vectors, " 986 "but only %d MSI-X vectors available, " 987 "trying again\n", nreq, err); 988 nreq = err; 989 goto retry; 990 } 991 kfree(entries); 992 goto no_msi; 993 } 994 995 dev->caps.num_comp_vectors = nreq - 1; 996 for (i = 0; i < nreq; ++i) 997 priv->eq_table.eq[i].irq = entries[i].vector; 998 999 dev->flags |= MLX4_FLAG_MSI_X; 1000 1001 kfree(entries); 1002 return; 1003 } 1004 1005no_msi: 1006 dev->caps.num_comp_vectors = 1; 1007 1008 for (i = 0; i < 2; ++i) 1009 priv->eq_table.eq[i].irq = dev->pdev->irq; 1010} 1011 1012static int mlx4_init_port_info(struct mlx4_dev *dev, int port) 1013{ 1014 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; 1015 int err = 0; 1016 1017 info->dev = dev; 1018 info->port = port; 1019 mlx4_init_mac_table(dev, &info->mac_table); 1020 mlx4_init_vlan_table(dev, &info->vlan_table); 1021 1022 sprintf(info->dev_name, "mlx4_port%d", port); 1023 info->port_attr.attr.name = info->dev_name; 1024 info->port_attr.attr.mode = S_IRUGO | S_IWUSR; 1025 info->port_attr.show = show_port_type; 1026 info->port_attr.store = set_port_type; 1027 sysfs_attr_init(&info->port_attr.attr); 1028 1029 err = device_create_file(&dev->pdev->dev, &info->port_attr); 1030 if (err) { 1031 mlx4_err(dev, "Failed to create file for port %d\n", port); 1032 info->port = -1; 1033 } 1034 1035 return err; 1036} 1037 1038static void mlx4_cleanup_port_info(struct mlx4_port_info *info) 1039{ 1040 if (info->port < 0) 1041 return; 1042 1043 device_remove_file(&info->dev->pdev->dev, &info->port_attr); 1044} 1045 1046static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 1047{ 1048 struct mlx4_priv *priv; 1049 struct mlx4_dev *dev; 1050 int err; 1051 int port; 1052 1053 pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev)); 1054 1055 err = pci_enable_device(pdev); 1056 if (err) { 1057 dev_err(&pdev->dev, "Cannot enable PCI device, " 1058 "aborting.\n"); 1059 return err; 1060 } 1061 1062 /* 1063 * Check for BARs. We expect 0: 1MB 1064 */ 1065 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) || 1066 pci_resource_len(pdev, 0) != 1 << 20) { 1067 dev_err(&pdev->dev, "Missing DCS, aborting.\n"); 1068 err = -ENODEV; 1069 goto err_disable_pdev; 1070 } 1071 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 1072 dev_err(&pdev->dev, "Missing UAR, aborting.\n"); 1073 err = -ENODEV; 1074 goto err_disable_pdev; 1075 } 1076 1077 err = pci_request_regions(pdev, DRV_NAME); 1078 if (err) { 1079 dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n"); 1080 goto err_disable_pdev; 1081 } 1082 1083 pci_set_master(pdev); 1084 1085 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 1086 if (err) { 1087 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n"); 1088 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 1089 if (err) { 1090 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n"); 1091 goto err_release_regions; 1092 } 1093 } 1094 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 1095 if (err) { 1096 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit " 1097 "consistent PCI DMA mask.\n"); 1098 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 1099 if (err) { 1100 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, " 1101 "aborting.\n"); 1102 goto err_release_regions; 1103 } 1104 } 1105 1106 priv = kzalloc(sizeof *priv, GFP_KERNEL); 1107 if (!priv) { 1108 dev_err(&pdev->dev, "Device struct alloc failed, " 1109 "aborting.\n"); 1110 err = -ENOMEM; 1111 goto err_release_regions; 1112 } 1113 1114 dev = &priv->dev; 1115 dev->pdev = pdev; 1116 INIT_LIST_HEAD(&priv->ctx_list); 1117 spin_lock_init(&priv->ctx_lock); 1118 1119 mutex_init(&priv->port_mutex); 1120 1121 INIT_LIST_HEAD(&priv->pgdir_list); 1122 mutex_init(&priv->pgdir_mutex); 1123 1124 /* 1125 * Now reset the HCA before we touch the PCI capabilities or 1126 * attempt a firmware command, since a boot ROM may have left 1127 * the HCA in an undefined state. 1128 */ 1129 err = mlx4_reset(dev); 1130 if (err) { 1131 mlx4_err(dev, "Failed to reset HCA, aborting.\n"); 1132 goto err_free_dev; 1133 } 1134 1135 if (mlx4_cmd_init(dev)) { 1136 mlx4_err(dev, "Failed to init command interface, aborting.\n"); 1137 goto err_free_dev; 1138 } 1139 1140 err = mlx4_init_hca(dev); 1141 if (err) 1142 goto err_cmd; 1143 1144 err = mlx4_alloc_eq_table(dev); 1145 if (err) 1146 goto err_close; 1147 1148 mlx4_enable_msi_x(dev); 1149 1150 err = mlx4_setup_hca(dev); 1151 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) { 1152 dev->flags &= ~MLX4_FLAG_MSI_X; 1153 pci_disable_msix(pdev); 1154 err = mlx4_setup_hca(dev); 1155 } 1156 1157 if (err) 1158 goto err_free_eq; 1159 1160 for (port = 1; port <= dev->caps.num_ports; port++) { 1161 err = mlx4_init_port_info(dev, port); 1162 if (err) 1163 goto err_port; 1164 } 1165 1166 err = mlx4_register_device(dev); 1167 if (err) 1168 goto err_port; 1169 1170 mlx4_sense_init(dev); 1171 mlx4_start_sense(dev); 1172 1173 pci_set_drvdata(pdev, dev); 1174 1175 return 0; 1176 1177err_port: 1178 for (--port; port >= 1; --port) 1179 mlx4_cleanup_port_info(&priv->port[port]); 1180 1181 mlx4_cleanup_mcg_table(dev); 1182 mlx4_cleanup_qp_table(dev); 1183 mlx4_cleanup_srq_table(dev); 1184 mlx4_cleanup_cq_table(dev); 1185 mlx4_cmd_use_polling(dev); 1186 mlx4_cleanup_eq_table(dev); 1187 mlx4_cleanup_mr_table(dev); 1188 mlx4_cleanup_pd_table(dev); 1189 mlx4_cleanup_uar_table(dev); 1190 1191err_free_eq: 1192 mlx4_free_eq_table(dev); 1193 1194err_close: 1195 if (dev->flags & MLX4_FLAG_MSI_X) 1196 pci_disable_msix(pdev); 1197 1198 mlx4_close_hca(dev); 1199 1200err_cmd: 1201 mlx4_cmd_cleanup(dev); 1202 1203err_free_dev: 1204 kfree(priv); 1205 1206err_release_regions: 1207 pci_release_regions(pdev); 1208 1209err_disable_pdev: 1210 pci_disable_device(pdev); 1211 pci_set_drvdata(pdev, NULL); 1212 return err; 1213} 1214 1215static int __devinit mlx4_init_one(struct pci_dev *pdev, 1216 const struct pci_device_id *id) 1217{ 1218 printk_once(KERN_INFO "%s", mlx4_version); 1219 1220 return __mlx4_init_one(pdev, id); 1221} 1222 1223static void mlx4_remove_one(struct pci_dev *pdev) 1224{ 1225 struct mlx4_dev *dev = pci_get_drvdata(pdev); 1226 struct mlx4_priv *priv = mlx4_priv(dev); 1227 int p; 1228 1229 if (dev) { 1230 mlx4_stop_sense(dev); 1231 mlx4_unregister_device(dev); 1232 1233 for (p = 1; p <= dev->caps.num_ports; p++) { 1234 mlx4_cleanup_port_info(&priv->port[p]); 1235 mlx4_CLOSE_PORT(dev, p); 1236 } 1237 1238 mlx4_cleanup_mcg_table(dev); 1239 mlx4_cleanup_qp_table(dev); 1240 mlx4_cleanup_srq_table(dev); 1241 mlx4_cleanup_cq_table(dev); 1242 mlx4_cmd_use_polling(dev); 1243 mlx4_cleanup_eq_table(dev); 1244 mlx4_cleanup_mr_table(dev); 1245 mlx4_cleanup_pd_table(dev); 1246 1247 iounmap(priv->kar); 1248 mlx4_uar_free(dev, &priv->driver_uar); 1249 mlx4_cleanup_uar_table(dev); 1250 mlx4_free_eq_table(dev); 1251 mlx4_close_hca(dev); 1252 mlx4_cmd_cleanup(dev); 1253 1254 if (dev->flags & MLX4_FLAG_MSI_X) 1255 pci_disable_msix(pdev); 1256 1257 kfree(priv); 1258 pci_release_regions(pdev); 1259 pci_disable_device(pdev); 1260 pci_set_drvdata(pdev, NULL); 1261 } 1262} 1263 1264int mlx4_restart_one(struct pci_dev *pdev) 1265{ 1266 mlx4_remove_one(pdev); 1267 return __mlx4_init_one(pdev, NULL); 1268} 1269 1270static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = { 1271 { PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */ 1272 { PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */ 1273 { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */ 1274 { PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */ 1275 { PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */ 1276 { PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */ 1277 { PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */ 1278 { PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */ 1279 { PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */ 1280 { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/ 1281 { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */ 1282 { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */ 1283 { 0, } 1284}; 1285 1286MODULE_DEVICE_TABLE(pci, mlx4_pci_table); 1287 1288static struct pci_driver mlx4_driver = { 1289 .name = DRV_NAME, 1290 .id_table = mlx4_pci_table, 1291 .probe = mlx4_init_one, 1292 .remove = __devexit_p(mlx4_remove_one) 1293}; 1294 1295static int __init mlx4_verify_params(void) 1296{ 1297 if ((log_num_mac < 0) || (log_num_mac > 7)) { 1298 pr_warning("mlx4_core: bad num_mac: %d\n", log_num_mac); 1299 return -1; 1300 } 1301 1302 if ((log_num_vlan < 0) || (log_num_vlan > 7)) { 1303 pr_warning("mlx4_core: bad num_vlan: %d\n", log_num_vlan); 1304 return -1; 1305 } 1306 1307 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) { 1308 pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg); 1309 return -1; 1310 } 1311 1312 return 0; 1313} 1314 1315static int __init mlx4_init(void) 1316{ 1317 int ret; 1318 1319 if (mlx4_verify_params()) 1320 return -EINVAL; 1321 1322 mlx4_catas_init(); 1323 1324 mlx4_wq = create_singlethread_workqueue("mlx4"); 1325 if (!mlx4_wq) 1326 return -ENOMEM; 1327 1328 ret = pci_register_driver(&mlx4_driver); 1329 return ret < 0 ? ret : 0; 1330} 1331 1332static void __exit mlx4_cleanup(void) 1333{ 1334 pci_unregister_driver(&mlx4_driver); 1335 destroy_workqueue(mlx4_wq); 1336} 1337 1338module_init(mlx4_init); 1339module_exit(mlx4_cleanup); 1340