1/* 2 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. 3 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. 4 * 5 * This software is available to you under a choice of one of two 6 * licenses. You may choose to be licensed under the terms of the GNU 7 * General Public License (GPL) Version 2, available from the file 8 * COPYING in the main directory of this source tree, or the 9 * OpenIB.org BSD license below: 10 * 11 * Redistribution and use in source and binary forms, with or 12 * without modification, are permitted provided that the following 13 * conditions are met: 14 * 15 * - Redistributions of source code must retain the above 16 * copyright notice, this list of conditions and the following 17 * disclaimer. 18 * 19 * - Redistributions in binary form must reproduce the above 20 * copyright notice, this list of conditions and the following 21 * disclaimer in the documentation and/or other materials 22 * provided with the distribution. 23 * 24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 31 * SOFTWARE. 32 */ 33 34#include <linux/init.h> 35#include <linux/interrupt.h> 36#include <linux/slab.h> 37#include <linux/mm.h> 38#include <linux/dma-mapping.h> 39 40#include <linux/mlx4/cmd.h> 41 42#include "mlx4.h" 43#include "fw.h" 44 45enum { 46 MLX4_IRQNAME_SIZE = 32 47}; 48 49enum { 50 MLX4_NUM_ASYNC_EQE = 0x100, 51 MLX4_NUM_SPARE_EQE = 0x80, 52 MLX4_EQ_ENTRY_SIZE = 0x20 53}; 54 55#define MLX4_EQ_STATUS_OK ( 0 << 28) 56#define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28) 57#define MLX4_EQ_OWNER_SW ( 0 << 24) 58#define MLX4_EQ_OWNER_HW ( 1 << 24) 59#define MLX4_EQ_FLAG_EC ( 1 << 18) 60#define MLX4_EQ_FLAG_OI ( 1 << 17) 61#define MLX4_EQ_STATE_ARMED ( 9 << 8) 62#define MLX4_EQ_STATE_FIRED (10 << 8) 63#define MLX4_EQ_STATE_ALWAYS_ARMED (11 << 8) 64 65#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG) | \ 66 (1ull << MLX4_EVENT_TYPE_COMM_EST) | \ 67 (1ull << MLX4_EVENT_TYPE_SQ_DRAINED) | \ 68 (1ull << MLX4_EVENT_TYPE_CQ_ERROR) | \ 69 (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR) | \ 70 (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR) | \ 71 (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED) | \ 72 (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \ 73 (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR) | \ 74 (1ull << MLX4_EVENT_TYPE_PORT_CHANGE) | \ 75 (1ull << MLX4_EVENT_TYPE_ECC_DETECT) | \ 76 (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) | \ 77 (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \ 78 (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \ 79 (1ull << MLX4_EVENT_TYPE_CMD) | \ 80 (1ull << MLX4_EVENT_TYPE_OP_REQUIRED) | \ 81 (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL) | \ 82 (1ull << MLX4_EVENT_TYPE_FLR_EVENT) | \ 83 (1ull << MLX4_EVENT_TYPE_FATAL_WARNING)) 84 85static u64 get_async_ev_mask(struct mlx4_dev *dev) 86{ 87 u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK; 88 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV) 89 async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT); 90 91 return async_ev_mask; 92} 93 94static void eq_set_ci(struct mlx4_eq *eq, int req_not) 95{ 96 __raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) | 97 req_not << 31), 98 eq->doorbell); 99 /* We still want ordering, just not swabbing, so add a barrier */ 100 mb(); 101} 102 103static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor) 104{ 105 /* (entry & (eq->nent - 1)) gives us a cyclic array */ 106 unsigned long offset = (entry & (eq->nent - 1)) * (MLX4_EQ_ENTRY_SIZE << eqe_factor); 107 /* CX3 is capable of extending the EQE from 32 to 64 bytes. 108 * When this feature is enabled, the first (in the lower addresses) 109 * 32 bytes in the 64 byte EQE are reserved and the next 32 bytes 110 * contain the legacy EQE information. 111 */ 112 return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE; 113} 114 115static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor) 116{ 117 struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor); 118 return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe; 119} 120 121static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_eq) 122{ 123 struct mlx4_eqe *eqe = 124 &slave_eq->event_eqe[slave_eq->cons & (SLAVE_EVENT_EQ_SIZE - 1)]; 125 return (!!(eqe->owner & 0x80) ^ 126 !!(slave_eq->cons & SLAVE_EVENT_EQ_SIZE)) ? 127 eqe : NULL; 128} 129 130void mlx4_gen_slave_eqe(struct work_struct *work) 131{ 132 struct mlx4_mfunc_master_ctx *master = 133 container_of(work, struct mlx4_mfunc_master_ctx, 134 slave_event_work); 135 struct mlx4_mfunc *mfunc = 136 container_of(master, struct mlx4_mfunc, master); 137 struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc); 138 struct mlx4_dev *dev = &priv->dev; 139 struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq; 140 struct mlx4_eqe *eqe; 141 u8 slave; 142 int i; 143 144 for (eqe = next_slave_event_eqe(slave_eq); eqe; 145 eqe = next_slave_event_eqe(slave_eq)) { 146 slave = eqe->slave_id; 147 148 /* All active slaves need to receive the event */ 149 if (slave == ALL_SLAVES) { 150 for (i = 0; i < dev->num_slaves; i++) { 151 if (i != dev->caps.function && 152 master->slave_state[i].active) 153 if (mlx4_GEN_EQE(dev, i, eqe)) 154 mlx4_warn(dev, "Failed to " 155 " generate event " 156 "for slave %d\n", i); 157 } 158 } else { 159 if (mlx4_GEN_EQE(dev, slave, eqe)) 160 mlx4_warn(dev, "Failed to generate event " 161 "for slave %d\n", slave); 162 } 163 ++slave_eq->cons; 164 } 165} 166 167 168static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe) 169{ 170 struct mlx4_priv *priv = mlx4_priv(dev); 171 struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq; 172 struct mlx4_eqe *s_eqe; 173 unsigned long flags; 174 175 spin_lock_irqsave(&slave_eq->event_lock, flags); 176 s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)]; 177 if ((!!(s_eqe->owner & 0x80)) ^ 178 (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) { 179 mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. " 180 "No free EQE on slave events queue\n", slave); 181 spin_unlock_irqrestore(&slave_eq->event_lock, flags); 182 return; 183 } 184 185 memcpy(s_eqe, eqe, dev->caps.eqe_size - 1); 186 s_eqe->slave_id = slave; 187 /* ensure all information is written before setting the ownersip bit */ 188 wmb(); 189 s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80; 190 ++slave_eq->prod; 191 192 queue_work(priv->mfunc.master.comm_wq, 193 &priv->mfunc.master.slave_event_work); 194 spin_unlock_irqrestore(&slave_eq->event_lock, flags); 195} 196 197static void mlx4_slave_event(struct mlx4_dev *dev, int slave, 198 struct mlx4_eqe *eqe) 199{ 200 struct mlx4_priv *priv = mlx4_priv(dev); 201 struct mlx4_slave_state *s_slave = 202 &priv->mfunc.master.slave_state[slave]; 203 204 if (!s_slave->active) { 205 /*mlx4_warn(dev, "Trying to pass event to inactive slave\n");*/ 206 return; 207 } 208 209 slave_event(dev, slave, eqe); 210} 211 212int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port) 213{ 214 struct mlx4_eqe eqe; 215 216 struct mlx4_priv *priv = mlx4_priv(dev); 217 struct mlx4_slave_state *s_slave = &priv->mfunc.master.slave_state[slave]; 218 219 if (!s_slave->active) 220 return 0; 221 222 memset(&eqe, 0, sizeof eqe); 223 224 eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT; 225 eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE; 226 eqe.event.port_mgmt_change.port = port; 227 228 return mlx4_GEN_EQE(dev, slave, &eqe); 229} 230EXPORT_SYMBOL(mlx4_gen_pkey_eqe); 231 232int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port) 233{ 234 struct mlx4_eqe eqe; 235 236 /*don't send if we don't have the that slave */ 237 if (dev->num_vfs < slave) 238 return 0; 239 memset(&eqe, 0, sizeof eqe); 240 241 eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT; 242 eqe.subtype = MLX4_DEV_PMC_SUBTYPE_GUID_INFO; 243 eqe.event.port_mgmt_change.port = port; 244 245 return mlx4_GEN_EQE(dev, slave, &eqe); 246} 247EXPORT_SYMBOL(mlx4_gen_guid_change_eqe); 248 249int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port, 250 u8 port_subtype_change) 251{ 252 struct mlx4_eqe eqe; 253 254 /*don't send if we don't have the that slave */ 255 if (dev->num_vfs < slave) 256 return 0; 257 memset(&eqe, 0, sizeof eqe); 258 259 eqe.type = MLX4_EVENT_TYPE_PORT_CHANGE; 260 eqe.subtype = port_subtype_change; 261 eqe.event.port_change.port = cpu_to_be32(port << 28); 262 263 mlx4_dbg(dev, "%s: sending: %d to slave: %d on port: %d\n", __func__, 264 port_subtype_change, slave, port); 265 return mlx4_GEN_EQE(dev, slave, &eqe); 266} 267EXPORT_SYMBOL(mlx4_gen_port_state_change_eqe); 268 269enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave, u8 port) 270{ 271 struct mlx4_priv *priv = mlx4_priv(dev); 272 struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state; 273 if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS) { 274 pr_err("%s: Error: asking for slave:%d, port:%d\n", 275 __func__, slave, port); 276 return SLAVE_PORT_DOWN; 277 } 278 return s_state[slave].port_state[port]; 279} 280EXPORT_SYMBOL(mlx4_get_slave_port_state); 281 282static int mlx4_set_slave_port_state(struct mlx4_dev *dev, int slave, u8 port, 283 enum slave_port_state state) 284{ 285 struct mlx4_priv *priv = mlx4_priv(dev); 286 struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state; 287 288 if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS || port == 0) { 289 pr_err("%s: Error: asking for slave:%d, port:%d\n", 290 __func__, slave, port); 291 return -1; 292 } 293 s_state[slave].port_state[port] = state; 294 295 return 0; 296} 297 298static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event) 299{ 300 int i; 301 enum slave_port_gen_event gen_event; 302 303 for (i = 0; i < dev->num_slaves; i++) 304 set_and_calc_slave_port_state(dev, i, port, event, &gen_event); 305} 306/************************************************************************** 307 The function get as input the new event to that port, 308 and according to the prev state change the slave's port state. 309 The events are: 310 MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN, 311 MLX4_PORT_STATE_DEV_EVENT_PORT_UP 312 MLX4_PORT_STATE_IB_EVENT_GID_VALID 313 MLX4_PORT_STATE_IB_EVENT_GID_INVALID 314***************************************************************************/ 315int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave, 316 u8 port, int event, 317 enum slave_port_gen_event *gen_event) 318{ 319 struct mlx4_priv *priv = mlx4_priv(dev); 320 struct mlx4_slave_state *ctx = NULL; 321 unsigned long flags; 322 int ret = -1; 323 enum slave_port_state cur_state = 324 mlx4_get_slave_port_state(dev, slave, port); 325 326 *gen_event = SLAVE_PORT_GEN_EVENT_NONE; 327 328 if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS || port == 0) { 329 pr_err("%s: Error: asking for slave:%d, port:%d\n", 330 __func__, slave, port); 331 return ret; 332 } 333 334 ctx = &priv->mfunc.master.slave_state[slave]; 335 spin_lock_irqsave(&ctx->lock, flags); 336 337 switch (cur_state) { 338 case SLAVE_PORT_DOWN: 339 if (MLX4_PORT_STATE_DEV_EVENT_PORT_UP == event) 340 mlx4_set_slave_port_state(dev, slave, port, 341 SLAVE_PENDING_UP); 342 break; 343 case SLAVE_PENDING_UP: 344 if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event) 345 mlx4_set_slave_port_state(dev, slave, port, 346 SLAVE_PORT_DOWN); 347 else if (MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID == event) { 348 mlx4_set_slave_port_state(dev, slave, port, 349 SLAVE_PORT_UP); 350 *gen_event = SLAVE_PORT_GEN_EVENT_UP; 351 } 352 break; 353 case SLAVE_PORT_UP: 354 if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event) { 355 mlx4_set_slave_port_state(dev, slave, port, 356 SLAVE_PORT_DOWN); 357 *gen_event = SLAVE_PORT_GEN_EVENT_DOWN; 358 } else if (MLX4_PORT_STATE_IB_EVENT_GID_INVALID == 359 event) { 360 mlx4_set_slave_port_state(dev, slave, port, 361 SLAVE_PENDING_UP); 362 *gen_event = SLAVE_PORT_GEN_EVENT_DOWN; 363 } 364 break; 365 default: 366 pr_err("%s: BUG!!! UNKNOWN state: " 367 "slave:%d, port:%d\n", __func__, slave, port); 368 goto out; 369 } 370 ret = mlx4_get_slave_port_state(dev, slave, port); 371 372out: 373 spin_unlock_irqrestore(&ctx->lock, flags); 374 return ret; 375} 376 377EXPORT_SYMBOL(set_and_calc_slave_port_state); 378 379int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr) 380{ 381 struct mlx4_eqe eqe; 382 383 memset(&eqe, 0, sizeof eqe); 384 385 eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT; 386 eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PORT_INFO; 387 eqe.event.port_mgmt_change.port = port; 388 eqe.event.port_mgmt_change.params.port_info.changed_attr = 389 cpu_to_be32((u32) attr); 390 391 slave_event(dev, ALL_SLAVES, &eqe); 392 return 0; 393} 394EXPORT_SYMBOL(mlx4_gen_slaves_port_mgt_ev); 395 396void mlx4_master_handle_slave_flr(struct work_struct *work) 397{ 398 struct mlx4_mfunc_master_ctx *master = 399 container_of(work, struct mlx4_mfunc_master_ctx, 400 slave_flr_event_work); 401 struct mlx4_mfunc *mfunc = 402 container_of(master, struct mlx4_mfunc, master); 403 struct mlx4_priv *priv = 404 container_of(mfunc, struct mlx4_priv, mfunc); 405 struct mlx4_dev *dev = &priv->dev; 406 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state; 407 int i; 408 int err; 409 unsigned long flags; 410 411 mlx4_dbg(dev, "mlx4_handle_slave_flr\n"); 412 413 for (i = 0 ; i < dev->num_slaves; i++) { 414 415 if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) { 416 mlx4_dbg(dev, "mlx4_handle_slave_flr: " 417 "clean slave: %d\n", i); 418 419 mlx4_delete_all_resources_for_slave(dev, i); 420 /*return the slave to running mode*/ 421 spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags); 422 slave_state[i].last_cmd = MLX4_COMM_CMD_RESET; 423 slave_state[i].is_slave_going_down = 0; 424 spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags); 425 /*notify the FW:*/ 426 err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE, 427 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 428 if (err) 429 mlx4_warn(dev, "Failed to notify FW on " 430 "FLR done (slave:%d)\n", i); 431 } 432 } 433} 434 435static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) 436{ 437 struct mlx4_priv *priv = mlx4_priv(dev); 438 struct mlx4_eqe *eqe; 439 int cqn; 440 int eqes_found = 0; 441 int set_ci = 0; 442 int port; 443 int slave = 0; 444 int ret; 445 u32 flr_slave; 446 u8 update_slave_state; 447 int i; 448 enum slave_port_gen_event gen_event; 449 unsigned long flags; 450 451 while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor))) { 452 /* 453 * Make sure we read EQ entry contents after we've 454 * checked the ownership bit. 455 */ 456 rmb(); 457 458 switch (eqe->type) { 459 case MLX4_EVENT_TYPE_COMP: 460 cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff; 461 mlx4_cq_completion(dev, cqn); 462 break; 463 464 case MLX4_EVENT_TYPE_PATH_MIG: 465 case MLX4_EVENT_TYPE_COMM_EST: 466 case MLX4_EVENT_TYPE_SQ_DRAINED: 467 case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE: 468 case MLX4_EVENT_TYPE_WQ_CATAS_ERROR: 469 case MLX4_EVENT_TYPE_PATH_MIG_FAILED: 470 case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR: 471 case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR: 472 mlx4_dbg(dev, "event %d arrived\n", eqe->type); 473 if (mlx4_is_master(dev)) { 474 /* forward only to slave owning the QP */ 475 ret = mlx4_get_slave_from_resource_id(dev, 476 RES_QP, 477 be32_to_cpu(eqe->event.qp.qpn) 478 & 0xffffff, &slave); 479 if (ret && ret != -ENOENT) { 480 mlx4_dbg(dev, "QP event %02x(%02x) on " 481 "EQ %d at index %u: could " 482 "not get slave id (%d)\n", 483 eqe->type, eqe->subtype, 484 eq->eqn, eq->cons_index, ret); 485 break; 486 } 487 488 if (!ret && slave != dev->caps.function) { 489 mlx4_slave_event(dev, slave, eqe); 490 break; 491 } 492 493 } 494 mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 495 0xffffff, eqe->type); 496 break; 497 498 case MLX4_EVENT_TYPE_SRQ_LIMIT: 499 mlx4_warn(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n", 500 __func__); 501 case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR: 502 if (mlx4_is_master(dev)) { 503 /* forward only to slave owning the SRQ */ 504 ret = mlx4_get_slave_from_resource_id(dev, 505 RES_SRQ, 506 be32_to_cpu(eqe->event.srq.srqn) 507 & 0xffffff, 508 &slave); 509 if (ret && ret != -ENOENT) { 510 mlx4_warn(dev, "SRQ event %02x(%02x) " 511 "on EQ %d at index %u: could" 512 " not get slave id (%d)\n", 513 eqe->type, eqe->subtype, 514 eq->eqn, eq->cons_index, ret); 515 break; 516 } 517 mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x," 518 " event: %02x(%02x)\n", __func__, 519 slave, 520 be32_to_cpu(eqe->event.srq.srqn), 521 eqe->type, eqe->subtype); 522 523 if (!ret && slave != dev->caps.function) { 524 mlx4_warn(dev, "%s: sending event " 525 "%02x(%02x) to slave:%d\n", 526 __func__, eqe->type, 527 eqe->subtype, slave); 528 mlx4_slave_event(dev, slave, eqe); 529 break; 530 } 531 } 532 mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 533 0xffffff, eqe->type); 534 break; 535 536 case MLX4_EVENT_TYPE_CMD: 537 mlx4_cmd_event(dev, 538 be16_to_cpu(eqe->event.cmd.token), 539 eqe->event.cmd.status, 540 be64_to_cpu(eqe->event.cmd.out_param)); 541 break; 542 543 case MLX4_EVENT_TYPE_PORT_CHANGE: 544 port = be32_to_cpu(eqe->event.port_change.port) >> 28; 545 if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) { 546 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN, 547 port); 548 mlx4_priv(dev)->sense.do_sense_port[port] = 1; 549 if (!mlx4_is_master(dev)) 550 break; 551 for (i = 0; i < dev->num_slaves; i++) { 552 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) { 553 if (i == mlx4_master_func_num(dev)) 554 continue; 555 mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN" 556 " to slave: %d, port:%d\n", 557 __func__, i, port); 558 mlx4_slave_event(dev, i, eqe); 559 } else { /* IB port */ 560 set_and_calc_slave_port_state(dev, i, port, 561 MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN, 562 &gen_event); 563 /*we can be in pending state, then do not send port_down event*/ 564 if (SLAVE_PORT_GEN_EVENT_DOWN == gen_event) { 565 if (i == mlx4_master_func_num(dev)) 566 continue; 567 mlx4_slave_event(dev, i, eqe); 568 } 569 } 570 } 571 } else { 572 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP, port); 573 574 mlx4_priv(dev)->sense.do_sense_port[port] = 0; 575 576 if (!mlx4_is_master(dev)) 577 break; 578 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) 579 for (i = 0; i < dev->num_slaves; i++) { 580 if (i == mlx4_master_func_num(dev)) 581 continue; 582 mlx4_slave_event(dev, i, eqe); 583 } 584 else /* IB port */ 585 /* port-up event will be sent to a slave when the 586 * slave's alias-guid is set. This is done in alias_GUID.c 587 */ 588 set_all_slave_state(dev, port, MLX4_DEV_EVENT_PORT_UP); 589 } 590 break; 591 592 case MLX4_EVENT_TYPE_CQ_ERROR: 593 mlx4_warn(dev, "CQ %s on CQN %06x\n", 594 eqe->event.cq_err.syndrome == 1 ? 595 "overrun" : "access violation", 596 be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff); 597 if (mlx4_is_master(dev)) { 598 ret = mlx4_get_slave_from_resource_id(dev, 599 RES_CQ, 600 be32_to_cpu(eqe->event.cq_err.cqn) 601 & 0xffffff, &slave); 602 if (ret && ret != -ENOENT) { 603 mlx4_dbg(dev, "CQ event %02x(%02x) on " 604 "EQ %d at index %u: could " 605 "not get slave id (%d)\n", 606 eqe->type, eqe->subtype, 607 eq->eqn, eq->cons_index, ret); 608 break; 609 } 610 611 if (!ret && slave != dev->caps.function) { 612 mlx4_slave_event(dev, slave, eqe); 613 break; 614 } 615 } 616 mlx4_cq_event(dev, 617 be32_to_cpu(eqe->event.cq_err.cqn) 618 & 0xffffff, 619 eqe->type); 620 break; 621 622 case MLX4_EVENT_TYPE_EQ_OVERFLOW: 623 mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn); 624 break; 625 626 case MLX4_EVENT_TYPE_OP_REQUIRED: 627 atomic_inc(&priv->opreq_count); 628 /* FW commands can't be executed from interrupt context 629 working in deferred task */ 630 queue_work(mlx4_wq, &priv->opreq_task); 631 break; 632 633 case MLX4_EVENT_TYPE_COMM_CHANNEL: 634 if (!mlx4_is_master(dev)) { 635 mlx4_warn(dev, "Received comm channel event " 636 "for non master device\n"); 637 break; 638 } 639 memcpy(&priv->mfunc.master.comm_arm_bit_vector, 640 eqe->event.comm_channel_arm.bit_vec, 641 sizeof eqe->event.comm_channel_arm.bit_vec); 642 queue_work(priv->mfunc.master.comm_wq, 643 &priv->mfunc.master.comm_work); 644 break; 645 646 case MLX4_EVENT_TYPE_FLR_EVENT: 647 flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id); 648 if (!mlx4_is_master(dev)) { 649 mlx4_warn(dev, "Non-master function received" 650 "FLR event\n"); 651 break; 652 } 653 654 mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave); 655 656 if (flr_slave >= dev->num_slaves) { 657 mlx4_warn(dev, 658 "Got FLR for unknown function: %d\n", 659 flr_slave); 660 update_slave_state = 0; 661 } else 662 update_slave_state = 1; 663 664 spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags); 665 if (update_slave_state) { 666 priv->mfunc.master.slave_state[flr_slave].active = false; 667 priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR; 668 priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1; 669 } 670 spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags); 671 queue_work(priv->mfunc.master.comm_wq, 672 &priv->mfunc.master.slave_flr_event_work); 673 break; 674 675 case MLX4_EVENT_TYPE_FATAL_WARNING: 676 if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) { 677 if (mlx4_is_master(dev)) 678 for (i = 0; i < dev->num_slaves; i++) { 679 mlx4_dbg(dev, "%s: Sending " 680 "MLX4_FATAL_WARNING_SUBTYPE_WARMING" 681 " to slave: %d\n", __func__, i); 682 if (i == dev->caps.function) 683 continue; 684 mlx4_slave_event(dev, i, eqe); 685 } 686 mlx4_err(dev, "Temperature Threshold was reached! " 687 "Threshold: %d celsius degrees; " 688 "Current Temperature: %d\n", 689 be16_to_cpu(eqe->event.warming.warning_threshold), 690 be16_to_cpu(eqe->event.warming.current_temperature)); 691 } else 692 mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), " 693 "subtype %02x on EQ %d at index %u. owner=%x, " 694 "nent=0x%x, slave=%x, ownership=%s\n", 695 eqe->type, eqe->subtype, eq->eqn, 696 eq->cons_index, eqe->owner, eq->nent, 697 eqe->slave_id, 698 !!(eqe->owner & 0x80) ^ 699 !!(eq->cons_index & eq->nent) ? "HW" : "SW"); 700 701 break; 702 703 case MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT: 704 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_MGMT_CHANGE, 705 (unsigned long) eqe); 706 break; 707 708 case MLX4_EVENT_TYPE_EEC_CATAS_ERROR: 709 case MLX4_EVENT_TYPE_ECC_DETECT: 710 default: 711 mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at " 712 "index %u. owner=%x, nent=0x%x, slave=%x, " 713 "ownership=%s\n", 714 eqe->type, eqe->subtype, eq->eqn, 715 eq->cons_index, eqe->owner, eq->nent, 716 eqe->slave_id, 717 !!(eqe->owner & 0x80) ^ 718 !!(eq->cons_index & eq->nent) ? "HW" : "SW"); 719 break; 720 }; 721 722 ++eq->cons_index; 723 eqes_found = 1; 724 ++set_ci; 725 726 /* 727 * The HCA will think the queue has overflowed if we 728 * don't tell it we've been processing events. We 729 * create our EQs with MLX4_NUM_SPARE_EQE extra 730 * entries, so we must update our consumer index at 731 * least that often. 732 */ 733 if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) { 734 eq_set_ci(eq, 0); 735 set_ci = 0; 736 } 737 } 738 739 eq_set_ci(eq, 1); 740 741 return eqes_found; 742} 743 744static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr) 745{ 746 struct mlx4_dev *dev = dev_ptr; 747 struct mlx4_priv *priv = mlx4_priv(dev); 748 int work = 0; 749 int i; 750 751 752 writel(priv->eq_table.clr_mask, priv->eq_table.clr_int); 753 754 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) 755 work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]); 756 757 return IRQ_RETVAL(work); 758} 759 760static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr) 761{ 762 struct mlx4_eq *eq = eq_ptr; 763 struct mlx4_dev *dev = eq->dev; 764 765 mlx4_eq_int(dev, eq); 766 767 /* MSI-X vectors always belong to us */ 768 return IRQ_HANDLED; 769} 770 771int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave, 772 struct mlx4_vhcr *vhcr, 773 struct mlx4_cmd_mailbox *inbox, 774 struct mlx4_cmd_mailbox *outbox, 775 struct mlx4_cmd_info *cmd) 776{ 777 struct mlx4_priv *priv = mlx4_priv(dev); 778 struct mlx4_slave_event_eq_info *event_eq = 779 priv->mfunc.master.slave_state[slave].event_eq; 780 u32 in_modifier = vhcr->in_modifier; 781 u32 eqn = in_modifier & 0x1FF; 782 u64 in_param = vhcr->in_param; 783 int err = 0; 784 int i; 785 786 if (slave == dev->caps.function) 787 err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn, 788 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B, 789 MLX4_CMD_NATIVE); 790 if (!err) 791 for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) 792 if (in_param & (1LL << i)) 793 event_eq[i].eqn = in_modifier >> 31 ? -1 : eqn; 794 795 return err; 796} 797 798static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap, 799 int eq_num) 800{ 801 return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num, 802 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B, 803 MLX4_CMD_WRAPPED); 804} 805 806static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 807 int eq_num) 808{ 809 return mlx4_cmd(dev, mailbox->dma, eq_num, 0, 810 MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A, 811 MLX4_CMD_WRAPPED); 812} 813 814static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 815 int eq_num) 816{ 817 return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 818 0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A, 819 MLX4_CMD_WRAPPED); 820} 821 822static int mlx4_num_eq_uar(struct mlx4_dev *dev) 823{ 824 /* 825 * Each UAR holds 4 EQ doorbells. To figure out how many UARs 826 * we need to map, take the difference of highest index and 827 * the lowest index we'll use and add 1. 828 */ 829 return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs + 830 dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1; 831} 832 833static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq) 834{ 835 struct mlx4_priv *priv = mlx4_priv(dev); 836 int index; 837 838 index = eq->eqn / 4 - dev->caps.reserved_eqs / 4; 839 840 if (!priv->eq_table.uar_map[index]) { 841 priv->eq_table.uar_map[index] = 842 ioremap(pci_resource_start(dev->pdev, 2) + 843 ((eq->eqn / 4) << PAGE_SHIFT), 844 PAGE_SIZE); 845 if (!priv->eq_table.uar_map[index]) { 846 mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n", 847 eq->eqn); 848 return NULL; 849 } 850 } 851 852 return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4); 853} 854 855static void mlx4_unmap_uar(struct mlx4_dev *dev) 856{ 857 struct mlx4_priv *priv = mlx4_priv(dev); 858 int i; 859 860 for (i = 0; i < mlx4_num_eq_uar(dev); ++i) 861 if (priv->eq_table.uar_map[i]) { 862 iounmap(priv->eq_table.uar_map[i]); 863 priv->eq_table.uar_map[i] = NULL; 864 } 865} 866 867static int mlx4_create_eq(struct mlx4_dev *dev, int nent, 868 u8 intr, struct mlx4_eq *eq) 869{ 870 struct mlx4_priv *priv = mlx4_priv(dev); 871 struct mlx4_cmd_mailbox *mailbox; 872 struct mlx4_eq_context *eq_context; 873 int npages; 874 u64 *dma_list = NULL; 875 dma_addr_t t; 876 u64 mtt_addr; 877 int err = -ENOMEM; 878 int i; 879 880 eq->dev = dev; 881 eq->nent = roundup_pow_of_two(max(nent, 2)); 882 /* CX3 is capable of extending the CQE\EQE from 32 to 64 bytes */ 883 npages = PAGE_ALIGN(eq->nent * (MLX4_EQ_ENTRY_SIZE << dev->caps.eqe_factor)) / PAGE_SIZE; 884 885 eq->page_list = kmalloc(npages * sizeof *eq->page_list, 886 GFP_KERNEL); 887 if (!eq->page_list) 888 goto err_out; 889 890 for (i = 0; i < npages; ++i) 891 eq->page_list[i].buf = NULL; 892 893 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); 894 if (!dma_list) 895 goto err_out_free; 896 897 mailbox = mlx4_alloc_cmd_mailbox(dev); 898 if (IS_ERR(mailbox)) 899 goto err_out_free; 900 eq_context = mailbox->buf; 901 902 for (i = 0; i < npages; ++i) { 903 eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev, 904 PAGE_SIZE, &t, GFP_KERNEL); 905 if (!eq->page_list[i].buf) 906 goto err_out_free_pages; 907 908 dma_list[i] = t; 909 eq->page_list[i].map = t; 910 911 memset(eq->page_list[i].buf, 0, PAGE_SIZE); 912 } 913 914 eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap); 915 if (eq->eqn == -1) 916 goto err_out_free_pages; 917 918 eq->doorbell = mlx4_get_eq_uar(dev, eq); 919 if (!eq->doorbell) { 920 err = -ENOMEM; 921 goto err_out_free_eq; 922 } 923 924 err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt); 925 if (err) 926 goto err_out_free_eq; 927 928 err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list); 929 if (err) 930 goto err_out_free_mtt; 931 932 memset(eq_context, 0, sizeof *eq_context); 933 eq_context->flags = cpu_to_be32(MLX4_EQ_STATUS_OK | 934 MLX4_EQ_STATE_ARMED); 935 eq_context->log_eq_size = ilog2(eq->nent); 936 eq_context->intr = intr; 937 eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT; 938 939 mtt_addr = mlx4_mtt_addr(dev, &eq->mtt); 940 eq_context->mtt_base_addr_h = mtt_addr >> 32; 941 eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff); 942 943 err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn); 944 if (err) { 945 mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err); 946 goto err_out_free_mtt; 947 } 948 949 kfree(dma_list); 950 mlx4_free_cmd_mailbox(dev, mailbox); 951 952 eq->cons_index = 0; 953 954 return err; 955 956err_out_free_mtt: 957 mlx4_mtt_cleanup(dev, &eq->mtt); 958 959err_out_free_eq: 960 mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn); 961 962err_out_free_pages: 963 for (i = 0; i < npages; ++i) 964 if (eq->page_list[i].buf) 965 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, 966 eq->page_list[i].buf, 967 eq->page_list[i].map); 968 969 mlx4_free_cmd_mailbox(dev, mailbox); 970 971err_out_free: 972 kfree(eq->page_list); 973 kfree(dma_list); 974 975err_out: 976 return err; 977} 978 979static void mlx4_free_eq(struct mlx4_dev *dev, 980 struct mlx4_eq *eq) 981{ 982 struct mlx4_priv *priv = mlx4_priv(dev); 983 struct mlx4_cmd_mailbox *mailbox; 984 int err; 985 int i; 986 /* CX3 is capable of extending the CQE\EQE from 32 to 64 bytes */ 987 int npages = PAGE_ALIGN((MLX4_EQ_ENTRY_SIZE << dev->caps.eqe_factor) * eq->nent) / PAGE_SIZE; 988 989 mailbox = mlx4_alloc_cmd_mailbox(dev); 990 if (IS_ERR(mailbox)) 991 return; 992 993 err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn); 994 if (err) 995 mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err); 996 997 if (0) { 998 mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn); 999 for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) { 1000 if (i % 4 == 0) 1001 pr_cont("[%02x] ", i * 4); 1002 pr_cont(" %08x", be32_to_cpup(mailbox->buf + i * 4)); 1003 if ((i + 1) % 4 == 0) 1004 pr_cont("\n"); 1005 } 1006 } 1007 1008 mlx4_mtt_cleanup(dev, &eq->mtt); 1009 for (i = 0; i < npages; ++i) 1010 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, 1011 eq->page_list[i].buf, 1012 eq->page_list[i].map); 1013 1014 kfree(eq->page_list); 1015 mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn); 1016 mlx4_free_cmd_mailbox(dev, mailbox); 1017} 1018 1019static void mlx4_free_irqs(struct mlx4_dev *dev) 1020{ 1021 struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table; 1022 struct mlx4_priv *priv = mlx4_priv(dev); 1023 int i, vec; 1024 1025 if (eq_table->have_irq) 1026 free_irq(dev->pdev->irq, dev); 1027 1028 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) 1029 if (eq_table->eq[i].have_irq) { 1030 free_irq(eq_table->eq[i].irq, eq_table->eq + i); 1031 eq_table->eq[i].have_irq = 0; 1032 } 1033 1034 for (i = 0; i < dev->caps.comp_pool; i++) { 1035 /* 1036 * Freeing the assigned irq's 1037 * all bits should be 0, but we need to validate 1038 */ 1039 if (priv->msix_ctl.pool_bm & 1ULL << i) { 1040 /* NO need protecting*/ 1041 vec = dev->caps.num_comp_vectors + 1 + i; 1042 free_irq(priv->eq_table.eq[vec].irq, 1043 &priv->eq_table.eq[vec]); 1044 } 1045 } 1046 1047 1048 kfree(eq_table->irq_names); 1049} 1050 1051static int mlx4_map_clr_int(struct mlx4_dev *dev) 1052{ 1053 struct mlx4_priv *priv = mlx4_priv(dev); 1054 1055 priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) + 1056 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE); 1057 if (!priv->clr_base) { 1058 mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n"); 1059 return -ENOMEM; 1060 } 1061 1062 return 0; 1063} 1064 1065static void mlx4_unmap_clr_int(struct mlx4_dev *dev) 1066{ 1067 struct mlx4_priv *priv = mlx4_priv(dev); 1068 1069 iounmap(priv->clr_base); 1070} 1071 1072int mlx4_alloc_eq_table(struct mlx4_dev *dev) 1073{ 1074 struct mlx4_priv *priv = mlx4_priv(dev); 1075 1076 priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs, 1077 sizeof *priv->eq_table.eq, GFP_KERNEL); 1078 if (!priv->eq_table.eq) 1079 return -ENOMEM; 1080 1081 return 0; 1082} 1083 1084void mlx4_free_eq_table(struct mlx4_dev *dev) 1085{ 1086 kfree(mlx4_priv(dev)->eq_table.eq); 1087} 1088 1089int mlx4_init_eq_table(struct mlx4_dev *dev) 1090{ 1091 struct mlx4_priv *priv = mlx4_priv(dev); 1092 int err; 1093 int i; 1094 1095 priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev), 1096 sizeof *priv->eq_table.uar_map, 1097 GFP_KERNEL); 1098 if (!priv->eq_table.uar_map) { 1099 err = -ENOMEM; 1100 goto err_out_free; 1101 } 1102 1103 err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs, 1104 dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0); 1105 if (err) 1106 goto err_out_free; 1107 1108 for (i = 0; i < mlx4_num_eq_uar(dev); ++i) 1109 priv->eq_table.uar_map[i] = NULL; 1110 1111 if (!mlx4_is_slave(dev)) { 1112 err = mlx4_map_clr_int(dev); 1113 if (err) 1114 goto err_out_bitmap; 1115 1116 priv->eq_table.clr_mask = 1117 swab32(1 << (priv->eq_table.inta_pin & 31)); 1118 priv->eq_table.clr_int = priv->clr_base + 1119 (priv->eq_table.inta_pin < 32 ? 4 : 0); 1120 } 1121 1122 priv->eq_table.irq_names = 1123 kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 + 1124 dev->caps.comp_pool), 1125 GFP_KERNEL); 1126 if (!priv->eq_table.irq_names) { 1127 err = -ENOMEM; 1128 goto err_out_clr_int; 1129 } 1130 1131 for (i = 0; i < dev->caps.num_comp_vectors; ++i) { 1132 err = mlx4_create_eq(dev, dev->caps.num_cqs - 1133 dev->caps.reserved_cqs + 1134 MLX4_NUM_SPARE_EQE, 1135 (dev->flags & MLX4_FLAG_MSI_X) ? i : 0, 1136 &priv->eq_table.eq[i]); 1137 if (err) { 1138 --i; 1139 goto err_out_unmap; 1140 } 1141 } 1142 1143 err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE, 1144 (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0, 1145 &priv->eq_table.eq[dev->caps.num_comp_vectors]); 1146 if (err) 1147 goto err_out_comp; 1148 1149 /*if additional completion vectors poolsize is 0 this loop will not run*/ 1150 for (i = dev->caps.num_comp_vectors + 1; 1151 i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) { 1152 1153 err = mlx4_create_eq(dev, dev->caps.num_cqs - 1154 dev->caps.reserved_cqs + 1155 MLX4_NUM_SPARE_EQE, 1156 (dev->flags & MLX4_FLAG_MSI_X) ? i : 0, 1157 &priv->eq_table.eq[i]); 1158 if (err) { 1159 --i; 1160 goto err_out_unmap; 1161 } 1162 } 1163 1164 1165 if (dev->flags & MLX4_FLAG_MSI_X) { 1166 const char *eq_name; 1167 1168 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) { 1169 if (i < dev->caps.num_comp_vectors) { 1170 snprintf(priv->eq_table.irq_names + 1171 i * MLX4_IRQNAME_SIZE, 1172 MLX4_IRQNAME_SIZE, 1173 "mlx4-comp-%d@pci:%s", i, 1174 pci_name(dev->pdev)); 1175 } else { 1176 snprintf(priv->eq_table.irq_names + 1177 i * MLX4_IRQNAME_SIZE, 1178 MLX4_IRQNAME_SIZE, 1179 "mlx4-async@pci:%s", 1180 pci_name(dev->pdev)); 1181 } 1182 1183 eq_name = priv->eq_table.irq_names + 1184 i * MLX4_IRQNAME_SIZE; 1185 err = request_irq(priv->eq_table.eq[i].irq, 1186 mlx4_msi_x_interrupt, 0, eq_name, 1187 priv->eq_table.eq + i); 1188 if (err) 1189 goto err_out_async; 1190 1191 priv->eq_table.eq[i].have_irq = 1; 1192 } 1193 } else { 1194 snprintf(priv->eq_table.irq_names, 1195 MLX4_IRQNAME_SIZE, 1196 DRV_NAME "@pci:%s", 1197 pci_name(dev->pdev)); 1198 err = request_irq(dev->pdev->irq, mlx4_interrupt, 1199 IRQF_SHARED, priv->eq_table.irq_names, dev); 1200 if (err) 1201 goto err_out_async; 1202 1203 priv->eq_table.have_irq = 1; 1204 } 1205 1206 err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, 1207 priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); 1208 if (err) 1209 mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n", 1210 priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err); 1211 1212 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) 1213 eq_set_ci(&priv->eq_table.eq[i], 1); 1214 1215 return 0; 1216 1217err_out_async: 1218 mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]); 1219 1220err_out_comp: 1221 i = dev->caps.num_comp_vectors - 1; 1222 1223err_out_unmap: 1224 while (i >= 0) { 1225 mlx4_free_eq(dev, &priv->eq_table.eq[i]); 1226 --i; 1227 } 1228 mlx4_free_irqs(dev); 1229 1230err_out_clr_int: 1231 if (!mlx4_is_slave(dev)) 1232 mlx4_unmap_clr_int(dev); 1233 1234err_out_bitmap: 1235 mlx4_unmap_uar(dev); 1236 mlx4_bitmap_cleanup(&priv->eq_table.bitmap); 1237 1238err_out_free: 1239 kfree(priv->eq_table.uar_map); 1240 1241 return err; 1242} 1243 1244void mlx4_cleanup_eq_table(struct mlx4_dev *dev) 1245{ 1246 struct mlx4_priv *priv = mlx4_priv(dev); 1247 int i; 1248 1249 mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1, 1250 priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); 1251 1252 mlx4_free_irqs(dev); 1253 1254 for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) 1255 mlx4_free_eq(dev, &priv->eq_table.eq[i]); 1256 1257 if (!mlx4_is_slave(dev)) 1258 mlx4_unmap_clr_int(dev); 1259 1260 mlx4_unmap_uar(dev); 1261 mlx4_bitmap_cleanup(&priv->eq_table.bitmap); 1262 1263 kfree(priv->eq_table.uar_map); 1264} 1265 1266/* A test that verifies that we can accept interrupts on all 1267 * the irq vectors of the device. 1268 * Interrupts are checked using the NOP command. 1269 */ 1270int mlx4_test_interrupts(struct mlx4_dev *dev) 1271{ 1272 struct mlx4_priv *priv = mlx4_priv(dev); 1273 int i; 1274 int err; 1275 1276 err = mlx4_NOP(dev); 1277 /* When not in MSI_X, there is only one irq to check */ 1278 if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev)) 1279 return err; 1280 1281 /* A loop over all completion vectors, for each vector we will check 1282 * whether it works by mapping command completions to that vector 1283 * and performing a NOP command 1284 */ 1285 for(i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) { 1286 /* Temporary use polling for command completions */ 1287 mlx4_cmd_use_polling(dev); 1288 1289 /* Map the new eq to handle all asyncronous events */ 1290 err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, 1291 priv->eq_table.eq[i].eqn); 1292 if (err) { 1293 mlx4_warn(dev, "Failed mapping eq for interrupt test\n"); 1294 mlx4_cmd_use_events(dev); 1295 break; 1296 } 1297 1298 /* Go back to using events */ 1299 mlx4_cmd_use_events(dev); 1300 err = mlx4_NOP(dev); 1301 } 1302 1303 /* Return to default */ 1304 mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, 1305 priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); 1306 return err; 1307} 1308EXPORT_SYMBOL(mlx4_test_interrupts); 1309 1310int mlx4_assign_eq(struct mlx4_dev *dev, char *name, int *vector) 1311{ 1312 1313 struct mlx4_priv *priv = mlx4_priv(dev); 1314 int vec = 0, err = 0, i; 1315 1316 mutex_lock(&priv->msix_ctl.pool_lock); 1317 for (i = 0; !vec && i < dev->caps.comp_pool; i++) { 1318 if (~priv->msix_ctl.pool_bm & 1ULL << i) { 1319 priv->msix_ctl.pool_bm |= 1ULL << i; 1320 vec = dev->caps.num_comp_vectors + 1 + i; 1321 snprintf(priv->eq_table.irq_names + 1322 vec * MLX4_IRQNAME_SIZE, 1323 MLX4_IRQNAME_SIZE, "%s", name); 1324 err = request_irq(priv->eq_table.eq[vec].irq, 1325 mlx4_msi_x_interrupt, 0, 1326 &priv->eq_table.irq_names[vec<<5], 1327 priv->eq_table.eq + vec); 1328 if (err) { 1329 /*zero out bit by fliping it*/ 1330 priv->msix_ctl.pool_bm ^= 1 << i; 1331 vec = 0; 1332 continue; 1333 /*we dont want to break here*/ 1334 } 1335 eq_set_ci(&priv->eq_table.eq[vec], 1); 1336 } 1337 } 1338 mutex_unlock(&priv->msix_ctl.pool_lock); 1339 1340 if (vec) { 1341 *vector = vec; 1342 } else { 1343 *vector = 0; 1344 err = (i == dev->caps.comp_pool) ? -ENOSPC : err; 1345 } 1346 return err; 1347} 1348EXPORT_SYMBOL(mlx4_assign_eq); 1349 1350void mlx4_release_eq(struct mlx4_dev *dev, int vec) 1351{ 1352 struct mlx4_priv *priv = mlx4_priv(dev); 1353 /*bm index*/ 1354 int i = vec - dev->caps.num_comp_vectors - 1; 1355 1356 if (likely(i >= 0)) { 1357 /*sanity check , making sure were not trying to free irq's 1358 Belonging to a legacy EQ*/ 1359 mutex_lock(&priv->msix_ctl.pool_lock); 1360 if (priv->msix_ctl.pool_bm & 1ULL << i) { 1361 free_irq(priv->eq_table.eq[vec].irq, 1362 &priv->eq_table.eq[vec]); 1363 priv->msix_ctl.pool_bm &= ~(1ULL << i); 1364 } 1365 mutex_unlock(&priv->msix_ctl.pool_lock); 1366 } 1367 1368} 1369EXPORT_SYMBOL(mlx4_release_eq); 1370 1371