/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "fw.h"

enum {
        MLX4_IRQNAME_SIZE = 64
};

enum {
        MLX4_NUM_ASYNC_EQE = 0x100,
        MLX4_NUM_SPARE_EQE = 0x80,
        MLX4_EQ_ENTRY_SIZE = 0x20
};
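
/*
 * Each EQ entry is 32 bytes (MLX4_EQ_ENTRY_SIZE), matching struct
 * mlx4_eqe below.  MLX4_NUM_ASYNC_EQE sizes the single asynchronous
 * event queue, and MLX4_NUM_SPARE_EQE is the extra slack built into
 * every EQ that bounds how long mlx4_eq_int() may poll before it must
 * update the consumer index.
 */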

/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mlx4_eq_context {
        __be32  flags;
        u16     reserved1[3];
        __be16  page_offset;
        u8      log_eq_size;
        u8      reserved2[4];
        u8      eq_period;
        u8      reserved3;
        u8      eq_max_count;
        u8      reserved4[3];
        u8      intr;
        u8      log_page_size;
        u8      reserved5[2];
        u8      mtt_base_addr_h;
        __be32  mtt_base_addr_l;
        u32     reserved6[2];
        __be32  consumer_index;
        __be32  producer_index;
        u32     reserved7[4];
};

#define MLX4_EQ_STATUS_OK          ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MLX4_EQ_OWNER_SW           ( 0 << 24)
#define MLX4_EQ_OWNER_HW           ( 1 << 24)
#define MLX4_EQ_FLAG_EC            ( 1 << 18)
#define MLX4_EQ_FLAG_OI            ( 1 << 17)
#define MLX4_EQ_STATE_ARMED        ( 9 <<  8)
#define MLX4_EQ_STATE_FIRED        (10 <<  8)
#define MLX4_EQ_STATE_ALWAYS_ARMED (11 <<  8)

#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG)           | \
                               (1ull << MLX4_EVENT_TYPE_COMM_EST)           | \
                               (1ull << MLX4_EVENT_TYPE_SQ_DRAINED)         | \
                               (1ull << MLX4_EVENT_TYPE_CQ_ERROR)           | \
                               (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR)     | \
                               (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR)    | \
                               (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED)    | \
                               (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
                               (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
                               (1ull << MLX4_EVENT_TYPE_PORT_CHANGE)        | \
                               (1ull << MLX4_EVENT_TYPE_ECC_DETECT)         | \
                               (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
                               (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
                               (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)          | \
                               (1ull << MLX4_EVENT_TYPE_CMD))

struct mlx4_eqe {
        u8      reserved1;
        u8      type;
        u8      reserved2;
        u8      subtype;
        union {
                u32     raw[6];
                struct {
                        __be32  cqn;
                } __packed comp;
                struct {
                        u16     reserved1;
                        __be16  token;
                        u32     reserved2;
                        u8      reserved3[3];
                        u8      status;
                        __be64  out_param;
                } __packed cmd;
                struct {
                        __be32  qpn;
                } __packed qp;
                struct {
                        __be32  srqn;
                } __packed srq;
                struct {
                        __be32  cqn;
                        u32     reserved1;
                        u8      reserved2[3];
                        u8      syndrome;
                } __packed cq_err;
                struct {
                        u32     reserved1[2];
                        __be32  port;
                } __packed port_change;
        } event;
        u8      reserved3[3];
        u8      owner;
} __packed;

static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
        __raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
                                               req_not << 31),
                     eq->doorbell);
        /* We still want ordering, just not swabbing, so add a barrier */
        mb();
}

static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry)
{
        unsigned long off = (entry & (eq->nent - 1)) * MLX4_EQ_ENTRY_SIZE;

        return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
}

static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
{
        struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index);

        return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}
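
/*
 * next_eqe_sw() relies on the usual Mellanox ownership scheme: the high
 * bit of eqe->owner is expected to change polarity on every pass through
 * the (power of two sized) queue, and an entry is treated as valid only
 * when that bit matches the corresponding "lap" bit of the consumer index
 * (cons_index & nent).  Stale entries left over from the previous pass
 * therefore fail the check and read as hardware-owned, so the poll loop
 * below can stop without writing anything back into the queue.
 */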

static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
        struct mlx4_eqe *eqe;
        int cqn;
        int eqes_found = 0;
        int set_ci = 0;
        int port;

        while ((eqe = next_eqe_sw(eq))) {
                /*
                 * Make sure we read EQ entry contents after we've
                 * checked the ownership bit.
                 */
                rmb();

                switch (eqe->type) {
                case MLX4_EVENT_TYPE_COMP:
                        cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
                        mlx4_cq_completion(dev, cqn);
                        break;

                case MLX4_EVENT_TYPE_PATH_MIG:
                case MLX4_EVENT_TYPE_COMM_EST:
                case MLX4_EVENT_TYPE_SQ_DRAINED:
                case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
                case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
                case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
                case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
                        mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                      eqe->type);
                        break;

                case MLX4_EVENT_TYPE_SRQ_LIMIT:
                case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
                        mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
                                       eqe->type);
                        break;

                case MLX4_EVENT_TYPE_CMD:
                        mlx4_cmd_event(dev,
                                       be16_to_cpu(eqe->event.cmd.token),
                                       eqe->event.cmd.status,
                                       be64_to_cpu(eqe->event.cmd.out_param));
                        break;

                case MLX4_EVENT_TYPE_PORT_CHANGE:
                        port = be32_to_cpu(eqe->event.port_change.port) >> 28;
                        if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
                                mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
                                                    port);
                                mlx4_priv(dev)->sense.do_sense_port[port] = 1;
                        } else {
                                mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP,
                                                    port);
                                mlx4_priv(dev)->sense.do_sense_port[port] = 0;
                        }
                        break;

                case MLX4_EVENT_TYPE_CQ_ERROR:
                        mlx4_warn(dev, "CQ %s on CQN %06x\n",
                                  eqe->event.cq_err.syndrome == 1 ?
                                  "overrun" : "access violation",
                                  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
                        mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
                                      eqe->type);
                        break;

                case MLX4_EVENT_TYPE_EQ_OVERFLOW:
                        mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
                        break;

                case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
                case MLX4_EVENT_TYPE_ECC_DETECT:
                default:
                        mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n",
                                  eqe->type, eqe->subtype, eq->eqn, eq->cons_index);
                        break;
                }

                ++eq->cons_index;
                eqes_found = 1;
                ++set_ci;

                /*
                 * The HCA will think the queue has overflowed if we
                 * don't tell it we've been processing events.  We
                 * create our EQs with MLX4_NUM_SPARE_EQE extra
                 * entries, so we must update our consumer index at
                 * least that often.
                 */
                if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
                        eq_set_ci(eq, 0);
                        set_ci = 0;
                }
        }

        eq_set_ci(eq, 1);

        return eqes_found;
}
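
/*
 * Two interrupt paths follow.  mlx4_interrupt() is the legacy INTx
 * handler: it acknowledges the interrupt by writing clr_mask to the
 * clear register and then polls every EQ.  mlx4_msi_x_interrupt() is
 * registered once per MSI-X vector and services only the EQ it was
 * requested for.
 */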

static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
{
        struct mlx4_dev *dev = dev_ptr;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int work = 0;
        int i;

        writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);

        for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
                work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);

        return IRQ_RETVAL(work);
}

static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
{
        struct mlx4_eq *eq = eq_ptr;
        struct mlx4_dev *dev = eq->dev;

        mlx4_eq_int(dev, eq);

        /* MSI-X vectors always belong to us */
        return IRQ_HANDLED;
}

static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
                       int eq_num)
{
        return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
                        0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B);
}

static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                         int eq_num)
{
        return mlx4_cmd(dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ,
                        MLX4_CMD_TIME_CLASS_A);
}

static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                         int eq_num)
{
        return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 0, MLX4_CMD_HW2SW_EQ,
                            MLX4_CMD_TIME_CLASS_A);
}

static int mlx4_num_eq_uar(struct mlx4_dev *dev)
{
        /*
         * Each UAR holds 4 EQ doorbells.  To figure out how many UARs
         * we need to map, take the difference of highest index and
         * the lowest index we'll use and add 1.
         */
        return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 -
                dev->caps.reserved_eqs / 4 + 1;
}
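
/*
 * Rough example of the UAR math above and below, with hypothetical
 * numbers: if reserved_eqs == 32 and num_comp_vectors == 4, and the
 * bitmap hands out consecutive EQNs starting at 32, the driver uses
 * EQNs 32..36.  Those fall into UAR pages eqn / 4 == 8 and 9, so
 * mlx4_num_eq_uar() returns 2 and uar_map[] holds two entries.
 * mlx4_get_eq_uar() maps each page on demand and returns the doorbell
 * at offset 0x800 + 8 * (eqn % 4) within it, e.g. 0x808 for EQN 33.
 */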

static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int index;

        index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;

        if (!priv->eq_table.uar_map[index]) {
                priv->eq_table.uar_map[index] =
                        ioremap(pci_resource_start(dev->pdev, 2) +
                                ((eq->eqn / 4) << PAGE_SHIFT),
                                PAGE_SIZE);
                if (!priv->eq_table.uar_map[index]) {
                        mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
                                 eq->eqn);
                        return NULL;
                }
        }

        return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
}

static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
                          u8 intr, struct mlx4_eq *eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_eq_context *eq_context;
        int npages;
        u64 *dma_list = NULL;
        dma_addr_t t;
        u64 mtt_addr;
        int err = -ENOMEM;
        int i;

        eq->dev  = dev;
        eq->nent = roundup_pow_of_two(max(nent, 2));
        npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE;

        eq->page_list = kmalloc(npages * sizeof *eq->page_list,
                                GFP_KERNEL);
        if (!eq->page_list)
                goto err_out;

        for (i = 0; i < npages; ++i)
                eq->page_list[i].buf = NULL;

        dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
        if (!dma_list)
                goto err_out_free;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                goto err_out_free;
        eq_context = mailbox->buf;

        for (i = 0; i < npages; ++i) {
                eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
                                                          PAGE_SIZE, &t, GFP_KERNEL);
                if (!eq->page_list[i].buf)
                        goto err_out_free_pages;

                dma_list[i] = t;
                eq->page_list[i].map = t;

                memset(eq->page_list[i].buf, 0, PAGE_SIZE);
        }

        eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
        if (eq->eqn == -1)
                goto err_out_free_pages;

        eq->doorbell = mlx4_get_eq_uar(dev, eq);
        if (!eq->doorbell) {
                err = -ENOMEM;
                goto err_out_free_eq;
        }

        err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
        if (err)
                goto err_out_free_eq;

        err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
        if (err)
                goto err_out_free_mtt;

        memset(eq_context, 0, sizeof *eq_context);
        eq_context->flags         = cpu_to_be32(MLX4_EQ_STATUS_OK |
                                                MLX4_EQ_STATE_ARMED);
        eq_context->log_eq_size   = ilog2(eq->nent);
        eq_context->intr          = intr;
        eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;

        mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
        eq_context->mtt_base_addr_h = mtt_addr >> 32;
        eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

        err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
        if (err) {
                mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
                goto err_out_free_mtt;
        }

        kfree(dma_list);
        mlx4_free_cmd_mailbox(dev, mailbox);

        eq->cons_index = 0;

        return err;

err_out_free_mtt:
        mlx4_mtt_cleanup(dev, &eq->mtt);

err_out_free_eq:
        mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);

err_out_free_pages:
        for (i = 0; i < npages; ++i)
                if (eq->page_list[i].buf)
                        dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
                                          eq->page_list[i].buf,
                                          eq->page_list[i].map);

        mlx4_free_cmd_mailbox(dev, mailbox);

err_out_free:
        kfree(eq->page_list);
        kfree(dma_list);

err_out:
        return err;
}

static void mlx4_free_eq(struct mlx4_dev *dev,
                         struct mlx4_eq *eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cmd_mailbox *mailbox;
        int err;
        int npages = PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE;
        int i;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return;

        err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
        if (err)
                mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);

        if (0) {
                mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
                for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
                        if (i % 4 == 0)
                                pr_cont("[%02x] ", i * 4);
                        pr_cont(" %08x", be32_to_cpup(mailbox->buf + i * 4));
                        if ((i + 1) % 4 == 0)
                                pr_cont("\n");
                }
        }

        mlx4_mtt_cleanup(dev, &eq->mtt);
        for (i = 0; i < npages; ++i)
                pci_free_consistent(dev->pdev, PAGE_SIZE,
                                    eq->page_list[i].buf,
                                    eq->page_list[i].map);

        kfree(eq->page_list);
        mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
        mlx4_free_cmd_mailbox(dev, mailbox);
}

static void mlx4_free_irqs(struct mlx4_dev *dev)
{
        struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
        int i;

        if (eq_table->have_irq)
                free_irq(dev->pdev->irq, dev);
        for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
                if (eq_table->eq[i].have_irq) {
                        free_irq(eq_table->eq[i].irq, eq_table->eq + i);
                        eq_table->eq[i].have_irq = 0;
                }

        kfree(eq_table->irq_names);
}
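
/*
 * The interrupt clear register lives in the BAR and at the offset
 * reported by firmware (priv->fw.clr_int_bar / clr_int_base).  It is
 * only needed on the legacy INTx path, where mlx4_interrupt() writes
 * clr_mask to it before polling the EQs.
 */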

static int mlx4_map_clr_int(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
                                 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
        if (!priv->clr_base) {
                mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n");
                return -ENOMEM;
        }

        return 0;
}

static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        iounmap(priv->clr_base);
}

int mlx4_alloc_eq_table(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs,
                                    sizeof *priv->eq_table.eq, GFP_KERNEL);
        if (!priv->eq_table.eq)
                return -ENOMEM;

        return 0;
}

void mlx4_free_eq_table(struct mlx4_dev *dev)
{
        kfree(mlx4_priv(dev)->eq_table.eq);
}
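
/*
 * mlx4_init_eq_table() below performs the setup in this order: set up
 * the EQN bitmap and the UAR map array, map the interrupt clear
 * register, create one completion EQ per vector plus one EQ for
 * asynchronous events, request the IRQs (one per EQ under MSI-X, or a
 * single shared INTx line otherwise), map the async event mask onto
 * the async EQ with MAP_EQ, and finally arm every EQ by ringing its
 * doorbell.
 */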

int mlx4_init_eq_table(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int err;
        int i;

        priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev),
                                         sizeof *priv->eq_table.uar_map,
                                         GFP_KERNEL);
        if (!priv->eq_table.uar_map) {
                err = -ENOMEM;
                goto err_out_free;
        }

        err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
                               dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
        if (err)
                goto err_out_free;

        for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
                priv->eq_table.uar_map[i] = NULL;

        err = mlx4_map_clr_int(dev);
        if (err)
                goto err_out_bitmap;

        /*
         * clr_mask selects the bit for our INTA pin within the 64-bit
         * interrupt clear register; the mask is byte-swapped because it
         * is written with writel() into a big-endian register.
         */
        priv->eq_table.clr_mask =
                swab32(1 << (priv->eq_table.inta_pin & 31));
        priv->eq_table.clr_int = priv->clr_base +
                (priv->eq_table.inta_pin < 32 ? 4 : 0);

        priv->eq_table.irq_names =
                kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1),
                        GFP_KERNEL);
        if (!priv->eq_table.irq_names) {
                err = -ENOMEM;
                goto err_out_bitmap;
        }

        for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
                err = mlx4_create_eq(dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE,
                                     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
                                     &priv->eq_table.eq[i]);
                if (err) {
                        --i;
                        goto err_out_unmap;
                }
        }

        err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
                             (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0,
                             &priv->eq_table.eq[dev->caps.num_comp_vectors]);
        if (err)
                goto err_out_comp;

        if (dev->flags & MLX4_FLAG_MSI_X) {
                const char *eq_name;

                for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
                        if (i < dev->caps.num_comp_vectors) {
                                snprintf(priv->eq_table.irq_names +
                                         i * MLX4_IRQNAME_SIZE,
                                         MLX4_IRQNAME_SIZE,
                                         "mlx4-comp-%d@pci:%s", i,
                                         pci_name(dev->pdev));
                        } else {
                                snprintf(priv->eq_table.irq_names +
                                         i * MLX4_IRQNAME_SIZE,
                                         MLX4_IRQNAME_SIZE,
                                         "mlx4-async@pci:%s",
                                         pci_name(dev->pdev));
                        }

                        eq_name = priv->eq_table.irq_names +
                                  i * MLX4_IRQNAME_SIZE;
                        err = request_irq(priv->eq_table.eq[i].irq,
                                          mlx4_msi_x_interrupt, 0, eq_name,
                                          priv->eq_table.eq + i);
                        if (err)
                                goto err_out_async;

                        priv->eq_table.eq[i].have_irq = 1;
                }
        } else {
                snprintf(priv->eq_table.irq_names,
                         MLX4_IRQNAME_SIZE,
                         DRV_NAME "@pci:%s",
                         pci_name(dev->pdev));
                err = request_irq(dev->pdev->irq, mlx4_interrupt,
                                  IRQF_SHARED, priv->eq_table.irq_names, dev);
                if (err)
                        goto err_out_async;

                priv->eq_table.have_irq = 1;
        }

        err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
                          priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
        if (err)
                mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
                          priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err);

        for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
                eq_set_ci(&priv->eq_table.eq[i], 1);

        return 0;

err_out_async:
        mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]);

err_out_comp:
        i = dev->caps.num_comp_vectors - 1;

err_out_unmap:
        while (i >= 0) {
                mlx4_free_eq(dev, &priv->eq_table.eq[i]);
                --i;
        }
        mlx4_unmap_clr_int(dev);
        mlx4_free_irqs(dev);

err_out_bitmap:
        mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

err_out_free:
        kfree(priv->eq_table.uar_map);

        return err;
}

void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;

        mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
                    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);

        mlx4_free_irqs(dev);

        for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
                mlx4_free_eq(dev, &priv->eq_table.eq[i]);

        mlx4_unmap_clr_int(dev);

        for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
                if (priv->eq_table.uar_map[i])
                        iounmap(priv->eq_table.uar_map[i]);

        mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

        kfree(priv->eq_table.uar_map);
}