/barrelfish-2018-10-04/lib/devif/backends/net/mlx4/drivers/infiniband/hw/mthca/

Lines Matching refs:dev

165 static inline u64 async_mask(struct mthca_dev *dev)
167 return dev->mthca_flags & MTHCA_FLAG_SRQ ?
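The first group of hits is async_mask() (line 165): the mask of asynchronous events the async EQ subscribes to, which includes SRQ events only when the HCA supports SRQs. A minimal self-contained sketch of that logic; the flag bit and the two mask values below are illustrative stand-ins, not the driver's real constants:

#include <stdint.h>

/* Sketch of async_mask(); constants are assumed placeholder values. */
#define MTHCA_FLAG_SRQ         (1u << 2)    /* assumed flag bit */
#define MTHCA_ASYNC_EVENT_MASK 0x00ffull    /* assumed mask */
#define MTHCA_SRQ_EVENT_MASK   0x0f00ull    /* assumed mask */

struct mthca_dev_model { uint32_t mthca_flags; };

/* Subscribe to SRQ events only when the device advertises SRQ support. */
static inline uint64_t async_mask(const struct mthca_dev_model *dev)
{
	return (dev->mthca_flags & MTHCA_FLAG_SRQ) ?
		(MTHCA_ASYNC_EVENT_MASK | MTHCA_SRQ_EVENT_MASK) :
		MTHCA_ASYNC_EVENT_MASK;
}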
172 static inline void tavor_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
184 dev->kar + MTHCA_EQ_DOORBELL,
185 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
188 static inline void arbel_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
193 dev->eq_regs.arbel.eq_set_ci_base + eq->eqn * 8);
198 static inline void set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
200 if (mthca_is_memfree(dev))
201 arbel_set_eq_ci(dev, eq, ci);
203 tavor_set_eq_ci(dev, eq, ci);
206 static inline void tavor_eq_req_not(struct mthca_dev *dev, int eqn)
209 dev->kar + MTHCA_EQ_DOORBELL,
210 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
213 static inline void arbel_eq_req_not(struct mthca_dev *dev, u32 eqn_mask)
215 writel(eqn_mask, dev->eq_regs.arbel.eq_arm);
218 static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn)
220 if (!mthca_is_memfree(dev)) {
222 dev->kar + MTHCA_EQ_DOORBELL,
223 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
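Lines 172-223 are the doorbell helpers. Tavor-family HCAs take EQ commands through the kernel UAR page (dev->kar) under doorbell_lock, while MemFree (Arbel) hardware exposes dedicated per-EQ set_ci and arm registers; set_eq_ci() (line 198) picks the path with mthca_is_memfree(). A simplified model of that dispatch, with stand-in types and an assumed doorbell encoding:

#include <stdint.h>
#include <stdbool.h>

struct eq_model  { int eqn; };
struct dev_model {
	bool is_memfree;                   /* Arbel (MemFree) vs Tavor */
	volatile uint32_t *kar;            /* Tavor kernel UAR doorbell page */
	volatile uint32_t *eq_set_ci_base; /* Arbel per-EQ CI registers */
};

/* Tavor: post a { command | eqn, ci } doorbell pair through the UAR.
 * (1u << 24) is a placeholder for the real SET_CI command encoding. */
static void tavor_set_eq_ci(struct dev_model *dev, struct eq_model *eq, uint32_t ci)
{
	dev->kar[0] = (1u << 24) | (uint32_t)eq->eqn;
	dev->kar[1] = ci;
}

/* Arbel: write the index straight into this EQ's own set_ci register,
 * spaced eqn * 8 bytes apart as at line 193. */
static void arbel_set_eq_ci(struct dev_model *dev, struct eq_model *eq, uint32_t ci)
{
	dev->eq_set_ci_base[eq->eqn * 2] = ci;
}

/* Dispatch by hardware generation, as set_eq_ci() does at line 198. */
static void set_eq_ci(struct dev_model *dev, struct eq_model *eq, uint32_t ci)
{
	if (dev->is_memfree)
		arbel_set_eq_ci(dev, eq, ci);
	else
		tavor_set_eq_ci(dev, eq, ci);
}

tavor_eq_req_not()/arbel_eq_req_not() (lines 206-215) and disarm_cq() (line 218) follow the same split: rearm and CQ-disarm are UAR doorbell writes on Tavor, while Arbel rearms by writing an EQN mask to the single eq_arm register.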
245 static void port_change(struct mthca_dev *dev, int port, int active)
249 mthca_dbg(dev, "Port change to %s for port %d\n",
252 record.device = &dev->ib_dev;
259 static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
276 disarm_cq(dev, eq->eqn, disarm_cqn);
277 mthca_cq_completion(dev, disarm_cqn);
281 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
286 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
291 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
296 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
301 mthca_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
306 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
311 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
316 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
321 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
326 mthca_cmd_event(dev,
333 port_change(dev,
339 mthca_warn(dev, "CQ %s on CQN %06x\n",
343 mthca_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
348 mthca_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
356 mthca_warn(dev, "Unhandled event %02x(%02x) on EQ %d\n",
378 set_eq_ci(dev, eq, eq->cons_index);
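mthca_eq_int() (lines 259-378) is the core consume loop: it walks the EQ ring from cons_index, dispatches each software-owned EQE by type (CQ completion, QP/SRQ/CQ-error/port-change async events, command completion), hands the entry back to hardware, and finally publishes the new consumer index through set_eq_ci(). A skeleton of that loop; the types, field names, type codes, and ownership convention are stand-ins:

#include <stdint.h>
#include <stddef.h>

enum { EQE_TYPE_COMP, EQE_TYPE_ASYNC, EQE_TYPE_CMD };  /* assumed codes */

struct eqe_model { uint8_t owner_hw; uint8_t type; };
struct eq_ring   { struct eqe_model *ring; unsigned nent; uint32_t cons_index; };

/* nent must be a power of two for the index mask to work. */
static struct eqe_model *next_eqe_sw(struct eq_ring *eq)
{
	struct eqe_model *eqe = &eq->ring[eq->cons_index & (eq->nent - 1)];
	return eqe->owner_hw ? NULL : eqe;   /* NULL while hardware owns it */
}

static int eq_int_sketch(struct eq_ring *eq)
{
	struct eqe_model *eqe;
	int work = 0;

	while ((eqe = next_eqe_sw(eq)) != NULL) {
		switch (eqe->type) {
		case EQE_TYPE_COMP:   /* disarm_cq() + mthca_cq_completion() */
			break;
		case EQE_TYPE_ASYNC:  /* mthca_qp_event()/mthca_srq_event()/port_change() */
			break;
		case EQE_TYPE_CMD:    /* mthca_cmd_event() */
			break;
		default:              /* mthca_warn("Unhandled event ...") */
			break;
		}
		eqe->owner_hw = 1;    /* give the entry back to hardware */
		++eq->cons_index;
		work = 1;
	}
	return work;  /* caller publishes cons_index via set_eq_ci() (line 378) */
}

The return value tells the caller whether any entries were consumed, which the interrupt handlers use to decide whether a CI update is needed (lines 408-410 and 440-443).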
392 struct mthca_dev *dev = dev_ptr;
396 if (dev->eq_table.clr_mask)
397 writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);
399 ecr = readl(dev->eq_regs.tavor.ecr_base + 4);
403 writel(ecr, dev->eq_regs.tavor.ecr_base +
407 if (ecr & dev->eq_table.eq[i].eqn_mask) {
408 if (mthca_eq_int(dev, &dev->eq_table.eq[i]))
409 tavor_set_eq_ci(dev, &dev->eq_table.eq[i],
410 dev->eq_table.eq[i].cons_index);
411 tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);
420 struct mthca_dev *dev = eq->dev;
422 mthca_eq_int(dev, eq);
423 tavor_set_eq_ci(dev, eq, eq->cons_index);
424 tavor_eq_req_not(dev, eq->eqn);
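Lines 392-424 are the Tavor interrupt paths. The shared INTx handler reads the event cause register (ECR), returns "not ours" if it is zero, acknowledges the set bits, then services and rearms exactly the EQs whose bits were set; the MSI-X variant (lines 420-424) skips the demux because each vector maps to one EQ. A model with stubbed helpers and an assumed clear-register layout:

#include <stdint.h>
#include <stdbool.h>

#define NUM_EQS 3   /* comp, async, cmd: see lines 812-826 */

struct teq  { uint32_t eqn_mask; };
struct tdev {
	volatile uint32_t *ecr;      /* event cause register (ecr_base + 4, line 399) */
	volatile uint32_t *ecr_clr;  /* assumed write-to-clear counterpart */
	struct teq eq[NUM_EQS];
};

/* Stand-ins for mthca_eq_int(), tavor_set_eq_ci(), tavor_eq_req_not(). */
static int  eq_int_stub(struct tdev *d, struct teq *e)  { (void)d; (void)e; return 1; }
static void set_ci_stub(struct tdev *d, struct teq *e)  { (void)d; (void)e; }
static void req_not_stub(struct tdev *d, struct teq *e) { (void)d; (void)e; }

static bool tavor_interrupt(struct tdev *dev)
{
	uint32_t ecr = *dev->ecr;
	if (!ecr)
		return false;             /* IRQ_NONE: some other device's line */

	*dev->ecr_clr = ecr;              /* write the bits back to acknowledge */

	for (int i = 0; i < NUM_EQS; ++i) {
		if (ecr & dev->eq[i].eqn_mask) {
			if (eq_int_stub(dev, &dev->eq[i]))
				set_ci_stub(dev, &dev->eq[i]);
			req_not_stub(dev, &dev->eq[i]);
		}
	}
	return true;                      /* IRQ_HANDLED */
}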
432 struct mthca_dev *dev = dev_ptr;
436 if (dev->eq_table.clr_mask)
437 writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);
440 if (mthca_eq_int(dev, &dev->eq_table.eq[i])) {
442 arbel_set_eq_ci(dev, &dev->eq_table.eq[i],
443 dev->eq_table.eq[i].cons_index);
446 arbel_eq_req_not(dev, dev->eq_table.arm_mask);
454 struct mthca_dev *dev = eq->dev;
456 mthca_eq_int(dev, eq);
457 arbel_set_eq_ci(dev, eq, eq->cons_index);
458 arbel_eq_req_not(dev, eq->eqn_mask);
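The Arbel handlers (lines 432-458) have no ECR to consult: the shared handler polls every EQ, updates the consumer index only where work was found, and rearms all EQs at once by writing the combined arm_mask (accumulated at line 563) to the single eq_arm register. A compact model with stand-in types:

#include <stdint.h>

#define NUM_EQS 3

struct aeq  { uint32_t cons_index; };
struct adev {
	volatile uint32_t *eq_arm;  /* single arm register (line 215) */
	uint32_t arm_mask;          /* OR of all eqn_masks (line 563) */
	struct aeq eq[NUM_EQS];
};

/* Stand-ins for mthca_eq_int() and arbel_set_eq_ci(). */
static int  aeq_int_stub(struct adev *d, struct aeq *e) { (void)d; (void)e; return 0; }
static void aset_ci_stub(struct adev *d, struct aeq *e) { (void)d; (void)e; }

static int arbel_interrupt(struct adev *dev)
{
	for (int i = 0; i < NUM_EQS; ++i)
		if (aeq_int_stub(dev, &dev->eq[i]))
			aset_ci_stub(dev, &dev->eq[i]);

	*dev->eq_arm = dev->arm_mask;   /* one write rearms every EQ */
	return 1;                       /* IRQ_HANDLED */
}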
464 static int mthca_create_eq(struct mthca_dev *dev,
478 eq->dev = dev;
494 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
500 eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
514 eq->eqn = mthca_alloc(&dev->eq_table.alloc);
518 err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
532 if (mthca_is_memfree(dev))
536 if (mthca_is_memfree(dev)) {
537 eq_context->arbel_pd = cpu_to_be32(dev->driver_pd.pd_num);
539 eq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index);
540 eq_context->tavor_pd = cpu_to_be32(dev->driver_pd.pd_num);
545 err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn, &status);
547 mthca_warn(dev, "SW2HW_EQ failed (%d)\n", err);
551 mthca_warn(dev, "SW2HW_EQ returned status 0x%02x\n",
558 mthca_free_mailbox(dev, mailbox);
563 dev->eq_table.arm_mask |= eq->eqn_mask;
565 mthca_dbg(dev, "Allocated EQ %d with %d entries\n",
571 mthca_free_mr(dev, &eq->mr);
574 mthca_free(&dev->eq_table.alloc, eq->eqn);
579 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
584 mthca_free_mailbox(dev, mailbox);
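mthca_create_eq() (lines 464-584) builds one EQ: allocate the ring pages with dma_alloc_coherent(), take an EQ number from eq_table.alloc, register the pages as an MR via mthca_mr_alloc_phys(), fill in the EQ context (the PD and UAR fields differ between Tavor and Arbel, lines 536-540), and hand it to firmware with mthca_SW2HW_EQ(). The error labels unwind in reverse. A sketch of that sequence with every hardware step stubbed; only the ordering and the goto unwind are the point:

#include <stdbool.h>

/* Each stub stands for the driver step named in its comment. */
static bool alloc_eq_pages(void) { return true; }  /* dma_alloc_coherent() loop, line 500 */
static bool alloc_eqn(void)      { return true; }  /* mthca_alloc(&eq_table.alloc), line 514 */
static bool alloc_eq_mr(void)    { return true; }  /* mthca_mr_alloc_phys(), line 518 */
static bool sw2hw_eq(void)       { return true; }  /* mthca_SW2HW_EQ(), line 545 */
static void free_eq_mr(void)     {}
static void free_eqn(void)       {}
static void free_eq_pages(void)  {}

static int create_eq_sketch(void)
{
	if (!alloc_eq_pages())
		return -1;
	if (!alloc_eqn())
		goto err_pages;
	if (!alloc_eq_mr())
		goto err_eqn;
	if (!sw2hw_eq())          /* hand the filled-in EQ context to firmware */
		goto err_mr;
	/* success: record eqn_mask and OR it into eq_table.arm_mask (line 563) */
	return 0;

err_mr:
	free_eq_mr();
err_eqn:
	free_eqn();
err_pages:
	free_eq_pages();
	return -1;
}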
594 static void mthca_free_eq(struct mthca_dev *dev,
604 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
608 err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn, &status);
610 mthca_warn(dev, "HW2SW_EQ failed (%d)\n", err);
612 mthca_warn(dev, "HW2SW_EQ returned status 0x%02x\n", status);
614 dev->eq_table.arm_mask &= ~eq->eqn_mask;
617 mthca_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
627 mthca_free_mr(dev, &eq->mr);
629 pci_free_consistent(dev->pdev, PAGE_SIZE,
634 mthca_free_mailbox(dev, mailbox);
637 static void mthca_free_irqs(struct mthca_dev *dev)
641 if (dev->eq_table.have_irq)
642 free_irq(dev->pdev->irq, dev);
644 if (dev->eq_table.eq[i].have_irq) {
645 free_irq(dev->eq_table.eq[i].msi_x_vector,
646 dev->eq_table.eq + i);
647 dev->eq_table.eq[i].have_irq = 0;
651 static int mthca_map_reg(struct mthca_dev *dev,
655 unsigned long base = pci_resource_start(dev->pdev, 0);
664 static int mthca_map_eq_regs(struct mthca_dev *dev)
666 if (mthca_is_memfree(dev)) {
674 if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
675 dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
676 &dev->clr_base)) {
677 mthca_err(dev, "Couldn't map interrupt clear register, "
686 if (mthca_map_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
687 dev->fw.arbel.eq_arm_base) + 4, 4,
688 &dev->eq_regs.arbel.eq_arm)) {
689 mthca_err(dev, "Couldn't map EQ arm register, aborting.\n");
690 iounmap(dev->clr_base);
694 if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
695 dev->fw.arbel.eq_set_ci_base,
697 &dev->eq_regs.arbel.eq_set_ci_base)) {
698 mthca_err(dev, "Couldn't map EQ CI register, aborting.\n");
699 iounmap(dev->eq_regs.arbel.eq_arm);
700 iounmap(dev->clr_base);
704 if (mthca_map_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
705 &dev->clr_base)) {
706 mthca_err(dev, "Couldn't map interrupt clear register, "
711 if (mthca_map_reg(dev, MTHCA_ECR_BASE,
713 &dev->eq_regs.tavor.ecr_base)) {
714 mthca_err(dev, "Couldn't map ecr register, "
716 iounmap(dev->clr_base);
725 static void mthca_unmap_eq_regs(struct mthca_dev *dev)
727 if (mthca_is_memfree(dev)) {
728 iounmap(dev->eq_regs.arbel.eq_set_ci_base);
729 iounmap(dev->eq_regs.arbel.eq_arm);
730 iounmap(dev->clr_base);
732 iounmap(dev->eq_regs.tavor.ecr_base);
733 iounmap(dev->clr_base);
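mthca_map_eq_regs() (lines 664-716) ioremaps the interrupt-clear, EQ-arm, and EQ-CI registers. On Arbel the firmware reports each register as an address whose low bits are an offset into BAR 0, recovered by masking with (pci_resource_len(dev->pdev, 0) - 1) as at lines 674, 686, and 694; Tavor uses fixed offsets (MTHCA_CLR_INT_BASE, MTHCA_ECR_BASE). A sketch of the offset computation, with a made-up example value:

#include <stdint.h>

/* bar_len must be a power of two (PCI BAR sizes are), so bar_len - 1 is
 * a mask keeping only the in-BAR offset bits of the firmware address. */
static uint64_t bar_offset(uint64_t bar_len, uint64_t fw_base)
{
	return (bar_len - 1) & fw_base;
}

/* e.g. with a 1 MiB BAR: bar_offset(1ull << 20, 0x12345f080ull) == 0x5f080 */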
737 int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
748 dev->eq_table.icm_virt = icm_virt;
749 dev->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
750 if (!dev->eq_table.icm_page)
752 dev->eq_table.icm_dma = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0,
754 if (pci_dma_mapping_error(dev->pdev, dev->eq_table.icm_dma)) {
755 __free_page(dev->eq_table.icm_page);
759 ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt, &status);
763 pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
765 __free_page(dev->eq_table.icm_page);
771 void mthca_unmap_eq_icm(struct mthca_dev *dev)
775 mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, 1, &status);
776 pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
778 __free_page(dev->eq_table.icm_page);
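mthca_map_eq_icm()/mthca_unmap_eq_icm() (lines 737-778) back one page of the EQ-context region of ICM: allocate a host page, DMA-map it (checking the mapping at line 754), then tell firmware via mthca_MAP_ICM_page() that icm_virt is now backed by that bus address; teardown runs in exact reverse (UNMAP_ICM, unmap, free). A loose user-space sketch of the same ordering, with malloc() and trivial stubs standing in for alloc_page(), pci_map_page(), and the firmware command:

#include <stdint.h>
#include <stdlib.h>

/* Stand-ins for pci_map_page() and mthca_MAP_ICM_page(). */
static uint64_t dma_map_stub(void *page)
{
	return (uint64_t)(uintptr_t)page;   /* identity "bus address" */
}
static int fw_map_icm_stub(uint64_t dma, uint64_t icm_virt)
{
	(void)dma; (void)icm_virt;
	return 0;
}

static int map_eq_icm_sketch(uint64_t icm_virt, void **page_out, uint64_t *dma_out)
{
	void *page = malloc(4096);          /* alloc_page(), line 749 */
	if (!page)
		return -1;

	uint64_t dma = dma_map_stub(page);  /* pci_map_page(), line 752 */

	if (fw_map_icm_stub(dma, icm_virt)) {  /* mthca_MAP_ICM_page(), line 759 */
		free(page);                 /* unwind in reverse order */
		return -1;
	}
	*page_out = page;
	*dma_out  = dma;
	return 0;
}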
781 int mthca_init_eq_table(struct mthca_dev *dev)
788 err = mthca_alloc_init(&dev->eq_table.alloc,
789 dev->limits.num_eqs,
790 dev->limits.num_eqs - 1,
791 dev->limits.reserved_eqs);
795 err = mthca_map_eq_regs(dev);
799 if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
800 dev->eq_table.clr_mask = 0;
802 dev->eq_table.clr_mask =
803 swab32(1 << (dev->eq_table.inta_pin & 31));
804 dev->eq_table.clr_int = dev->clr_base +
805 (dev->eq_table.inta_pin < 32 ? 4 : 0);
808 dev->eq_table.arm_mask = 0;
810 intr = dev->eq_table.inta_pin;
812 err = mthca_create_eq(dev, dev->limits.num_cqs + MTHCA_NUM_SPARE_EQE,
813 (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr,
814 &dev->eq_table.eq[MTHCA_EQ_COMP]);
818 err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE + MTHCA_NUM_SPARE_EQE,
819 (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 129 : intr,
820 &dev->eq_table.eq[MTHCA_EQ_ASYNC]);
824 err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE + MTHCA_NUM_SPARE_EQE,
825 (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 130 : intr,
826 &dev->eq_table.eq[MTHCA_EQ_CMD]);
830 if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
838 err = request_irq(dev->eq_table.eq[i].msi_x_vector,
839 mthca_is_memfree(dev) ?
842 0, eq_name[i], dev->eq_table.eq + i);
845 dev->eq_table.eq[i].have_irq = 1;
848 err = request_irq(dev->pdev->irq,
849 mthca_is_memfree(dev) ?
852 IRQF_SHARED, DRV_NAME, dev);
855 dev->eq_table.have_irq = 1;
858 err = mthca_MAP_EQ(dev, async_mask(dev),
859 0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
861 mthca_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
862 dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, err);
864 mthca_warn(dev, "MAP_EQ for async EQ %d returned status 0x%02x\n",
865 dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, status);
867 err = mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
868 0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);
870 mthca_warn(dev, "MAP_EQ for cmd EQ %d failed (%d)\n",
871 dev->eq_table.eq[MTHCA_EQ_CMD].eqn, err);
873 mthca_warn(dev, "MAP_EQ for cmd EQ %d returned status 0x%02x\n",
874 dev->eq_table.eq[MTHCA_EQ_CMD].eqn, status);
877 if (mthca_is_memfree(dev))
878 arbel_eq_req_not(dev, dev->eq_table.eq[i].eqn_mask);
880 tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);
885 mthca_free_irqs(dev);
886 mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_CMD]);
889 mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_ASYNC]);
892 mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_COMP]);
895 mthca_unmap_eq_regs(dev);
898 mthca_alloc_cleanup(&dev->eq_table.alloc);
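mthca_init_eq_table() (lines 781-898) ties it together: reserve EQ numbers, map the registers, derive clr_mask/clr_int from inta_pin when not using MSI-X (lines 802-805), create the three EQs (completion, async, and command, on MSI-X vectors 128-130 or the shared INTx line), wire up interrupts, bind event classes to EQs with mthca_MAP_EQ(), and arm everything. The interesting branch is the interrupt wiring: MSI-X gives each EQ its own vector and a per-EQ handler with no demultiplexing, while INTx installs one shared handler. A sketch of that decision with stand-in names:

#include <stdbool.h>

enum { EQ_COMP, EQ_ASYNC, EQ_CMD, NUM_EQ };

typedef void (*irq_handler_t)(void *ctx);

/* Stand-ins: per_eq_handler models tavor/arbel_msi_x_interrupt (one EQ
 * per vector); shared_handler models tavor/arbel_interrupt (demux inside). */
static void per_eq_handler(void *eq)  { (void)eq; }
static void shared_handler(void *dev) { (void)dev; }
static int  request_irq_stub(int vector, irq_handler_t h, void *ctx)
{
	(void)vector; (void)h; (void)ctx;
	return 0;
}

struct eq_slot { int msi_x_vector; };

static int wire_irqs(bool msi_x, struct eq_slot eq[NUM_EQ], void *dev, int intx_irq)
{
	if (msi_x) {
		/* vectors 128..130 in the listing (lines 813-825) */
		for (int i = 0; i < NUM_EQ; ++i)
			if (request_irq_stub(eq[i].msi_x_vector, per_eq_handler, &eq[i]))
				return -1;
		return 0;
	}
	/* legacy INTx: one shared line for all three EQs (lines 848-852) */
	return request_irq_stub(intx_irq, shared_handler, dev);
}

After the IRQs are wired, mthca_MAP_EQ() is called once with async_mask(dev) for the async EQ and once with MTHCA_CMD_EVENT_MASK for the command EQ (lines 858-868); the third argument is an unmap flag, which is why mthca_cleanup_eq_table() below repeats the same calls with 1 (lines 909-912) to unbind the events before freeing the EQs.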
902 void mthca_cleanup_eq_table(struct mthca_dev *dev)
907 mthca_free_irqs(dev);
909 mthca_MAP_EQ(dev, async_mask(dev),
910 1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
911 mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
912 1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);
915 mthca_free_eq(dev, &dev->eq_table.eq[i]);
917 mthca_unmap_eq_regs(dev);
919 mthca_alloc_cleanup(&dev->eq_table.alloc);