Lines Matching refs:dev

190 static u32 mthca_alloc_mtt_range(struct mthca_dev *dev, int order,
198 if (mthca_is_memfree(dev))
199 if (mthca_table_get_range(dev, dev->mr_table.mtt_table, seg,
208 static struct mthca_mtt *__mthca_alloc_mtt(struct mthca_dev *dev, int size,
223 for (i = dev->limits.mtt_seg_size / 8; i < size; i <<= 1)
226 mtt->first_seg = mthca_alloc_mtt_range(dev, mtt->order, buddy);
235 struct mthca_mtt *mthca_alloc_mtt(struct mthca_dev *dev, int size)
237 return __mthca_alloc_mtt(dev, size, &dev->mr_table.mtt_buddy);
240 void mthca_free_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt)
247 mthca_table_put_range(dev, dev->mr_table.mtt_table,
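
The order set up by the loop at line 223 is the number of times the base unit (mtt_seg_size / 8 eight-byte entries per segment) must be doubled to cover `size` entries; mthca_alloc_mtt_range (line 226) then asks the buddy allocator for a block of that order. A standalone sketch of just that arithmetic follows; the 512-byte segment size is an assumed example value, not taken from the driver.

/*
 * Standalone sketch (not driver code) of the buddy-order computation at
 * line 223: each MTT segment holds mtt_seg_size / 8 eight-byte entries,
 * and the order counts how many doublings of that unit cover `size`
 * entries.  The 512-byte segment size is a made-up example value.
 */
#include <stdio.h>

static int mtt_order(int seg_entries, int size)
{
	int order = 0;
	int i;

	for (i = seg_entries; i < size; i <<= 1)
		++order;

	return order;
}

int main(void)
{
	int seg_entries = 512 / 8;	/* hypothetical mtt_seg_size = 512 bytes */

	printf("100 entries -> order %d\n", mtt_order(seg_entries, 100));	/* 1 */
	printf(" 64 entries -> order %d\n", mtt_order(seg_entries, 64));	/* 0 */
	printf("300 entries -> order %d\n", mtt_order(seg_entries, 300));	/* 3 */
	return 0;
}
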
254 static int __mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
263 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
269 mtt_entry[0] = cpu_to_be64(dev->mr_table.mtt_base +
270 mtt->first_seg * dev->limits.mtt_seg_size +
284 err = mthca_WRITE_MTT(dev, mailbox, (i + 1) & ~1, &status);
286 mthca_warn(dev, "WRITE_MTT failed (%d)\n", err);
290 mthca_warn(dev, "WRITE_MTT returned status 0x%02x\n",
302 mthca_free_mailbox(dev, mailbox);
306 int mthca_write_mtt_size(struct mthca_dev *dev)
308 if (dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy ||
309 !(dev->mthca_flags & MTHCA_FLAG_FMR))
319 return mthca_is_memfree(dev) ? (PAGE_SIZE / sizeof (u64)) : 0x7ffffff;
322 static void mthca_tavor_write_mtt_seg(struct mthca_dev *dev,
329 mtts = dev->mr_table.tavor_fmr.mtt_base + mtt->first_seg * dev->limits.mtt_seg_size +
336 static void mthca_arbel_write_mtt_seg(struct mthca_dev *dev,
348 BUG_ON(s % dev->limits.mtt_seg_size);
350 mtts = mthca_table_find(dev->mr_table.mtt_table, mtt->first_seg +
351 s / dev->limits.mtt_seg_size, &dma_handle);
358 dma_sync_single(&dev->pdev->dev, dma_handle, list_len * sizeof (u64), DMA_TO_DEVICE);
361 int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
364 int size = mthca_write_mtt_size(dev);
367 if (dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy ||
368 !(dev->mthca_flags & MTHCA_FLAG_FMR))
369 return __mthca_write_mtt(dev, mtt, start_index, buffer_list, list_len);
373 if (mthca_is_memfree(dev))
374 mthca_arbel_write_mtt_seg(dev, mtt, start_index,
377 mthca_tavor_write_mtt_seg(dev, mtt, start_index,
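
The visible lines of mthca_write_mtt show it fetching a per-write limit from mthca_write_mtt_size() (line 364), falling back to __mthca_write_mtt when FMRs are not in use (lines 367-369), and otherwise handing pieces of the page list to the Arbel or Tavor segment writer (lines 373-377). The loop itself is elided from this listing; the sketch below is an assumption about how such chunking works, not the driver's verbatim code.

/*
 * Sketch (assumed, not the driver's exact loop) of splitting a page list
 * into chunks of at most max_per_write entries, as the per-segment calls
 * at lines 373-377 suggest mthca_write_mtt does with the value returned
 * by mthca_write_mtt_size().
 */
#include <stdio.h>

static void write_seg(int start_index, int len)
{
	/* stands in for mthca_{arbel,tavor}_write_mtt_seg() */
	printf("write %d entries at MTT index %d\n", len, start_index);
}

static void write_mtt(const unsigned long long *buffer_list, int list_len,
		      int max_per_write)
{
	int start_index = 0;

	while (list_len > 0) {
		int chunk = list_len < max_per_write ? list_len : max_per_write;

		write_seg(start_index, chunk);

		start_index += chunk;
		buffer_list += chunk;
		list_len    -= chunk;
	}
}

int main(void)
{
	unsigned long long pages[10] = { 0 };

	write_mtt(pages, 10, 4);	/* writes 4 + 4 + 2 entries */
	return 0;
}
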
408 static inline u32 hw_index_to_key(struct mthca_dev *dev, u32 ind)
410 if (mthca_is_memfree(dev))
416 static inline u32 key_to_hw_index(struct mthca_dev *dev, u32 key)
418 if (mthca_is_memfree(dev))
424 static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
426 if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
432 int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
444 key = mthca_alloc(&dev->mr_table.mpt_alloc);
447 key = adjust_key(dev, key);
448 mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);
450 if (mthca_is_memfree(dev)) {
451 err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
456 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
481 cpu_to_be64(dev->mr_table.mtt_base +
482 mr->mtt->first_seg * dev->limits.mtt_seg_size);
485 mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
495 err = mthca_SW2HW_MPT(dev, mailbox,
496 key & (dev->limits.num_mpts - 1),
499 mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
502 mthca_warn(dev, "SW2HW_MPT returned status 0x%02x\n",
508 mthca_free_mailbox(dev, mailbox);
512 mthca_free_mailbox(dev, mailbox);
515 mthca_table_put(dev, dev->mr_table.mpt_table, key);
518 mthca_free(&dev->mr_table.mpt_alloc, key);
522 int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
526 return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
529 int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
536 mr->mtt = mthca_alloc_mtt(dev, list_len);
540 err = mthca_write_mtt(dev, mr->mtt, 0, buffer_list, list_len);
542 mthca_free_mtt(dev, mr->mtt);
546 err = mthca_mr_alloc(dev, pd, buffer_size_shift, iova,
549 mthca_free_mtt(dev, mr->mtt);
555 static void mthca_free_region(struct mthca_dev *dev, u32 lkey)
557 mthca_table_put(dev, dev->mr_table.mpt_table,
558 key_to_hw_index(dev, lkey));
560 mthca_free(&dev->mr_table.mpt_alloc, key_to_hw_index(dev, lkey));
563 void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr)
568 err = mthca_HW2SW_MPT(dev, NULL,
569 key_to_hw_index(dev, mr->ibmr.lkey) &
570 (dev->limits.num_mpts - 1),
573 mthca_warn(dev, "HW2SW_MPT failed (%d)\n", err);
575 mthca_warn(dev, "HW2SW_MPT returned status 0x%02x\n",
578 mthca_free_region(dev, mr->ibmr.lkey);
579 mthca_free_mtt(dev, mr->mtt);
582 int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
598 if (mthca_is_memfree(dev) &&
604 key = mthca_alloc(&dev->mr_table.mpt_alloc);
607 key = adjust_key(dev, key);
609 idx = key & (dev->limits.num_mpts - 1);
610 mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);
612 if (mthca_is_memfree(dev)) {
613 err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
617 mr->mem.arbel.mpt = mthca_table_find(dev->mr_table.mpt_table, key, NULL);
620 mr->mem.tavor.mpt = dev->mr_table.tavor_fmr.mpt_base +
623 mr->mtt = __mthca_alloc_mtt(dev, list_len, dev->mr_table.fmr_mtt_buddy);
629 mtt_seg = mr->mtt->first_seg * dev->limits.mtt_seg_size;
631 if (mthca_is_memfree(dev)) {
632 mr->mem.arbel.mtts = mthca_table_find(dev->mr_table.mtt_table,
637 mr->mem.tavor.mtts = dev->mr_table.tavor_fmr.mtt_base + mtt_seg;
639 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
657 mpt_entry->mtt_seg = cpu_to_be64(dev->mr_table.mtt_base + mtt_seg);
660 mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
670 err = mthca_SW2HW_MPT(dev, mailbox,
671 key & (dev->limits.num_mpts - 1),
674 mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
678 mthca_warn(dev, "SW2HW_MPT returned status 0x%02x\n",
684 mthca_free_mailbox(dev, mailbox);
688 mthca_free_mailbox(dev, mailbox);
691 mthca_free_mtt(dev, mr->mtt);
694 mthca_table_put(dev, dev->mr_table.mpt_table, key);
697 mthca_free(&dev->mr_table.mpt_alloc, key);
701 int mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr)
706 mthca_free_region(dev, fmr->ibmr.lkey);
707 mthca_free_mtt(dev, fmr->mtt);
744 struct mthca_dev *dev = to_mdev(ibfmr->device);
756 key += dev->limits.num_mpts;
785 struct mthca_dev *dev = to_mdev(ibfmr->device);
796 if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
799 key += dev->limits.num_mpts;
810 dma_sync_single(&dev->pdev->dev, fmr->mem.arbel.dma_handle,
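
Lines 756 and 799 show each FMR remap advancing the key by dev->limits.num_mpts, while the MPT slot is always addressed as key & (num_mpts - 1) (lines 609, 671). With num_mpts a power of two, the bump changes the key handed back to consumers without moving to a different MPT entry. The sketch below shows only that arithmetic with made-up numbers; it ignores the extra Sinai-optimization adjustment hinted at by line 796.

/*
 * Sketch of the key-rotation arithmetic implied by "key += num_mpts"
 * (lines 756, 799) combined with "key & (num_mpts - 1)" (lines 609, 671):
 * the MPT index stays fixed while the key value changes on every remap.
 * num_mpts = 1024 and the starting key are arbitrary example values.
 */
#include <stdio.h>

int main(void)
{
	unsigned int num_mpts = 1024;	/* hypothetical dev->limits.num_mpts */
	unsigned int key = 5;		/* hypothetical starting key */
	int i;

	for (i = 0; i < 3; i++) {
		printf("key %#x -> MPT index %u\n", key, key & (num_mpts - 1));
		key += num_mpts;	/* what each remap does */
	}
	return 0;
}
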
827 void mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
837 void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
847 int mthca_init_mr_table(struct mthca_dev *dev)
852 err = mthca_alloc_init(&dev->mr_table.mpt_alloc,
853 dev->limits.num_mpts,
854 ~0, dev->limits.reserved_mrws);
858 if (!mthca_is_memfree(dev) &&
859 (dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN))
860 dev->limits.fmr_reserved_mtts = 0;
862 dev->mthca_flags |= MTHCA_FLAG_FMR;
864 if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
865 mthca_dbg(dev, "Memory key throughput optimization activated.\n");
867 err = mthca_buddy_init(&dev->mr_table.mtt_buddy,
868 fls(dev->limits.num_mtt_segs - 1));
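
The buddy allocator is sized with fls(num_mtt_segs - 1) (line 868), i.e. the smallest order k with 2^k >= num_mtt_segs; the same idiom recurs for the FMR-reserved and reserved MTT counts at lines 877 and 936. A minimal userspace sketch of that computation follows, using a portable stand-in for the kernel's fls() helper.

/*
 * Userspace sketch of the fls(n - 1) idiom at lines 868, 877 and 936:
 * for n >= 1 it yields the smallest k with 2^k >= n, i.e. the buddy
 * order needed to cover n segments.  fls() here is a portable stand-in,
 * not the kernel implementation.
 */
#include <stdio.h>

static int fls(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;	/* position of the highest set bit, 0 if x == 0 */
}

int main(void)
{
	unsigned int n;

	for (n = 1; n <= 9; n++)
		printf("n = %u -> order fls(n - 1) = %d\n", n, fls(n - 1));
	return 0;
}
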
873 dev->mr_table.tavor_fmr.mpt_base = NULL;
874 dev->mr_table.tavor_fmr.mtt_base = NULL;
876 if (dev->limits.fmr_reserved_mtts) {
877 i = fls(dev->limits.fmr_reserved_mtts - 1);
880 mthca_warn(dev, "Unable to reserve 2^31 FMR MTTs.\n");
886 mtts = dev->limits.num_mtt_segs;
887 mpts = dev->limits.num_mpts;
890 if (!mthca_is_memfree(dev) &&
891 (dev->mthca_flags & MTHCA_FLAG_FMR)) {
893 addr = pci_resource_start(dev->pdev, 4) +
894 ((pci_resource_len(dev->pdev, 4) - 1) &
895 dev->mr_table.mpt_base);
897 dev->mr_table.tavor_fmr.mpt_base =
900 if (!dev->mr_table.tavor_fmr.mpt_base) {
901 mthca_warn(dev, "MPT ioremap for FMR failed.\n");
906 addr = pci_resource_start(dev->pdev, 4) +
907 ((pci_resource_len(dev->pdev, 4) - 1) &
908 dev->mr_table.mtt_base);
910 dev->mr_table.tavor_fmr.mtt_base =
911 ioremap(addr, mtts * dev->limits.mtt_seg_size);
912 if (!dev->mr_table.tavor_fmr.mtt_base) {
913 mthca_warn(dev, "MTT ioremap for FMR failed.\n");
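
Lines 893-895 and 906-908 turn the DDR-side MPT and MTT table addresses into ioremap targets by masking them with (BAR 4 length - 1) and adding the BAR's start; with a power-of-two BAR length the mask extracts the table's offset within the hidden-DDR window. The sketch below shows only that masking with made-up addresses and sizes.

/*
 * Sketch of the offset computation at lines 893-895 / 906-908: with a
 * power-of-two BAR length, (bar_len - 1) & table_addr extracts the
 * table's offset inside the DDR window, which is then added to the BAR
 * start before ioremap().  All numbers below are invented examples.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long bar_start  = 0xf0000000ULL;		/* hypothetical pci_resource_start(pdev, 4) */
	unsigned long long bar_len    = 0x02000000ULL;		/* hypothetical 32 MB BAR */
	unsigned long long table_addr = 0x00000001001c0000ULL;	/* hypothetical DDR-side mpt_base */

	unsigned long long addr = bar_start + ((bar_len - 1) & table_addr);

	printf("ioremap at %#llx (offset %#llx within BAR 4)\n",
	       addr, (bar_len - 1) & table_addr);
	return 0;
}
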
919 if (dev->limits.fmr_reserved_mtts) {
920 err = mthca_buddy_init(&dev->mr_table.tavor_fmr.mtt_buddy, fls(mtts - 1));
925 err = mthca_buddy_alloc(&dev->mr_table.mtt_buddy, fls(mtts - 1));
929 dev->mr_table.fmr_mtt_buddy =
930 &dev->mr_table.tavor_fmr.mtt_buddy;
932 dev->mr_table.fmr_mtt_buddy = &dev->mr_table.mtt_buddy;
935 if (dev->limits.reserved_mtts) {
936 i = fls(dev->limits.reserved_mtts - 1);
938 if (mthca_alloc_mtt_range(dev, i,
939 dev->mr_table.fmr_mtt_buddy) == -1) {
940 mthca_warn(dev, "MTT table of order %d is too small.\n",
941 dev->mr_table.fmr_mtt_buddy->max_order);
951 if (dev->limits.fmr_reserved_mtts)
952 mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy);
955 if (dev->mr_table.tavor_fmr.mtt_base)
956 iounmap(dev->mr_table.tavor_fmr.mtt_base);
959 if (dev->mr_table.tavor_fmr.mpt_base)
960 iounmap(dev->mr_table.tavor_fmr.mpt_base);
963 mthca_buddy_cleanup(&dev->mr_table.mtt_buddy);
966 mthca_alloc_cleanup(&dev->mr_table.mpt_alloc);
971 void mthca_cleanup_mr_table(struct mthca_dev *dev)
974 if (dev->limits.fmr_reserved_mtts)
975 mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy);
977 mthca_buddy_cleanup(&dev->mr_table.mtt_buddy);
979 if (dev->mr_table.tavor_fmr.mtt_base)
980 iounmap(dev->mr_table.tavor_fmr.mtt_base);
981 if (dev->mr_table.tavor_fmr.mpt_base)
982 iounmap(dev->mr_table.tavor_fmr.mpt_base);
984 mthca_alloc_cleanup(&dev->mr_table.mpt_alloc);