Lines matching defs:iommu in drivers/iommu/sun50i-iommu.c (Allwinner sun50i IOMMU driver); each entry below is prefixed with its source line number in that file.

14 #include <linux/iommu.h>
29 #include "iommu-pages.h"
101 struct iommu_device iommu;
125 struct sun50i_iommu *iommu;
138 static u32 iommu_read(struct sun50i_iommu *iommu, u32 offset)
140 return readl(iommu->base + offset);
143 static void iommu_write(struct sun50i_iommu *iommu, u32 offset, u32 value)
145 writel(value, iommu->base + offset);
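
Only the accessor bodies appear in the fragments above (source lines 138-145). Assembled, and assuming the driver's struct sun50i_iommu keeps the ioremapped register window in its base member, the two MMIO helpers are thin readl()/writel() wrappers:

#include <linux/io.h>

/* Thin MMIO accessors over the ioremapped register block; every other
 * register touch in this listing goes through these two helpers. */
static u32 iommu_read(struct sun50i_iommu *iommu, u32 offset)
{
	return readl(iommu->base + offset);
}

static void iommu_write(struct sun50i_iommu *iommu, u32 offset, u32 value)
{
	writel(value, iommu->base + offset);
}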
294 struct sun50i_iommu *iommu = sun50i_domain->iommu;
298 dma_sync_single_for_device(iommu->dev, dma, size, DMA_TO_DEVICE);
301 static void sun50i_iommu_zap_iova(struct sun50i_iommu *iommu,
307 iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_REG, iova);
308 iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_MASK_REG, GENMASK(31, 12));
309 iommu_write(iommu, IOMMU_TLB_IVLD_ENABLE_REG,
312 ret = readl_poll_timeout_atomic(iommu->base + IOMMU_TLB_IVLD_ENABLE_REG,
315 dev_warn(iommu->dev, "TLB invalidation timed out!\n");
318 static void sun50i_iommu_zap_ptw_cache(struct sun50i_iommu *iommu,
324 iommu_write(iommu, IOMMU_PC_IVLD_ADDR_REG, iova);
325 iommu_write(iommu, IOMMU_PC_IVLD_ENABLE_REG,
328 ret = readl_poll_timeout_atomic(iommu->base + IOMMU_PC_IVLD_ENABLE_REG,
331 dev_warn(iommu->dev, "PTW cache invalidation timed out!\n");
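
The fragments at source lines 301-331 outline the hardware invalidation helpers. Below is a minimal sketch of the write-then-poll pattern they use; the IOMMU_TLB_IVLD_ENABLE_ENABLE bit name and the poll delay/timeout values are assumptions for illustration, not taken from the listing.

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/iopoll.h>

/* Sketch: queue a single-page TLB invalidation and spin (atomically,
 * with iommu_lock held by the caller) until the hardware clears the
 * enable bit or the poll times out. */
static void example_zap_iova(struct sun50i_iommu *iommu, unsigned long iova)
{
	u32 reg;
	int ret;

	iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_REG, iova);
	iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_MASK_REG, GENMASK(31, 12));
	iommu_write(iommu, IOMMU_TLB_IVLD_ENABLE_REG,
		    IOMMU_TLB_IVLD_ENABLE_ENABLE);	/* assumed bit name */

	ret = readl_poll_timeout_atomic(iommu->base + IOMMU_TLB_IVLD_ENABLE_REG,
					reg, !reg, 1, 2000); /* assumed timeouts */
	if (ret)
		dev_warn(iommu->dev, "TLB invalidation timed out!\n");
}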
334 static void sun50i_iommu_zap_range(struct sun50i_iommu *iommu,
337 assert_spin_locked(&iommu->iommu_lock);
339 iommu_write(iommu, IOMMU_AUTO_GATING_REG, 0);
341 sun50i_iommu_zap_iova(iommu, iova);
342 sun50i_iommu_zap_iova(iommu, iova + SPAGE_SIZE);
344 sun50i_iommu_zap_iova(iommu, iova + size);
345 sun50i_iommu_zap_iova(iommu, iova + size + SPAGE_SIZE);
347 sun50i_iommu_zap_ptw_cache(iommu, iova);
348 sun50i_iommu_zap_ptw_cache(iommu, iova + SZ_1M);
350 sun50i_iommu_zap_ptw_cache(iommu, iova + size);
351 sun50i_iommu_zap_ptw_cache(iommu, iova + size + SZ_1M);
354 iommu_write(iommu, IOMMU_AUTO_GATING_REG, IOMMU_AUTO_GATING_ENABLE);
357 static int sun50i_iommu_flush_all_tlb(struct sun50i_iommu *iommu)
362 assert_spin_locked(&iommu->iommu_lock);
364 iommu_write(iommu,
375 ret = readl_poll_timeout_atomic(iommu->base + IOMMU_TLB_FLUSH_REG,
379 dev_warn(iommu->dev, "TLB Flush timed out!\n");
387 struct sun50i_iommu *iommu = sun50i_domain->iommu;
392 * .probe_device, and since we link our (single) domain to our iommu in
398 if (!iommu)
401 spin_lock_irqsave(&iommu->iommu_lock, flags);
402 sun50i_iommu_flush_all_tlb(iommu);
403 spin_unlock_irqrestore(&iommu->iommu_lock, flags);
410 struct sun50i_iommu *iommu = sun50i_domain->iommu;
413 spin_lock_irqsave(&iommu->iommu_lock, flags);
414 sun50i_iommu_zap_range(iommu, iova, size);
415 spin_unlock_irqrestore(&iommu->iommu_lock, flags);
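
Source lines 410-415 show how the map/unmap paths push an invalidation to the hardware: every zap runs under the iommu_lock spinlock, which is why the poll loops above use the atomic variant. A hedged sketch of such a sync helper (the function name and signature are assumptions):

#include <linux/spinlock.h>

/* Sketch: serialise the range invalidation against the fault handler
 * and all other register users via iommu_lock. */
static void example_iotlb_sync_range(struct sun50i_iommu_domain *sun50i_domain,
				     unsigned long iova, size_t size)
{
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	unsigned long flags;

	spin_lock_irqsave(&iommu->iommu_lock, flags);
	sun50i_iommu_zap_range(iommu, iova, size);
	spin_unlock_irqrestore(&iommu->iommu_lock, flags);
}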
426 static int sun50i_iommu_enable(struct sun50i_iommu *iommu)
432 if (!iommu->domain)
435 sun50i_domain = to_sun50i_domain(iommu->domain);
437 ret = reset_control_deassert(iommu->reset);
441 ret = clk_prepare_enable(iommu->clk);
445 spin_lock_irqsave(&iommu->iommu_lock, flags);
447 iommu_write(iommu, IOMMU_TTB_REG, sun50i_domain->dt_dma);
448 iommu_write(iommu, IOMMU_TLB_PREFETCH_REG,
455 iommu_write(iommu, IOMMU_INT_ENABLE_REG, IOMMU_INT_MASK);
456 iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_NONE),
470 iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_RD),
478 iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_WR),
486 ret = sun50i_iommu_flush_all_tlb(iommu);
488 spin_unlock_irqrestore(&iommu->iommu_lock, flags);
492 iommu_write(iommu, IOMMU_AUTO_GATING_REG, IOMMU_AUTO_GATING_ENABLE);
493 iommu_write(iommu, IOMMU_ENABLE_REG, IOMMU_ENABLE_ENABLE);
495 spin_unlock_irqrestore(&iommu->iommu_lock, flags);
500 clk_disable_unprepare(iommu->clk);
503 reset_control_assert(iommu->reset);
508 static void sun50i_iommu_disable(struct sun50i_iommu *iommu)
512 spin_lock_irqsave(&iommu->iommu_lock, flags);
514 iommu_write(iommu, IOMMU_ENABLE_REG, 0);
515 iommu_write(iommu, IOMMU_TTB_REG, 0);
517 spin_unlock_irqrestore(&iommu->iommu_lock, flags);
519 clk_disable_unprepare(iommu->clk);
520 reset_control_assert(iommu->reset);
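
Source lines 426-520 show the power sequencing: the reset line is deasserted before the clock is enabled, and the error and teardown paths undo the two in reverse order. A minimal sketch of that bring-up/unwind structure, with illustrative label names and the register programming elided:

#include <linux/clk.h>
#include <linux/reset.h>

/* Sketch: bring the block out of reset, gate the clock on, and unwind
 * in reverse order if anything fails. */
static int example_iommu_power_on(struct sun50i_iommu *iommu)
{
	int ret;

	ret = reset_control_deassert(iommu->reset);
	if (ret)
		return ret;

	ret = clk_prepare_enable(iommu->clk);
	if (ret)
		goto err_reset_assert;

	/* ... TTB, prefetch, interrupt and ACI registers programmed here ... */

	return 0;

err_reset_assert:
	reset_control_assert(iommu->reset);
	return ret;
}

static void example_iommu_power_off(struct sun50i_iommu *iommu)
{
	clk_disable_unprepare(iommu->clk);
	reset_control_assert(iommu->reset);
}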
523 static void *sun50i_iommu_alloc_page_table(struct sun50i_iommu *iommu,
529 page_table = kmem_cache_zalloc(iommu->pt_pool, gfp);
533 pt_dma = dma_map_single(iommu->dev, page_table, PT_SIZE, DMA_TO_DEVICE);
534 if (dma_mapping_error(iommu->dev, pt_dma)) {
535 dev_err(iommu->dev, "Couldn't map L2 Page Table\n");
536 kmem_cache_free(iommu->pt_pool, page_table);
546 static void sun50i_iommu_free_page_table(struct sun50i_iommu *iommu,
551 dma_unmap_single(iommu->dev, pt_phys, PT_SIZE, DMA_TO_DEVICE);
552 kmem_cache_free(iommu->pt_pool, page_table);
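
Source lines 523-552 show the L2 page-table lifecycle: tables come from a dedicated kmem_cache and are mapped DMA_TO_DEVICE so the page-table walker can read them, and the allocation is dropped again if the mapping fails. A hedged sketch of that allocate-map-check sequence (the ERR_PTR return convention is an assumption):

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/slab.h>

/* Sketch: zero-allocate an L2 table from the driver's slab cache and
 * hand it to the device; free it again if the DMA mapping fails. */
static void *example_alloc_pt(struct sun50i_iommu *iommu, gfp_t gfp)
{
	dma_addr_t pt_dma;
	u32 *page_table;

	page_table = kmem_cache_zalloc(iommu->pt_pool, gfp);
	if (!page_table)
		return ERR_PTR(-ENOMEM);

	pt_dma = dma_map_single(iommu->dev, page_table, PT_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(iommu->dev, pt_dma)) {
		dev_err(iommu->dev, "Couldn't map L2 Page Table\n");
		kmem_cache_free(iommu->pt_pool, page_table);
		return ERR_PTR(-ENOMEM);
	}

	return page_table;
}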
558 struct sun50i_iommu *iommu = sun50i_domain->iommu;
571 page_table = sun50i_iommu_alloc_page_table(iommu, gfp);
585 sun50i_iommu_free_page_table(iommu, drop_pt);
599 struct sun50i_iommu *iommu = sun50i_domain->iommu;
614 dev_err(iommu->dev,
712 static int sun50i_iommu_attach_domain(struct sun50i_iommu *iommu,
715 iommu->domain = &sun50i_domain->domain;
716 sun50i_domain->iommu = iommu;
718 sun50i_domain->dt_dma = dma_map_single(iommu->dev, sun50i_domain->dt,
720 if (dma_mapping_error(iommu->dev, sun50i_domain->dt_dma)) {
721 dev_err(iommu->dev, "Couldn't map L1 Page Table\n");
725 return sun50i_iommu_enable(iommu);
728 static void sun50i_iommu_detach_domain(struct sun50i_iommu *iommu,
749 sun50i_iommu_free_page_table(iommu, page_table);
753 sun50i_iommu_disable(iommu);
755 dma_unmap_single(iommu->dev, virt_to_phys(sun50i_domain->dt),
758 iommu->domain = NULL;
764 struct sun50i_iommu *iommu = dev_iommu_priv_get(dev);
769 if (iommu->domain == identity_domain)
772 sun50i_domain = to_sun50i_domain(iommu->domain);
774 sun50i_iommu_detach_domain(iommu, sun50i_domain);
791 struct sun50i_iommu *iommu;
793 iommu = sun50i_iommu_from_dev(dev);
794 if (!iommu)
801 if (iommu->domain == domain)
806 sun50i_iommu_attach_domain(iommu, sun50i_domain);
813 struct sun50i_iommu *iommu;
815 iommu = sun50i_iommu_from_dev(dev);
816 if (!iommu)
819 return &iommu->iommu;
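
Source lines 791-819 cover the two places where the IOMMU core hands the driver a struct device: attach looks up the backing IOMMU and bails out quietly when the device is not behind it, while probe_device returns the embedded struct iommu_device. A sketch of the latter (the -ENODEV error code is an assumption):

#include <linux/err.h>
#include <linux/iommu.h>

/* Sketch: resolve the sun50i_iommu instance behind a client device and
 * expose the iommu_device embedded in it to the IOMMU core. */
static struct iommu_device *example_probe_device(struct device *dev)
{
	struct sun50i_iommu *iommu;

	iommu = sun50i_iommu_from_dev(dev);
	if (!iommu)
		return ERR_PTR(-ENODEV);

	return &iommu->iommu;
}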
852 static void sun50i_iommu_report_fault(struct sun50i_iommu *iommu,
856 dev_err(iommu->dev, "Page fault for %pad (master %d, dir %s)\n",
859 if (iommu->domain)
860 report_iommu_fault(iommu->domain, iommu->dev, iova, prot);
862 dev_err(iommu->dev, "Page fault while iommu not attached to any domain?\n");
864 sun50i_iommu_zap_range(iommu, iova, SPAGE_SIZE);
867 static phys_addr_t sun50i_iommu_handle_pt_irq(struct sun50i_iommu *iommu,
875 assert_spin_locked(&iommu->iommu_lock);
877 iova = iommu_read(iommu, addr_reg);
878 blame = iommu_read(iommu, blame_reg);
886 sun50i_iommu_report_fault(iommu, master, iova, IOMMU_FAULT_READ);
891 static phys_addr_t sun50i_iommu_handle_perm_irq(struct sun50i_iommu *iommu)
899 assert_spin_locked(&iommu->iommu_lock);
901 blame = iommu_read(iommu, IOMMU_INT_STA_REG);
903 iova = iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG(master));
904 aci = sun50i_get_pte_aci(iommu_read(iommu,
941 sun50i_iommu_report_fault(iommu, master, iova, dir);
949 struct sun50i_iommu *iommu = dev_id;
951 spin_lock(&iommu->iommu_lock);
953 status = iommu_read(iommu, IOMMU_INT_STA_REG);
955 spin_unlock(&iommu->iommu_lock);
959 l1_status = iommu_read(iommu, IOMMU_L1PG_INT_REG);
960 l2_status = iommu_read(iommu, IOMMU_L2PG_INT_REG);
963 sun50i_iommu_handle_pt_irq(iommu,
967 sun50i_iommu_handle_pt_irq(iommu,
971 sun50i_iommu_handle_perm_irq(iommu);
973 iommu_write(iommu, IOMMU_INT_CLR_REG, status);
976 iommu_write(iommu, IOMMU_RESET_REG, ~resets);
977 iommu_write(iommu, IOMMU_RESET_REG, IOMMU_RESET_RELEASE_ALL);
979 spin_unlock(&iommu->iommu_lock);
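
Source lines 949-979 give the shape of the interrupt handler: everything happens under iommu_lock, a spurious interrupt is answered with IRQ_NONE, and a real fault ends with the status being cleared and the offending masters reset. A trimmed sketch of that flow:

#include <linux/interrupt.h>

/* Sketch: check-and-claim IRQ flow for the fault registers; the fault
 * dispatch and the master reset writes are elided. */
static irqreturn_t example_iommu_irq(int irq, void *dev_id)
{
	struct sun50i_iommu *iommu = dev_id;
	u32 status;

	spin_lock(&iommu->iommu_lock);

	status = iommu_read(iommu, IOMMU_INT_STA_REG);
	if (!(status & IOMMU_INT_MASK)) {
		spin_unlock(&iommu->iommu_lock);
		return IRQ_NONE;
	}

	/* ... dispatch to the L1/L2 page-table or permission handlers ... */

	iommu_write(iommu, IOMMU_INT_CLR_REG, status);

	spin_unlock(&iommu->iommu_lock);

	return IRQ_HANDLED;
}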
986 struct sun50i_iommu *iommu;
989 iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
990 if (!iommu)
992 spin_lock_init(&iommu->iommu_lock);
993 iommu->domain = &sun50i_iommu_identity_domain;
994 platform_set_drvdata(pdev, iommu);
995 iommu->dev = &pdev->dev;
997 iommu->pt_pool = kmem_cache_create(dev_name(&pdev->dev),
1001 if (!iommu->pt_pool)
1004 iommu->base = devm_platform_ioremap_resource(pdev, 0);
1005 if (IS_ERR(iommu->base)) {
1006 ret = PTR_ERR(iommu->base);
1016 iommu->clk = devm_clk_get(&pdev->dev, NULL);
1017 if (IS_ERR(iommu->clk)) {
1019 ret = PTR_ERR(iommu->clk);
1023 iommu->reset = devm_reset_control_get(&pdev->dev, NULL);
1024 if (IS_ERR(iommu->reset)) {
1026 ret = PTR_ERR(iommu->reset);
1030 ret = iommu_device_sysfs_add(&iommu->iommu, &pdev->dev,
1035 ret = iommu_device_register(&iommu->iommu, &sun50i_iommu_ops, &pdev->dev);
1040 dev_name(&pdev->dev), iommu);
1047 iommu_device_unregister(&iommu->iommu);
1050 iommu_device_sysfs_remove(&iommu->iommu);
1053 kmem_cache_destroy(iommu->pt_pool);
1059 { .compatible = "allwinner,sun50i-h6-iommu", },
1066 .name = "sun50i-iommu",
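
The tail of the listing (source lines 1059-1066) belongs to the OF match table and the platform driver definition. A hedged sketch of how those pieces typically tie together for this driver; the builtin_platform_driver() registration and the suppress_bind_attrs flag are assumptions:

#include <linux/of.h>
#include <linux/platform_device.h>

/* Sketch: match on the H6 compatible string and register a built-in
 * platform driver around the probe routine shown above. */
static const struct of_device_id sun50i_iommu_dt[] = {
	{ .compatible = "allwinner,sun50i-h6-iommu", },
	{ /* sentinel */ },
};

static struct platform_driver sun50i_iommu_driver = {
	.driver		= {
		.name			= "sun50i-iommu",
		.of_match_table		= sun50i_iommu_dt,
		.suppress_bind_attrs	= true,
	},
	.probe	= sun50i_iommu_probe,
};
builtin_platform_driver(sun50i_iommu_driver);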