Lines matching refs:mmu (references to the identifier "mmu" in the IPU3 imgu MMU driver)

21 #include "ipu3-mmu.h"
73 * @mmu: MMU to perform the invalidate operation on
78 static void imgu_mmu_tlb_invalidate(struct imgu_mmu *mmu)
80 writel(TLB_INVALIDATE, mmu->base + REG_TLB_INVALIDATE);
83 static void call_if_imgu_is_powered(struct imgu_mmu *mmu,
84 void (*func)(struct imgu_mmu *mmu))
86 if (!pm_runtime_get_if_in_use(mmu->dev))
89 func(mmu);
90 pm_runtime_put(mmu->dev);
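
The matches at lines 83-90 show a helper that guards hardware access behind runtime PM; the early return between lines 86 and 89 is not in the match set. A reconstructed sketch, assuming the function bails out when pm_runtime_get_if_in_use() reports the device is not runtime-active:

static void call_if_imgu_is_powered(struct imgu_mmu *mmu,
				    void (*func)(struct imgu_mmu *mmu))
{
	/*
	 * pm_runtime_get_if_in_use() only takes a usage-count reference
	 * when the device is already active; if it returns 0 (or an
	 * error), the hardware may be off and must not be touched, and
	 * there is no reference to drop with pm_runtime_put().
	 */
	if (!pm_runtime_get_if_in_use(mmu->dev))
		return;

	func(mmu);
	pm_runtime_put(mmu->dev);
}

This keeps TLB invalidation cheap when the imgu is suspended: a powered-off MMU has no stale TLB state to flush.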
95 * @mmu: MMU to set the CIO gate bit in.
101 static void imgu_mmu_set_halt(struct imgu_mmu *mmu, bool halt)
106 writel(halt, mmu->base + REG_GP_HALT);
107 ret = readl_poll_timeout(mmu->base + REG_GP_HALTED,
111 dev_err(mmu->dev, "failed to %s CIO gate halt\n",
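
Lines 101-111 outline the halt handshake: write the halt bit, then poll REG_GP_HALTED until the hardware acknowledges. A sketch of the whole function; the poll interval, timeout, and the exact completion condition are assumptions not visible in the matches:

static void imgu_mmu_set_halt(struct imgu_mmu *mmu, bool halt)
{
	int ret;
	u32 val;

	writel(halt, mmu->base + REG_GP_HALT);
	/*
	 * Assumed poll parameters: re-read every 1000 us, give up
	 * after 100000 us. Completion is signaled by bit 0 of
	 * REG_GP_HALTED matching the requested state.
	 */
	ret = readl_poll_timeout(mmu->base + REG_GP_HALTED,
				 val, (val & 1) == halt,
				 1000, 100000);
	if (ret)
		dev_err(mmu->dev, "failed to %s CIO gate halt\n",
			halt ? "set" : "clear");
}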
168 static u32 *imgu_mmu_get_l2pt(struct imgu_mmu *mmu, u32 l1pt_idx)
174 spin_lock_irqsave(&mmu->lock, flags);
176 l2pt = mmu->l2pts[l1pt_idx];
178 spin_unlock_irqrestore(&mmu->lock, flags);
182 spin_unlock_irqrestore(&mmu->lock, flags);
184 new_l2pt = imgu_mmu_alloc_page_table(mmu->dummy_page_pteval);
188 spin_lock_irqsave(&mmu->lock, flags);
190 dev_dbg(mmu->dev, "allocated page table %p for l1pt_idx %u\n",
193 l2pt = mmu->l2pts[l1pt_idx];
195 spin_unlock_irqrestore(&mmu->lock, flags);
201 mmu->l2pts[l1pt_idx] = new_l2pt;
204 mmu->l1pt[l1pt_idx] = pteval;
206 spin_unlock_irqrestore(&mmu->lock, flags);
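
The matches at lines 168-206 trace the lazy L2 page-table allocation. The interesting part is the locking discipline: the spinlock is dropped across the (possibly sleeping or slow) allocation, so the slot must be re-checked afterwards in case another CPU installed a table in the meantime. A reconstructed sketch of that check-alloc-recheck pattern; imgu_mmu_free_page_table() is assumed as the counterpart of the allocator seen above:

static u32 *imgu_mmu_get_l2pt(struct imgu_mmu *mmu, u32 l1pt_idx)
{
	unsigned long flags;
	u32 *l2pt, *new_l2pt;
	u32 pteval;

	spin_lock_irqsave(&mmu->lock, flags);

	l2pt = mmu->l2pts[l1pt_idx];
	if (l2pt) {
		spin_unlock_irqrestore(&mmu->lock, flags);
		return l2pt;
	}

	/* Allocate without holding the spinlock. */
	spin_unlock_irqrestore(&mmu->lock, flags);

	new_l2pt = imgu_mmu_alloc_page_table(mmu->dummy_page_pteval);
	if (!new_l2pt)
		return NULL;

	spin_lock_irqsave(&mmu->lock, flags);

	dev_dbg(mmu->dev, "allocated page table %p for l1pt_idx %u\n",
		new_l2pt, l1pt_idx);

	/* Lost the race: someone installed a table while we allocated. */
	l2pt = mmu->l2pts[l1pt_idx];
	if (l2pt) {
		spin_unlock_irqrestore(&mmu->lock, flags);
		imgu_mmu_free_page_table(new_l2pt);
		return l2pt;
	}

	l2pt = new_l2pt;
	mmu->l2pts[l1pt_idx] = new_l2pt;

	/* Hook the new table into the hardware-visible L1 table. */
	pteval = IPU3_ADDR2PTE(virt_to_phys(new_l2pt));
	mmu->l1pt[l1pt_idx] = pteval;

	spin_unlock_irqrestore(&mmu->lock, flags);
	return l2pt;
}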
210 static int __imgu_mmu_map(struct imgu_mmu *mmu, unsigned long iova,
217 if (!mmu)
222 l2pt = imgu_mmu_get_l2pt(mmu, l1pt_idx);
226 spin_lock_irqsave(&mmu->lock, flags);
228 if (l2pt[l2pt_idx] != mmu->dummy_page_pteval) {
229 spin_unlock_irqrestore(&mmu->lock, flags);
235 spin_unlock_irqrestore(&mmu->lock, flags);
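
Lines 210-235 map a single page: split the IOVA into L1/L2 indices, fetch (or create) the L2 table, and refuse to overwrite a slot that no longer holds the dummy PTE. A sketch, with address_to_pte_idx() standing in for whatever index-splitting helper the driver uses (it does not appear in the matches):

static int __imgu_mmu_map(struct imgu_mmu *mmu, unsigned long iova,
			  phys_addr_t paddr)
{
	u32 l1pt_idx, l2pt_idx;
	unsigned long flags;
	u32 *l2pt;

	if (!mmu)
		return -ENODEV;

	/* Assumed helper: derive both page-table indices from iova. */
	address_to_pte_idx(iova, &l1pt_idx, &l2pt_idx);

	l2pt = imgu_mmu_get_l2pt(mmu, l1pt_idx);
	if (!l2pt)
		return -ENOMEM;

	spin_lock_irqsave(&mmu->lock, flags);

	/*
	 * A slot still holding the dummy-page PTE is free; anything
	 * else means this IOVA is already mapped.
	 */
	if (l2pt[l2pt_idx] != mmu->dummy_page_pteval) {
		spin_unlock_irqrestore(&mmu->lock, flags);
		return -EBUSY;
	}

	l2pt[l2pt_idx] = IPU3_ADDR2PTE(paddr);

	spin_unlock_irqrestore(&mmu->lock, flags);
	return 0;
}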
254 struct imgu_mmu *mmu = to_imgu_mmu(info);
263 dev_err(mmu->dev, "unaligned: iova 0x%lx pa %pa size 0x%zx\n",
268 dev_dbg(mmu->dev, "map: iova 0x%lx pa %pa size 0x%zx\n",
272 dev_dbg(mmu->dev, "mapping: iova 0x%lx pa %pa\n", iova, &paddr);
274 ret = __imgu_mmu_map(mmu, iova, paddr);
283 call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);
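
Lines 254-283 are the public map entry point: IOVA, physical address, and size must all be aligned to the smallest hardware page, the range is walked one page at a time, and the TLB is flushed only if the device is powered. A condensed sketch; the exact behavior on a mid-range failure (here: stop and return the error) is an assumption:

int imgu_mmu_map(struct imgu_mmu_info *info, unsigned long iova,
		 phys_addr_t paddr, size_t size)
{
	struct imgu_mmu *mmu = to_imgu_mmu(info);
	int ret = 0;

	/* OR-ing the three values checks all alignments at once. */
	if (!IS_ALIGNED(iova | paddr | size, IPU3_PAGE_SIZE)) {
		dev_err(mmu->dev, "unaligned: iova 0x%lx pa %pa size 0x%zx\n",
			iova, &paddr, size);
		return -EINVAL;
	}

	dev_dbg(mmu->dev, "map: iova 0x%lx pa %pa size 0x%zx\n",
		iova, &paddr, size);

	while (size) {
		dev_dbg(mmu->dev, "mapping: iova 0x%lx pa %pa\n",
			iova, &paddr);

		ret = __imgu_mmu_map(mmu, iova, paddr);
		if (ret)
			break;

		iova += IPU3_PAGE_SIZE;
		paddr += IPU3_PAGE_SIZE;
		size -= IPU3_PAGE_SIZE;
	}

	call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);

	return ret;
}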
302 struct imgu_mmu *mmu = to_imgu_mmu(info);
327 call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);
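
Only two lines of the scatterlist variant match (302 and 327), but the shape is recognizable: map each sg segment through the page-wise path above, then flush the TLB once at the end. A heavily hedged sketch; the segment-alignment rule and the unwind-on-failure are assumptions, and the function name imgu_mmu_map_sg is inferred from the driver's naming pattern:

size_t imgu_mmu_map_sg(struct imgu_mmu_info *info, unsigned long iova,
		       struct scatterlist *sg, unsigned int nents)
{
	struct imgu_mmu *mmu = to_imgu_mmu(info);
	struct scatterlist *s;
	size_t mapped = 0;
	unsigned int i;

	for_each_sg(sg, s, nents, i) {
		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;

		/* Assumption: each segment must start page aligned. */
		if (!IS_ALIGNED(s->offset, IPU3_PAGE_SIZE))
			goto out_fail;

		if (imgu_mmu_map(info, iova + mapped, phys, s->length))
			goto out_fail;

		mapped += s->length;
	}

	call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);
	return mapped;

out_fail:
	/* Assumed: tear down whatever was mapped before the failure. */
	imgu_mmu_unmap(info, iova, mapped);
	return 0;
}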
338 static size_t __imgu_mmu_unmap(struct imgu_mmu *mmu,
346 if (!mmu)
351 spin_lock_irqsave(&mmu->lock, flags);
353 l2pt = mmu->l2pts[l1pt_idx];
355 spin_unlock_irqrestore(&mmu->lock, flags);
359 if (l2pt[l2pt_idx] == mmu->dummy_page_pteval)
362 l2pt[l2pt_idx] = mmu->dummy_page_pteval;
364 spin_unlock_irqrestore(&mmu->lock, flags);
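
Lines 338-364 undo a single-page mapping: if the L1 slot never received a real L2 table, or the PTE already holds the dummy value, there is nothing to unmap; otherwise the dummy PTE is written back. A sketch returning the number of bytes actually unmapped, again assuming the address_to_pte_idx() helper from the map sketch:

static size_t __imgu_mmu_unmap(struct imgu_mmu *mmu,
			       unsigned long iova, size_t size)
{
	u32 l1pt_idx, l2pt_idx;
	unsigned long flags;
	size_t unmap = size;
	u32 *l2pt;

	if (!mmu)
		return 0;

	address_to_pte_idx(iova, &l1pt_idx, &l2pt_idx);

	spin_lock_irqsave(&mmu->lock, flags);

	/* No L2 table was ever installed for this L1 slot. */
	l2pt = mmu->l2pts[l1pt_idx];
	if (!l2pt) {
		spin_unlock_irqrestore(&mmu->lock, flags);
		return 0;
	}

	/* Already unmapped: report zero bytes. */
	if (l2pt[l2pt_idx] == mmu->dummy_page_pteval)
		unmap = 0;

	l2pt[l2pt_idx] = mmu->dummy_page_pteval;

	spin_unlock_irqrestore(&mmu->lock, flags);
	return unmap;
}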
382 struct imgu_mmu *mmu = to_imgu_mmu(info);
391 dev_err(mmu->dev, "unaligned: iova 0x%lx size 0x%zx\n",
396 dev_dbg(mmu->dev, "unmap this: iova 0x%lx size 0x%zx\n", iova, size);
403 unmapped_page = __imgu_mmu_unmap(mmu, iova, IPU3_PAGE_SIZE);
407 dev_dbg(mmu->dev, "unmapped: iova 0x%lx size 0x%zx\n",
414 call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);
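
Lines 382-414 wrap the single-page unmap in the same alignment-check-then-loop structure as the map path, stopping at the first page that was not mapped. A sketch under those assumptions:

size_t imgu_mmu_unmap(struct imgu_mmu_info *info, unsigned long iova,
		      size_t size)
{
	struct imgu_mmu *mmu = to_imgu_mmu(info);
	size_t unmapped_page, unmapped = 0;

	/* The range must be aligned to the smallest hardware page. */
	if (!IS_ALIGNED(iova | size, IPU3_PAGE_SIZE)) {
		dev_err(mmu->dev, "unaligned: iova 0x%lx size 0x%zx\n",
			iova, size);
		return 0;
	}

	dev_dbg(mmu->dev, "unmap this: iova 0x%lx size 0x%zx\n", iova, size);

	while (unmapped < size) {
		unmapped_page = __imgu_mmu_unmap(mmu, iova, IPU3_PAGE_SIZE);
		if (!unmapped_page)
			break;

		dev_dbg(mmu->dev, "unmapped: iova 0x%lx size 0x%zx\n",
			iova, unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);
	return unmapped;
}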
429 struct imgu_mmu *mmu;
432 mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
433 if (!mmu)
436 mmu->dev = parent;
437 mmu->base = base;
438 spin_lock_init(&mmu->lock);
441 imgu_mmu_set_halt(mmu, true);
447 mmu->dummy_page = (void *)__get_free_page(GFP_KERNEL);
448 if (!mmu->dummy_page)
450 pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->dummy_page));
451 mmu->dummy_page_pteval = pteval;
457 mmu->dummy_l2pt = imgu_mmu_alloc_page_table(pteval);
458 if (!mmu->dummy_l2pt)
460 pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->dummy_l2pt));
461 mmu->dummy_l2pt_pteval = pteval;
467 mmu->l2pts = vzalloc(IPU3_PT_PTES * sizeof(*mmu->l2pts));
468 if (!mmu->l2pts)
472 mmu->l1pt = imgu_mmu_alloc_page_table(mmu->dummy_l2pt_pteval);
473 if (!mmu->l1pt)
476 pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->l1pt));
477 writel(pteval, mmu->base + REG_L1_PHYS);
478 imgu_mmu_tlb_invalidate(mmu);
479 imgu_mmu_set_halt(mmu, false);
481 mmu->geometry.aperture_start = 0;
482 mmu->geometry.aperture_end = DMA_BIT_MASK(IPU3_MMU_ADDRESS_BITS);
484 return &mmu->geometry;
487 vfree(mmu->l2pts);
489 imgu_mmu_free_page_table(mmu->dummy_l2pt);
491 free_page((unsigned long)mmu->dummy_page);
493 kfree(mmu);
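
The init matches (lines 429-493) describe a five-step bring-up: halt the MMU, allocate a dummy data page whose PTE fills every empty L2 slot, allocate a dummy L2 table whose PTE fills every L1 slot, allocate the CPU-side l2pts shadow array and the L1 table, then program REG_L1_PHYS, flush, and unhalt. With this arrangement any unmapped IOVA resolves harmlessly to the dummy page instead of faulting. A condensed reconstruction; the function signature, error label names, and ERR_PTR return convention are assumptions:

struct imgu_mmu_info *imgu_mmu_init(struct device *parent,
				    void __iomem *base)
{
	struct imgu_mmu *mmu;
	u32 pteval;

	mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return ERR_PTR(-ENOMEM);

	mmu->dev = parent;
	mmu->base = base;
	spin_lock_init(&mmu->lock);

	/* Disallow external memory access while the tables are built. */
	imgu_mmu_set_halt(mmu, true);

	/* Dummy page: where all unmapped IOVAs point. */
	mmu->dummy_page = (void *)__get_free_page(GFP_KERNEL);
	if (!mmu->dummy_page)
		goto fail_mmu;
	pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->dummy_page));
	mmu->dummy_page_pteval = pteval;

	/* Dummy L2 table: every PTE points at the dummy page. */
	mmu->dummy_l2pt = imgu_mmu_alloc_page_table(pteval);
	if (!mmu->dummy_l2pt)
		goto fail_dummy_page;
	pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->dummy_l2pt));
	mmu->dummy_l2pt_pteval = pteval;

	/* CPU-side shadow array of per-L1-slot L2 table pointers. */
	mmu->l2pts = vzalloc(IPU3_PT_PTES * sizeof(*mmu->l2pts));
	if (!mmu->l2pts)
		goto fail_dummy_l2pt;

	/* L1 table: every slot initially points at the dummy L2 table. */
	mmu->l1pt = imgu_mmu_alloc_page_table(mmu->dummy_l2pt_pteval);
	if (!mmu->l1pt)
		goto fail_l2pts;

	pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->l1pt));
	writel(pteval, mmu->base + REG_L1_PHYS);
	imgu_mmu_tlb_invalidate(mmu);
	imgu_mmu_set_halt(mmu, false);

	mmu->geometry.aperture_start = 0;
	mmu->geometry.aperture_end = DMA_BIT_MASK(IPU3_MMU_ADDRESS_BITS);

	return &mmu->geometry;

fail_l2pts:
	vfree(mmu->l2pts);
fail_dummy_l2pt:
	imgu_mmu_free_page_table(mmu->dummy_l2pt);
fail_dummy_page:
	free_page((unsigned long)mmu->dummy_page);
fail_mmu:
	kfree(mmu);
	return ERR_PTR(-ENOMEM);
}

The exit path at lines 505-515 then frees the same resources in the same order, after halting the MMU and flushing the TLB one last time.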
505 struct imgu_mmu *mmu = to_imgu_mmu(info);
508 imgu_mmu_set_halt(mmu, true);
509 imgu_mmu_tlb_invalidate(mmu);
511 imgu_mmu_free_page_table(mmu->l1pt);
512 vfree(mmu->l2pts);
513 imgu_mmu_free_page_table(mmu->dummy_l2pt);
514 free_page((unsigned long)mmu->dummy_page);
515 kfree(mmu);
520 struct imgu_mmu *mmu = to_imgu_mmu(info);
522 imgu_mmu_set_halt(mmu, true);
527 struct imgu_mmu *mmu = to_imgu_mmu(info);
530 imgu_mmu_set_halt(mmu, true);
532 pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->l1pt));
533 writel(pteval, mmu->base + REG_L1_PHYS);
535 imgu_mmu_tlb_invalidate(mmu);
536 imgu_mmu_set_halt(mmu, false);
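
The last three fragments are the power-management hooks: suspend merely halts the MMU, while resume halts, reprograms the L1 base register (its contents do not survive power-off), flushes the TLB, and unhalts. A sketch; the imgu_mmu_suspend/imgu_mmu_resume names are inferred from the driver's naming pattern rather than visible in the matches:

void imgu_mmu_suspend(struct imgu_mmu_info *info)
{
	struct imgu_mmu *mmu = to_imgu_mmu(info);

	imgu_mmu_set_halt(mmu, true);
}

void imgu_mmu_resume(struct imgu_mmu_info *info)
{
	struct imgu_mmu *mmu = to_imgu_mmu(info);
	u32 pteval;

	imgu_mmu_set_halt(mmu, true);

	/* Restore the L1 table base lost across power-off. */
	pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->l1pt));
	writel(pteval, mmu->base + REG_L1_PHYS);

	imgu_mmu_tlb_invalidate(mmu);
	imgu_mmu_set_halt(mmu, false);
}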