Lines Matching refs:mmu

41 #include "mmu/isp_mmu.h"
57 static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt,
79 static phys_addr_t isp_pte_to_pgaddr(struct isp_mmu *mmu,
82 return mmu->driver->pte_to_phys(mmu, pte);
85 static unsigned int isp_pgaddr_to_pte_valid(struct isp_mmu *mmu,
88 unsigned int pte = mmu->driver->phys_to_pte(mmu, phys);
90 return (unsigned int)(pte | ISP_PTE_VALID_MASK(mmu));
97 static phys_addr_t alloc_page_table(struct isp_mmu *mmu)
119 atomisp_set_pte(page, i, mmu->driver->null_pte);
125 static void free_page_table(struct isp_mmu *mmu, phys_addr_t page)
142 static void mmu_remap_error(struct isp_mmu *mmu,
161 static void mmu_unmap_l2_pte_error(struct isp_mmu *mmu,
177 static void mmu_unmap_l1_pte_error(struct isp_mmu *mmu,
189 static void mmu_unmap_l1_pt_error(struct isp_mmu *mmu, unsigned int pte)
199 static int mmu_l2_map(struct isp_mmu *mmu, phys_addr_t l1_pt,
219 if (ISP_PTE_VALID(mmu, pte)) {
220 mmu_remap_error(mmu, l1_pt, l1_idx,
224 free_mmu_map(mmu, start, ptr);
229 pte = isp_pgaddr_to_pte_valid(mmu, phys);
232 mmu->l2_pgt_refcount[l1_idx]++;
244 static int mmu_l1_map(struct isp_mmu *mmu, phys_addr_t l1_pt,
266 if (!ISP_PTE_VALID(mmu, l2_pte)) {
267 l2_pt = alloc_page_table(mmu);
273 free_mmu_map(mmu, start, ptr);
278 l2_pte = isp_pgaddr_to_pte_valid(mmu, l2_pt);
281 mmu->l2_pgt_refcount[idx] = 0;
284 l2_pt = isp_pte_to_pgaddr(mmu, l2_pte);
289 ret = mmu_l2_map(mmu, l1_pt, idx,
294 ret = mmu_l2_map(mmu, l1_pt, idx,
304 free_mmu_map(mmu, start, ptr);
317 static int mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
324 mutex_lock(&mmu->pt_mutex);
325 if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
329 l1_pt = alloc_page_table(mmu);
332 mutex_unlock(&mmu->pt_mutex);
339 mmu->base_address = l1_pt;
340 mmu->l1_pte = isp_pgaddr_to_pte_valid(mmu, l1_pt);
341 memset(mmu->l2_pgt_refcount, 0, sizeof(int) * ISP_L1PT_PTES);
344 l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);
350 ret = mmu_l1_map(mmu, l1_pt, start, end, phys);
355 mutex_unlock(&mmu->pt_mutex);
363 static void mmu_l2_unmap(struct isp_mmu *mmu, phys_addr_t l1_pt,
382 if (!ISP_PTE_VALID(mmu, pte))
383 mmu_unmap_l2_pte_error(mmu, l1_pt, l1_idx,
386 atomisp_set_pte(l2_pt, idx, mmu->driver->null_pte);
387 mmu->l2_pgt_refcount[l1_idx]--;
391 if (mmu->l2_pgt_refcount[l1_idx] == 0) {
392 free_page_table(mmu, l2_pt);
393 atomisp_set_pte(l1_pt, l1_idx, mmu->driver->null_pte);
401 static void mmu_l1_unmap(struct isp_mmu *mmu, phys_addr_t l1_pt,
420 if (!ISP_PTE_VALID(mmu, l2_pte)) {
421 mmu_unmap_l1_pte_error(mmu, l1_pt, idx, ptr, l2_pte);
425 l2_pt = isp_pte_to_pgaddr(mmu, l2_pte);
430 mmu_l2_unmap(mmu, l1_pt, idx, l2_pt, ptr, l1_aligned);
433 mmu_l2_unmap(mmu, l1_pt, idx, l2_pt, ptr, end);
448 static void mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt,
454 mutex_lock(&mmu->pt_mutex);
455 if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
456 mmu_unmap_l1_pt_error(mmu, mmu->l1_pte);
457 mutex_unlock(&mmu->pt_mutex);
461 l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);
466 mmu_l1_unmap(mmu, l1_pt, start, end);
467 mutex_unlock(&mmu->pt_mutex);
474 static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt,
483 mmu_unmap(mmu, start, pgnr);
486 int isp_mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
489 return mmu_map(mmu, isp_virt, phys, pgnr);
492 void isp_mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt,
495 mmu_unmap(mmu, isp_virt, pgnr);
498 static void isp_mmu_flush_tlb_range_default(struct isp_mmu *mmu,
502 isp_mmu_flush_tlb(mmu);
506 int isp_mmu_init(struct isp_mmu *mmu, struct isp_mmu_client *driver)
508 if (!mmu) /* error */
516 mmu->driver = driver;
527 dev_err(atomisp_dev, "PTE_MASK is missing from mmu driver\n");
531 mmu->l1_pte = driver->null_pte;
533 mutex_init(&mmu->pt_mutex);
539 void isp_mmu_exit(struct isp_mmu *mmu)
545 if (!mmu)
548 if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) {
550 (unsigned int)mmu->l1_pte);
554 l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte);
559 if (ISP_PTE_VALID(mmu, pte)) {
560 l2_pt = isp_pte_to_pgaddr(mmu, pte);
562 free_page_table(mmu, l2_pt);
566 free_page_table(mmu, l1_pt);
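
The matches above cover the two-level page-table walk (mmu_l1_map/mmu_l2_map on the map side, mmu_l1_unmap/mmu_l2_unmap on the unmap side) behind four exported entry points: isp_mmu_init(), isp_mmu_map(), isp_mmu_unmap() and isp_mmu_exit(). Below is a minimal caller sketch, not code from the file: it assumes a struct isp_mmu_client already filled in by platform glue, and the function name example_map_one_page(), the static example_mmu instance and the single-page mapping size are illustrative assumptions.

/*
 * Hedged sketch of driving the entry points seen in the listing.
 * Assumptions (not from the file): example_map_one_page(), the static
 * example_mmu instance, and the single-page mapping size.
 */
#include "mmu/isp_mmu.h"

static struct isp_mmu example_mmu;

static int example_map_one_page(struct isp_mmu_client *client,
				unsigned int isp_virt, phys_addr_t phys)
{
	int ret;

	/* Bind the platform PTE helpers (phys_to_pte, pte_to_phys,
	 * null_pte, pte_valid_mask) to the generic page-table code. */
	ret = isp_mmu_init(&example_mmu, client);
	if (ret)
		return ret;

	/* Map one ISP page: mmu_map() allocates the L1 table on first
	 * use and any needed L2 tables, all under pt_mutex. */
	ret = isp_mmu_map(&example_mmu, isp_virt, phys, 1);
	if (!ret) {
		/* The range flush in the listing falls back to a full
		 * TLB flush (isp_mmu_flush_tlb). */
		isp_mmu_flush_tlb(&example_mmu);

		/* ... ISP hardware uses the mapping here ... */

		/* Unmapping clears L2 PTEs, decrements l2_pgt_refcount,
		 * and frees an L2 table once its refcount hits zero. */
		isp_mmu_unmap(&example_mmu, isp_virt, 1);
	}

	/* Walk the L1 table, free any remaining L2 tables, then free
	 * the L1 table itself. */
	isp_mmu_exit(&example_mmu);
	return ret;
}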