Lines matching references to pgt in arch/arm64/kvm/hyp/pgtable.c:

112 static u32 kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
114 u64 shift = kvm_granule_shift(pgt->start_level - 1); /* May underflow */
115 u64 mask = BIT(pgt->ia_bits) - 1;
122 struct kvm_pgtable pgt = {
127 return kvm_pgd_page_idx(&pgt, -1ULL) + 1;
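
The kvm_pgd_page_idx()/kvm_pgd_pages() lines above size the (possibly concatenated) PGD: index the highest input address at the level above the start level, then add one. Below is a minimal user-space sketch of the same arithmetic, assuming a 4KiB granule and the four-level shift formula from kvm_pgtable.h; the *_sketch names are illustrative, not kernel API:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12			/* assume a 4KiB granule */

/* Bits translated below 'level', as ARM64_HW_PGTABLE_LEVEL_SHIFT() computes. */
static uint64_t granule_shift_sketch(int level)
{
	return (4 - level) * (PAGE_SHIFT - 3) + 3;
}

static uint32_t pgd_pages_sketch(uint32_t ia_bits, int start_level)
{
	/* Shift of the level above start_level, like kvm_pgd_page_idx(). */
	uint64_t shift = granule_shift_sketch(start_level - 1);
	uint64_t mask = (1ULL << ia_bits) - 1;

	/* Index of the highest input address, plus one. */
	return ((UINT64_MAX & mask) >> shift) + 1;
}

int main(void)
{
	/* A 40-bit IPA space starting at level 1 needs two concatenated pages. */
	printf("%u\n", pgd_pages_sketch(40, 1));
	return 0;
}
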
301 static int _kvm_pgtable_walk(struct kvm_pgtable *pgt, struct kvm_pgtable_walk_data *data)
305 u64 limit = BIT(pgt->ia_bits);
310 if (!pgt->pgd)
313 for (idx = kvm_pgd_page_idx(pgt, data->addr); data->addr < data->end; ++idx) {
314 kvm_pteref_t pteref = &pgt->pgd[idx * PTRS_PER_PTE];
316 ret = __kvm_pgtable_walk(data, pgt->mm_ops, pteref, pgt->start_level);
324 int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
339 r = _kvm_pgtable_walk(pgt, &walk_data);
361 int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
372 ret = kvm_pgtable_walk(pgt, ALIGN_DOWN(addr, PAGE_SIZE),
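
kvm_pgtable_walk() drives a caller-supplied struct kvm_pgtable_walker (callback, opaque argument, walk flags) over the requested range, and kvm_pgtable_get_leaf() is a thin wrapper around it that retrieves a single leaf PTE and its level. Below is a minimal sketch of an external walker, assuming kernel context and that the caller holds the appropriate MMU lock; count_mapped_leaves() and count_leaf_cb() are hypothetical names, not helpers from this file:

/* Hypothetical: count valid leaf entries in [addr, addr + size). */
static int count_leaf_cb(const struct kvm_pgtable_visit_ctx *ctx,
			 enum kvm_pgtable_walk_flags visit)
{
	u64 *count = ctx->arg;

	if (kvm_pte_valid(ctx->old))
		(*count)++;

	return 0;
}

static u64 count_mapped_leaves(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
	u64 count = 0;
	struct kvm_pgtable_walker walker = {
		.cb	= count_leaf_cb,
		.arg	= &count,
		.flags	= KVM_PGTABLE_WALK_LEAF,
	};

	kvm_pgtable_walk(pgt, addr, size, &walker);	/* error handling elided */
	return count;
}
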
489 int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
506 ret = kvm_pgtable_walk(pgt, addr, size, &walker);
552 u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
561 if (!pgt->mm_ops->page_count)
564 kvm_pgtable_walk(pgt, addr, size, &walker);
568 int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
578 pgt->pgd = (kvm_pteref_t)mm_ops->zalloc_page(NULL);
579 if (!pgt->pgd)
582 pgt->ia_bits = va_bits;
583 pgt->start_level = start_level;
584 pgt->mm_ops = mm_ops;
585 pgt->mmu = NULL;
586 pgt->force_pte_cb = NULL;
607 void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt)
614 WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
615 pgt->mm_ops->put_page(kvm_dereference_pteref(&walker, pgt->pgd));
616 pgt->pgd = NULL;
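
The hyp_* lines above form the EL2 ("hyp") half of the API: kvm_pgtable_hyp_init() allocates a single PGD page through the supplied mm_ops, kvm_pgtable_hyp_map()/kvm_pgtable_hyp_unmap() install and remove mappings (unmap returns the number of bytes unmapped), and kvm_pgtable_hyp_destroy() walks the table and frees it. Below is a minimal lifecycle sketch, assuming the caller provides a populated struct kvm_pgtable_mm_ops (page allocation and refcounting hooks); hyp_map_one_page_sketch() is a hypothetical name:

/* Hypothetical: create a hyp table and map a single page into it. */
static int hyp_map_one_page_sketch(struct kvm_pgtable *pgt, u32 va_bits,
				   struct kvm_pgtable_mm_ops *mm_ops,
				   u64 addr, u64 phys)
{
	int ret;

	ret = kvm_pgtable_hyp_init(pgt, va_bits, mm_ops);
	if (ret)
		return ret;

	ret = kvm_pgtable_hyp_map(pgt, addr, PAGE_SIZE, phys,
				  KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W);
	if (ret)
		kvm_pgtable_hyp_destroy(pgt);

	return ret;
}
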
687 static bool stage2_has_fwb(struct kvm_pgtable *pgt)
692 return !(pgt->flags & KVM_PGTABLE_S2_NOFWB);
715 #define KVM_S2_MEMATTR(pgt, attr) PAGE_S2_MEMATTR(attr, stage2_has_fwb(pgt))
717 static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot,
730 attr = KVM_S2_MEMATTR(pgt, DEVICE_nGnRE);
735 attr = KVM_S2_MEMATTR(pgt, NORMAL_NC);
738 attr = KVM_S2_MEMATTR(pgt, NORMAL);
875 static bool stage2_unmap_defer_tlb_flush(struct kvm_pgtable *pgt)
885 return system_supports_tlb_range() && stage2_has_fwb(pgt);
892 struct kvm_pgtable *pgt = ctx->arg;
905 } else if (!stage2_unmap_defer_tlb_flush(pgt)) {
914 static bool stage2_pte_cacheable(struct kvm_pgtable *pgt, kvm_pte_t pte)
917 return memattr == KVM_S2_MEMATTR(pgt, NORMAL);
962 struct kvm_pgtable *pgt = data->mmu->pgt;
987 stage2_pte_cacheable(pgt, new))
1079 int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
1086 .mmu = pgt->mmu,
1088 .force_pte = pgt->force_pte_cb && pgt->force_pte_cb(addr, addr + size, prot),
1098 if (WARN_ON((pgt->flags & KVM_PGTABLE_S2_IDMAP) && (addr != phys)))
1101 ret = stage2_set_prot_attr(pgt, prot, &map_data.attr);
1105 ret = kvm_pgtable_walk(pgt, addr, size, &walker);
1110 int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
1116 .mmu = pgt->mmu,
1131 ret = kvm_pgtable_walk(pgt, addr, size, &walker);
1138 struct kvm_pgtable *pgt = ctx->arg;
1139 struct kvm_s2_mmu *mmu = pgt->mmu;
1157 } else if (stage2_pte_cacheable(pgt, ctx->old)) {
1158 need_flush = !stage2_has_fwb(pgt);
1178 int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
1183 .arg = pgt,
1187 ret = kvm_pgtable_walk(pgt, addr, size, &walker);
1188 if (stage2_unmap_defer_tlb_flush(pgt))
1190 kvm_tlb_flush_vmid_range(pgt->mmu, addr, size);
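
kvm_pgtable_stage2_map(), kvm_pgtable_stage2_set_owner() and kvm_pgtable_stage2_unmap() are the stage-2 mutators; on unmap, stage2_unmap_defer_tlb_flush() chooses between per-entry TLB invalidation in the walker callback and a single deferred kvm_tlb_flush_vmid_range() when both FEAT_TLBIRANGE and FWB are available. A thin caller sketch, assuming kernel context and that kvm->mmu_lock is held for write as in mmu.c; unmap_gpa_range_sketch() is a hypothetical name:

/* Hypothetical: tear down a guest-PA range; TLB maintenance is internal. */
static int unmap_gpa_range_sketch(struct kvm_s2_mmu *mmu, u64 gpa, u64 size)
{
	return kvm_pgtable_stage2_unmap(mmu->pgt, gpa, size);
}
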
1239 static int stage2_update_leaf_attrs(struct kvm_pgtable *pgt, u64 addr,
1256 ret = kvm_pgtable_walk(pgt, addr, size, &walker);
1268 int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size)
1270 return stage2_update_leaf_attrs(pgt, addr, size, 0,
1275 kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr)
1280 ret = stage2_update_leaf_attrs(pgt, addr, 1, KVM_PTE_LEAF_ATTR_LO_S2_AF, 0,
1324 bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
1336 WARN_ON(kvm_pgtable_walk(pgt, addr, size, &walker));
1340 int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
1359 ret = stage2_update_leaf_attrs(pgt, addr, 1, set, clr, NULL, &level,
1363 kvm_call_hyp(__kvm_tlb_flush_vmid_ipa_nsh, pgt->mmu, addr, level);
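
The wrprotect/mkyoung/test_clear_young/relax_perms lines implement dirty logging, access tracking and permission-fault fixup on top of stage2_update_leaf_attrs(). Below is a minimal sketch of two callers, assuming kernel context; the *_sketch names are hypothetical, and the final bool of kvm_pgtable_stage2_test_clear_young() is taken to be the "mkold" flag:

/* Hypothetical: write-protect a range so guest writes fault for dirty logging. */
static int wrprotect_range_sketch(struct kvm_s2_mmu *mmu, u64 gpa, u64 size)
{
	return kvm_pgtable_stage2_wrprotect(mmu->pgt, gpa, size);
}

/* Hypothetical: test and clear the access flag over a range (page ageing). */
static bool age_range_sketch(struct kvm_s2_mmu *mmu, u64 gpa, u64 size)
{
	return kvm_pgtable_stage2_test_clear_young(mmu->pgt, gpa, size, true);
}
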
1370 struct kvm_pgtable *pgt = ctx->arg;
1371 struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
1373 if (!kvm_pte_valid(ctx->old) || !stage2_pte_cacheable(pgt, ctx->old))
1382 int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
1387 .arg = pgt,
1390 if (stage2_has_fwb(pgt))
1393 return kvm_pgtable_walk(pgt, addr, size, &walker);
1396 kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt,
1403 .mmu = pgt->mmu,
1424 struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
1431 ret = stage2_set_prot_attr(pgt, prot, &map_data.attr);
1518 childp = kvm_pgtable_stage2_create_unlinked(mmu->pgt, phys,
1539 int kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
1548 return kvm_pgtable_walk(pgt, addr, size, &walker);
1551 int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
1563 pgt->pgd = (kvm_pteref_t)mm_ops->zalloc_pages_exact(pgd_sz);
1564 if (!pgt->pgd)
1567 pgt->ia_bits = ia_bits;
1568 pgt->start_level = start_level;
1569 pgt->mm_ops = mm_ops;
1570 pgt->mmu = mmu;
1571 pgt->flags = flags;
1572 pgt->force_pte_cb = force_pte_cb;
1604 void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
1613 WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
1614 pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE;
1615 pgt->mm_ops->free_pages_exact(kvm_dereference_pteref(&walker, pgt->pgd), pgd_sz);
1616 pgt->pgd = NULL;
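
__kvm_pgtable_stage2_init() sizes and allocates the (possibly concatenated) PGD via mm_ops->zalloc_pages_exact() and records the MMU, flags and force_pte callback; kvm_pgtable_stage2_destroy() walks the table to drop every page and then frees the PGD. Below is a minimal lifecycle sketch, assuming kernel context, a populated mm_ops, and the kvm_pgtable_stage2_init() wrapper that passes no flags and no force_pte_cb; stage2_lifecycle_sketch() is a hypothetical name:

/* Hypothetical: bring up and tear down a VM's stage-2 page table. */
static int stage2_lifecycle_sketch(struct kvm_s2_mmu *mmu,
				   struct kvm_pgtable_mm_ops *mm_ops)
{
	struct kvm_pgtable *pgt;
	int ret;

	pgt = kzalloc(sizeof(*pgt), GFP_KERNEL);
	if (!pgt)
		return -ENOMEM;

	ret = kvm_pgtable_stage2_init(pgt, mmu, mm_ops);
	if (ret) {
		kfree(pgt);
		return ret;
	}
	mmu->pgt = pgt;

	/* ... map, unmap, wrprotect, flush while the VM runs ... */

	kvm_pgtable_stage2_destroy(pgt);
	mmu->pgt = NULL;
	kfree(pgt);
	return 0;
}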