Lines matching defs:pgt

66 struct kvm_pgtable *pgt = mmu->pgt;
67 if (!pgt)
71 ret = fn(pgt, addr, next - addr);
118 struct kvm_pgtable *pgt;
145 pgt = kvm->arch.mmu.pgt;
146 if (!pgt)
150 ret = kvm_pgtable_stage2_split(pgt, addr, next - addr, cache);
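
The two groups above (lines 66-71 and 118-150) share the same range-walk idiom: re-read mmu->pgt on each iteration, bail out if the table has been torn down, then hand one naturally aligned chunk at a time to a callback. A minimal sketch of that shape, assuming the stage2_range_addr_end() helper the real walker uses to pick the next chunk boundary:

	/* Hedged sketch of the per-chunk walk at lines 66-71; fn() stands in
	 * for callbacks such as kvm_pgtable_stage2_split() at line 150. */
	static int stage2_apply_range_sketch(struct kvm_s2_mmu *mmu,
					     u64 addr, u64 end,
					     int (*fn)(struct kvm_pgtable *, u64, u64))
	{
		int ret = 0;

		while (addr < end) {
			struct kvm_pgtable *pgt = mmu->pgt;
			u64 next;

			if (!pgt)	/* stage-2 table already torn down */
				return -EINVAL;

			next = stage2_range_addr_end(addr, end);
			ret = fn(pgt, addr, next - addr);
			if (ret)
				break;

			addr = next;
		}

		return ret;
	}

Re-reading mmu->pgt inside the loop is what makes the NULL checks at lines 67 and 146 meaningful: teardown can race with the walk, and the walker must notice.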
804 struct kvm_pgtable pgt = {
808 ARM64_HW_PGTABLE_LEVELS(pgt.ia_bits) + 1),
822 ret = kvm_pgtable_get_leaf(&pgt, addr, &pte, &level);
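
Lines 804-822 build a throwaway struct kvm_pgtable on the stack purely to ask kvm_pgtable_get_leaf() for the leaf entry and its level at one address. A sketch of that call shape; only the .start_level arithmetic comes from the listing (line 808), while the other initializers, the KVM_PGTABLE_LAST_LEVEL constant, and the s8 level type are assumptions matching recent kernels:

	/* Sketch: probe a single leaf mapping via a temporary table
	 * descriptor, as lines 804-822 do. Fields other than .start_level
	 * are assumed; ia_bits is an assumed input-address width. */
	struct kvm_pgtable pgt = {
		.ia_bits	= ia_bits,
		.start_level	= (KVM_PGTABLE_LAST_LEVEL -
				   ARM64_HW_PGTABLE_LEVELS(pgt.ia_bits) + 1),
	};
	kvm_pte_t pte = 0;
	s8 level;	/* older trees use u32 here */
	int ret;

	ret = kvm_pgtable_get_leaf(&pgt, addr, &pte, &level);
	/* On success, pte and level describe the leaf covering addr. */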
872 struct kvm_pgtable *pgt;
899 if (mmu->pgt != NULL) {
904 pgt = kzalloc(sizeof(*pgt), GFP_KERNEL_ACCOUNT);
905 if (!pgt)
909 err = kvm_pgtable_stage2_init(pgt, mmu, &kvm_s2_mm_ops);
926 mmu->pgt = pgt;
927 mmu->pgd_phys = __pa(pgt->pgd);
931 kvm_pgtable_stage2_destroy(pgt);
933 kfree(pgt);
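
Lines 872-933 are the allocation path: guard against double init (line 899), kzalloc the descriptor, initialise the stage-2 table, publish it, and unwind in reverse on failure. A condensed sketch, assuming the init/destroy API as called at lines 909 and 931; later_setup() is a hypothetical stand-in for the further setup that can still fail after init succeeds:

	/* Sketch of the alloc/init/publish/unwind shape at lines 899-933. */
	static int stage2_alloc_sketch(struct kvm_s2_mmu *mmu)
	{
		struct kvm_pgtable *pgt;
		int err;

		if (mmu->pgt)			/* already set up (line 899) */
			return -EINVAL;

		pgt = kzalloc(sizeof(*pgt), GFP_KERNEL_ACCOUNT);
		if (!pgt)
			return -ENOMEM;

		err = kvm_pgtable_stage2_init(pgt, mmu, &kvm_s2_mm_ops);
		if (err)
			goto out_free;

		err = later_setup(mmu);		/* hypothetical later step */
		if (err)
			goto out_destroy;

		mmu->pgt = pgt;			/* publish (lines 926-927) */
		mmu->pgd_phys = __pa(pgt->pgd);
		return 0;

	out_destroy:
		kvm_pgtable_stage2_destroy(pgt);	/* line 931 */
	out_free:
		kfree(pgt);				/* line 933 */
		return err;
	}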
1014 struct kvm_pgtable *pgt = NULL;
1017 pgt = mmu->pgt;
1018 if (pgt) {
1020 mmu->pgt = NULL;
1025 if (pgt) {
1026 kvm_pgtable_stage2_destroy(pgt);
1027 kfree(pgt);
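
Lines 1014-1027 are the matching teardown, and the split matters: the table is unlinked under the mmu_lock so that concurrent walkers see NULL and bail (the checks at lines 67 and 146), while the potentially slow destroy and kfree happen outside the lock. Sketched, assuming arm64's rwlock-typed kvm->mmu_lock:

	/* Sketch of the detach-then-free shape at lines 1014-1027. */
	static void stage2_free_sketch(struct kvm *kvm, struct kvm_s2_mmu *mmu)
	{
		struct kvm_pgtable *pgt = NULL;

		write_lock(&kvm->mmu_lock);
		pgt = mmu->pgt;
		if (pgt)
			mmu->pgt = NULL;	/* new walkers now see NULL */
		write_unlock(&kvm->mmu_lock);

		if (pgt) {
			kvm_pgtable_stage2_destroy(pgt);	/* outside the lock */
			kfree(pgt);
		}
	}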
1073 struct kvm_pgtable *pgt = mmu->pgt;
1091 ret = kvm_pgtable_stage2_map(pgt, addr, PAGE_SIZE, pa, prot,
1395 struct kvm_pgtable *pgt;
1524 pgt = vcpu->arch.hw_mmu->pgt;
1577 ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
1579 ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
1607 pte = kvm_pgtable_stage2_mkyoung(mmu->pgt, fault_ipa);
1648 if (fault_ipa >= BIT_ULL(vcpu->arch.hw_mmu->pgt->ia_bits)) {
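
Lines 1395-1648 are the stage-2 fault path: line 1524 resolves the pgt once, line 1648 rejects fault IPAs beyond the guest's input-address space (BIT_ULL of the table's ia_bits), and the heart of the handler is the choice at lines 1577-1579 between relaxing permissions on an existing mapping and installing a full mapping. A sketch of that choice; the local names mirror the real handler only approximately, and the map call's trailing arguments vary by kernel version:

	/* Sketch of the permission-vs-map decision at lines 1577-1579.
	 * fault_is_perm, fault_granule, vma_pagesize and memcache are
	 * approximations of the real handler's locals. */
	if (fault_is_perm && vma_pagesize == fault_granule)
		/* Leaf already exists at the right size: widen its perms. */
		ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
	else
		/* Install (or replace) a mapping at vma_pagesize granularity. */
		ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
					     __pfn_to_phys(pfn), prot,
					     memcache);

Line 1607's kvm_pgtable_stage2_mkyoung() covers the cheaper remaining case, a pure access-flag fault that only needs the young bit set.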
1761 if (!kvm->arch.mmu.pgt)
1775 if (!kvm->arch.mmu.pgt)
1798 kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, range->start << PAGE_SHIFT,
1809 if (!kvm->arch.mmu.pgt)
1812 return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt,
1821 if (!kvm->arch.mmu.pgt)
1824 return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt,
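
Finally, lines 1761-1824 are the secondary-MMU (mmu_notifier) callbacks. Each starts with the same guard, returning early when no stage-2 table exists yet, and the two aging hooks at lines 1809-1824 differ only in whether the access flag is cleared as well as tested. A sketch of the aging pair, assuming the gfn-based range fields used at line 1798:

	/* Sketch of the aging callbacks at lines 1809-1824; mkold is the
	 * only difference between "age" and "test age". */
	static bool stage2_age_sketch(struct kvm *kvm,
				      struct kvm_gfn_range *range, bool mkold)
	{
		u64 size = (range->end - range->start) << PAGE_SHIFT;

		if (!kvm->arch.mmu.pgt)		/* guard at lines 1761-1821 */
			return false;

		return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt,
							   range->start << PAGE_SHIFT,
							   size, mkold);
	}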