Lines Matching defs:mmu (arch/x86/kvm/mmu/paging_tmpl.h)

31 #define PT_HAVE_ACCESSED_DIRTY(mmu) true
45 #define PT_HAVE_ACCESSED_DIRTY(mmu) true
58 #define PT_HAVE_ACCESSED_DIRTY(mmu) (!(mmu)->cpu_role.base.ad_disabled)
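
All of these matches come from KVM's guest page-table walker template, paging_tmpl.h, which is included once per guest paging mode (32-bit, 64-bit/PAE, and EPT). That is why PT_HAVE_ACCESSED_DIRTY appears three times: lines 31 and 45 cover the legacy modes, whose page tables always carry accessed/dirty bits, while line 58 covers EPT, where the bits are optional and tracked through the ad_disabled bit of the guest's cpu_role. A minimal, self-contained mirror of the EPT case, with a hypothetical struct that reproduces only the one field used, not the kernel's real layout:

    #include <stdbool.h>

    struct mmu_sketch {
            struct { struct { bool ad_disabled; } base; } cpu_role;
    };

    static bool have_accessed_dirty(const struct mmu_sketch *mmu)
    {
            /* A/D bits are usable unless the guest's role disabled them. */
            return !mmu->cpu_role.base.ad_disabled;
    }
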
109 static inline void FNAME(protect_clean_gpte)(struct kvm_mmu *mmu, unsigned *access,
115 if (!PT_HAVE_ACCESSED_DIRTY(mmu))
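
Lines 109-115 are the early-out of FNAME(protect_clean_gpte): when A/D bits exist, a guest PTE whose dirty bit is clear must not become writable in the shadow tables, so the first guest write faults and lets KVM set the dirty bit; without A/D bits there is nothing to track and the helper returns immediately. The same protection is reapplied on the prefetch and sync paths (lines 546 and 930 below). A simplified sketch of the idea, not the kernel's exact mask arithmetic; the bit positions match x86 PTEs but are defined locally for the example:

    #include <stdbool.h>
    #include <stdint.h>

    #define ACC_WRITE_MASK (1u << 1)
    #define PT_DIRTY_MASK  (1ull << 6)

    static void protect_clean_gpte_sketch(bool have_ad, unsigned *access,
                                          uint64_t gpte)
    {
            if (!have_ad)
                    return;                       /* no dirty bit to honor */
            if (!(gpte & PT_DIRTY_MASK))
                    *access &= ~ACC_WRITE_MASK;   /* clean page: fault on first write */
    }
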
145 static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
147 return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level) ||
148 FNAME(is_bad_mt_xwr)(&mmu->guest_rsvd_check, gpte);
159 if (PT_HAVE_ACCESSED_DIRTY(vcpu->arch.mmu) &&
163 if (FNAME(is_rsvd_bits_set)(vcpu->arch.mmu, gpte, PG_LEVEL_4K))
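
Lines 145-148 fold two validity tests into one helper: the per-mode reserved-bit masks in guest_rsvd_check, plus (relevant only to the EPT instantiation) the illegal memtype/XWR combinations. Lines 159-163 look like the prefetch filter built on top of it: a gpte is not worth prefetching if its accessed bit is still clear (when A/D bits exist at all) or if it sets reserved bits. A condensed sketch of that filter, with an illustrative flat mask standing in for the real per-level tables:

    #include <stdbool.h>
    #include <stdint.h>

    #define PT_ACCESSED_MASK (1ull << 5)

    static bool gpte_invalid_for_prefetch(bool have_ad, uint64_t gpte,
                                          uint64_t rsvd_mask)
    {
            if (have_ad && !(gpte & PT_ACCESSED_MASK))
                    return true;            /* never used by the guest yet */
            if (gpte & rsvd_mask)
                    return true;            /* malformed entry */
            return false;
    }
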
198 struct kvm_mmu *mmu,
209 if (!PT_HAVE_ACCESSED_DIRTY(mmu))
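
Lines 198-209 appear to belong to FNAME(update_accessed_dirty_bits), called later from line 466: it bails out when A/D bits are unavailable, and otherwise writes the accessed (and, on the leaf, dirty) bits back into guest memory with a compare-and-swap, so a concurrent guest update is detected rather than silently overwritten, in which case the walk is retried. A sketch of that pattern using the GCC/Clang atomic builtin; the kernel uses its own cmpxchg on the user mapping:

    #include <stdbool.h>
    #include <stdint.h>

    #define PT_ACCESSED_MASK (1ull << 5)

    static bool set_accessed(uint64_t *ptep, uint64_t old)
    {
            uint64_t marked = old | PT_ACCESSED_MASK;

            if (old & PT_ACCESSED_MASK)
                    return true;    /* already set, nothing to write back */
            /* Fails if the guest changed the pte underneath us; the
             * caller restarts the walk in that case. */
            return __atomic_compare_exchange_n(ptep, &old, marked, false,
                                               __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    }
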
270 static inline bool FNAME(is_last_gpte)(struct kvm_mmu *mmu,
288 gpte &= level - (PT32_ROOT_LEVEL + mmu->cpu_role.ext.cr4_pse);
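
Line 288 is the subtle one in FNAME(is_last_gpte). It exploits unsigned wraparound: when level is below the threshold PT32_ROOT_LEVEL + cpu_role.ext.cr4_pse, the subtraction wraps to all-ones and the AND leaves gpte (including bit 7, PT_PAGE_SIZE_MASK) intact; otherwise the small difference clears bit 7, so a stray PS bit cannot be misread as a large page. The motivating case is 32-bit paging without CR4.PSE, where hardware ignores bit 7 in a PDE rather than reserving it; in modes where a misplaced bit 7 is reserved, the reserved-bit check (line 420) rejects the entry before line 429 ever tests it. How cr4_pse is populated per mode is outside this listing. A tiny demonstration of just the wraparound trick, with illustrative values:

    #include <assert.h>

    #define PT_PAGE_SIZE_MASK (1u << 7)

    static unsigned mask_ps_bit(unsigned gpte, unsigned level, unsigned threshold)
    {
            gpte &= level - threshold;   /* wraps to ~0u when level < threshold */
            return gpte & PT_PAGE_SIZE_MASK;
    }

    int main(void)
    {
            assert(mask_ps_bit(0x80u, 1, 2) != 0);   /* below threshold: PS kept */
            assert(mask_ps_bit(0x80u, 2, 2) == 0);   /* at threshold: PS dropped */
            return 0;
    }
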
303 struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
326 walker->level = mmu->cpu_role.base.level;
327 pte = kvm_mmu_get_guest_pgd(vcpu, mmu);
328 have_ad = PT_HAVE_ACCESSED_DIRTY(mmu);
333 pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
379 real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(table_gfn),
420 if (unlikely(FNAME(is_rsvd_bits_set)(mmu, pte, walker->level))) {
429 } while (!FNAME(is_last_gpte)(mmu, walker->level, pte));
436 errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
448 real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(gfn), access, &walker->fault);
455 FNAME(protect_clean_gpte)(mmu, &walker->pte_access, pte);
466 ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker,
478 if (fetch_fault && (is_efer_nx(mmu) || is_cr4_smep(mmu)))
518 walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;
528 return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr,
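
Lines 303-518 are the core of the template, FNAME(walk_addr_generic), and line 528 shows the FNAME(walk_addr) wrapper that runs it against vcpu->arch.mmu. The walk starts at cpu_role.base.level with the root from kvm_mmu_get_guest_pgd (lines 326-327); a PAE root instead loads one of the four PDPTRs selected by address bits 31:30 (line 333). Each table GFN is pushed through kvm_translate_gpa (line 379) so that a nested guest's page-table pages are themselves translated via the L1 tables; every fetched gpte is checked for reserved bits (line 420) until the last gpte ends the loop (line 429). Accumulated permissions are then validated (line 436), the final GFN is translated (line 448), clean gptes lose write access (line 455), A/D bits are written back (line 466), the fetch bit is added to the error code only when NX or SMEP is enabled (line 478), and a fault is marked nested when the walked mmu is not the vcpu's walk_mmu (line 518). A self-contained toy that mirrors only the loop's shape, with a fake two-level table in ordinary memory and none of KVM's translation, locking, or error reporting:

    #include <stdint.h>
    #include <stdio.h>

    #define PT_PRESENT_MASK   (1ull << 0)
    #define PT_PAGE_SIZE_MASK (1ull << 7)

    /* Toy guest memory: "gfn" 0 holds the root table, "gfn" 1 the next level. */
    static uint64_t gmem[2][512];

    static unsigned pt_index(uint64_t addr, int level)
    {
            return (addr >> (12 + 9 * (level - 1))) & 0x1ff;
    }

    int main(void)
    {
            uint64_t addr = (3ull << 21) | (7ull << 12) | 0x123;  /* toy GVA */
            uint64_t pte = 0;                  /* toy "CR3": root at gfn 0 */
            int level = 2;

            gmem[0][3] = PT_PRESENT_MASK | (1ull << 12);      /* next table at gfn 1 */
            gmem[1][7] = PT_PRESENT_MASK | (0xabcull << 12);  /* 4K leaf -> gfn 0xabc */

            do {
                    /* In KVM this gfn would first go through kvm_translate_gpa(). */
                    uint64_t *table = gmem[(pte >> 12) & 1];

                    pte = table[pt_index(addr, level)];
                    if (!(pte & PT_PRESENT_MASK))
                            return 1;          /* not-present fault */
                    /* The real walker checks reserved bits here (line 420). */
                    --level;
            } while (level > 0 && !(pte & PT_PAGE_SIZE_MASK));

            printf("gpa = 0x%llx\n",
                   (unsigned long long)((pte & ~0xfffull) | (addr & 0xfff)));
            return 0;
    }
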
546 FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
639 top_level = vcpu->arch.mmu->cpu_role.base.level;
651 if (WARN_ON_ONCE(!VALID_PAGE(vcpu->arch.mmu->root.hpa)))
660 if (unlikely(kvm_mmu_is_dummy_root(vcpu->arch.mmu->root.hpa))) {
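
Lines 639-660 appear to come from the shadow-fetch and page-fault paths: top_level is taken from the same cpu_role that drove the guest walk (line 639), and nothing is instantiated under a root that is invalid (line 651, a WARN because it indicates a caller bug) or under a dummy root with no shadow page behind it (line 660). A guard-clause sketch of that pattern, with an illustrative invalid-page encoding:

    #include <stdbool.h>
    #include <stdint.h>

    #define INVALID_PAGE ((uint64_t)-1)

    static bool root_usable(uint64_t root_hpa, bool dummy)
    {
            if (root_hpa == INVALID_PAGE)
                    return false;   /* caller bug: WARN in the kernel */
            if (dummy)
                    return false;   /* nothing mapped behind a dummy root */
            return true;
    }
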
824 !is_cr0_wp(vcpu->arch.mmu) && !fault->user && fault->slot) {
834 if (is_cr4_smep(vcpu->arch.mmu))
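
Lines 824-834 handle the CR0.WP=0 corner: a supervisor write to a gpte that lacks write permission is architecturally legal when write protection is off, so the shadow mapping is granted write access but demoted to a kernel-only page, and with CR4.SMEP enabled a kernel-only page must additionally lose execute permission. A sketch of that access adjustment; the ACC_* values mirror the usual encoding but are local to the example:

    #define ACC_EXEC_MASK  1u
    #define ACC_WRITE_MASK 2u
    #define ACC_USER_MASK  4u

    static unsigned adjust_for_wp0(unsigned pte_access, int cr0_wp,
                                   int cr4_smep, int user_fault)
    {
            if ((pte_access & ACC_WRITE_MASK) || cr0_wp || user_fault)
                    return pte_access;              /* normal rules apply */

            pte_access |= ACC_WRITE_MASK;           /* allow the kernel write */
            pte_access &= ~ACC_USER_MASK;           /* ... as a kernel page   */
            if (cr4_smep)
                    pte_access &= ~ACC_EXEC_MASK;   /* kernel page: no exec under SMEP */
            return pte_access;
    }
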
868 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
878 WARN_ON_ONCE((addr >> 32) && mmu == vcpu->arch.walk_mmu);
881 r = FNAME(walk_addr_generic)(&walker, vcpu, mmu, addr, access);
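
Lines 868-881 are FNAME(gva_to_gpa): it runs the same walker and, on success, combines the resulting gfn with the page offset of the input address; on failure the caller gets the fault description instead. The WARN on line 878 looks like a sanity check that a high-half address never reaches the vcpu's own walk_mmu where guest virtual addresses are only 32 bits wide; addresses walked through other MMUs can legitimately be full GPAs. A sketch of the success path, assuming a 4K mapping (the real walker folds large-page offset bits into walker.gfn beforehand):

    #include <stdint.h>

    #define INVALID_GPA (~(uint64_t)0)

    static uint64_t compose_gpa(int walk_succeeded, uint64_t gfn, uint64_t addr)
    {
            if (!walk_succeeded)
                    return INVALID_GPA;  /* caller reads the fault instead */
            return (gfn << 12) | (addr & 0xfff);
    }
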
930 FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);