Lines Matching refs:direct

642 	return tdp_mmu_enabled && vcpu->arch.mmu->root_role.direct;
722 if (!sp->role.direct)
740 * For direct MMUs (e.g. TDP or non-paging guests) or passthrough SPs,
744 * For direct SPs in indirect MMUs (shadow paging), i.e. when KVM
764 sp->role.passthrough ? "passthrough" : "direct",
769 sp->role.passthrough ? "passthrough" : "direct",
1700 if (!sp->role.direct)
1855 if (sp->role.direct)
1900 if (WARN_ON_ONCE(sp->role.direct || !vcpu->arch.mmu->sync_spte ||
2106 * unsync, thus @vcpu can be NULL if @role.direct is true.
2142 if (sp->role.direct)
2203 if (!role.direct)
2228 /* Note, @vcpu may be NULL if @role.direct is true; see kvm_mmu_find_shadow_page. */
2264 static union kvm_mmu_page_role kvm_mmu_child_role(u64 *sptep, bool direct,
2273 role.direct = direct;
2312 bool direct, unsigned int access)
2319 role = kvm_mmu_child_role(sptep, direct, access);
2333 !vcpu->arch.mmu->root_role.direct)
2409 * The non-direct sub-pagetable must be updated before linking. For
2412 * so sp->unsync can be true or false. For higher level non-direct
2434 * For the direct sp, if the guest pte's dirty bit
2737 if (vcpu->arch.mmu->root_role.direct)
2968 WARN_ON_ONCE(!sp->role.direct);
3327 * mechanism only supports direct MMUs.
3466 * direct MMUs, nested MMUs are always indirect, and KVM always
3508 * Currently, fast page fault only works for direct mapping
3561 bool is_tdp_mmu = tdp_mmu_enabled && mmu->root_role.direct;
3660 WARN_ON_ONCE(role.direct && role.has_4_byte_gpte);
3708 /* root.pgd is ignored for direct MMUs. */
3870 	 * directory. Otherwise each PAE page directly shadows one guest
3908 if (mmu->root_role.direct ||
4006 if (vcpu->arch.mmu->root_role.direct)
4064 static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
4073 if (direct)
4154 static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
4159 if (mmio_info_in_cache(vcpu, addr, direct))
4173 if (direct)
4237 arch.direct_map = vcpu->arch.mmu->root_role.direct;
4251 if ((vcpu->arch.mmu->root_role.direct != work->arch.direct_map) ||
4259 if (!vcpu->arch.mmu->root_role.direct &&
4671 if (!role.direct && pgd != root->pgd)
4794 * If this is a direct root page, it doesn't have a write flooding
4797 if (!new_role.direct) {
5035 	 * the direct page table on the host, use as many MMU features as
5262 role.base.direct = 1;
5337 role.direct = true;
5432 WARN_ON_ONCE(cpu_role.base.direct);
5458 role.base.direct = false;
5604 r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->root_role.direct);
5610 if (vcpu->arch.mmu->root_role.direct)
5851 bool direct = vcpu->arch.mmu->root_role.direct;
5875 r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct);
5899 if (vcpu->arch.mmu->root_role.direct &&
5916 if (!mmio_info_in_cache(vcpu, cr2_or_gpa, direct) && !is_guest_mode(vcpu))
6515 * Note, huge page splitting always uses direct shadow pages, regardless
6516 * of whether the huge page itself is mapped by a direct or indirect
6520 role = kvm_mmu_child_role(huge_sptep, /*direct=*/true, access);
6526 /* Safe to pass NULL for vCPU since requesting a direct SP. */
6553 * gfn-to-pfn translation since the SP is direct, so no need to
6750 if (sp->role.direct &&
7210 WARN_ON_ONCE(!sp->role.direct);
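For readers scanning these matches: the recurring idiom at lines 642 and 3561 tests the "direct" bit of the root role to tell direct MMUs (TDP or non-paging guests, where GPAs map 1:1 and no guest page tables are shadowed) apart from indirect/shadow MMUs, and combines it with tdp_mmu_enabled to detect whether the TDP MMU is in use. Below is a minimal standalone sketch of that idiom; the struct layouts and the helper are simplified stand-ins for illustration, not KVM's actual definitions.

/*
 * Minimal standalone sketch (stand-in types, not KVM's definitions) of
 * the "direct" role-bit idiom seen at lines 642 and 3561 above.
 */
#include <stdbool.h>
#include <stdio.h>

struct mmu_role {                   /* stand-in for union kvm_mmu_page_role */
	bool direct;                /* no guest page tables are shadowed */
};

struct mmu {                        /* stand-in for struct kvm_mmu */
	struct mmu_role root_role;
};

static bool tdp_mmu_enabled = true; /* module-level knob, as in mmu.c */

/* A direct root combined with the TDP MMU enabled => TDP MMU in use. */
static bool is_tdp_mmu_active(const struct mmu *mmu)
{
	return tdp_mmu_enabled && mmu->root_role.direct;
}

int main(void)
{
	struct mmu tdp    = { .root_role = { .direct = true  } };
	struct mmu shadow = { .root_role = { .direct = false } };

	printf("direct root  -> TDP MMU active: %d\n", is_tdp_mmu_active(&tdp));
	printf("shadow root  -> TDP MMU active: %d\n", is_tdp_mmu_active(&shadow));
	return 0;
}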