Lines Matching refs:root

1886 * level tracks the root level
2327 struct kvm_vcpu *vcpu, hpa_t root,
2331 iterator->shadow_addr = root;
2344 BUG_ON(root != vcpu->arch.mmu->root.hpa);
2358 shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root.hpa,
2567 * Remove the active root from the active page list, the root
2586 * Make the request to free obsolete roots after marking the root
2643 * Don't zap active root pages, the page itself can't be freed
3437 * by a different task, but the root page should always be
3438 * available as the vCPU holds a reference to its root(s).
3575 && VALID_PAGE(mmu->root.hpa);
3598 if (kvm_mmu_is_dummy_root(mmu->root.hpa)) {
3600 } else if (root_to_sp(mmu->root.hpa)) {
3601 mmu_free_root_page(kvm, &mmu->root.hpa, &invalid_list);
3612 mmu->root.hpa = INVALID_PAGE;
3613 mmu->root.pgd = 0;
3675 hpa_t root;
3688 root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level);
3689 mmu->root.hpa = root;
3699 root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT), 0,
3701 mmu->pae_root[i] = root | PT_PRESENT_MASK |
3704 mmu->root.hpa = __pa(mmu->pae_root);
3706 WARN_ONCE(1, "Bad TDP root level = %d\n", shadow_root_level);
3711 /* root.pgd is ignored for direct MMUs. */
3712 mmu->root.pgd = 0;
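The fragments at 3675-3712 are the direct (TDP) root path: either a single root page allocated at the TDP root level, or, for a 3-level PAE root, four sub-roots at 1 GiB strides packed into mmu->pae_root[] with the present bit set, after which the root points at the pae_root table itself. Below is a minimal standalone sketch of that packing pattern; PT_PRESENT_MASK and the i << (30 - PAGE_SHIFT) stride come from the listing, while the fake addresses, the extra flags elided at 3701, and the printing are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define PT_PRESENT_MASK (1ULL << 0)

int main(void)
{
        uint64_t pae_root[4];
        /* Stand-ins for the four per-slot roots mmu_alloc_root() would return. */
        uint64_t sub_root[4] = { 0x11000, 0x22000, 0x33000, 0x44000 };

        for (int i = 0; i < 4; i++) {
                uint64_t root_gfn = (uint64_t)i << (30 - PAGE_SHIFT);

                /* Pack each sub-root into the 4-entry table with the present bit. */
                pae_root[i] = sub_root[i] | PT_PRESENT_MASK;
                printf("slot %d: base gfn %#llx, entry %#llx\n", i,
                       (unsigned long long)root_gfn,
                       (unsigned long long)pae_root[i]);
        }

        /* In the kernel the root would be __pa(mmu->pae_root); print the table's
         * address here just to show the root refers to the table, not one entry. */
        printf("root table at %p\n", (void *)pae_root);
        return 0;
}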
3725 * Check if this is the first shadow root being allocated before
3733 /* Recheck, under the lock, whether this is the first shadow root. */
3785 hpa_t root;
3791 mmu->root.hpa = kvm_mmu_get_dummy_root();
3821 * write-protect the guest's page table root.
3824 root = mmu_alloc_root(vcpu, root_gfn, 0,
3826 mmu->root.hpa = root;
3878 root = mmu_alloc_root(vcpu, root_gfn, quadrant, PT32_ROOT_LEVEL);
3879 mmu->pae_root[i] = root | pm_mask;
3883 mmu->root.hpa = __pa(mmu->pml5_root);
3885 mmu->root.hpa = __pa(mmu->pml4_root);
3887 mmu->root.hpa = __pa(mmu->pae_root);
3890 mmu->root.pgd = root_pgd;
3907 * tables are allocated and initialized at root creation as there is no
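Lines 3878-3890 are the shadow-root equivalent: guest page table roots are shadowed per quadrant into pae_root[], and mmu->root.hpa then points at whichever special table matches the shadow root level (pml5_root, pml4_root, or pae_root). A small sketch of that level-to-table selection follows; the three assignments appear in the listing, but the level constants and the conditionals mapping a level to a table are my assumption based on the usual x86 root-level convention.

#include <stdio.h>

enum { PT32E_ROOT_LEVEL = 3, PT64_ROOT_4LEVEL = 4, PT64_ROOT_5LEVEL = 5 };

/* Pick the special root table whose physical address becomes root.hpa. */
static const char *root_table_for_level(int root_level)
{
        if (root_level == PT64_ROOT_5LEVEL)
                return "pml5_root";
        if (root_level == PT64_ROOT_4LEVEL)
                return "pml4_root";
        return "pae_root";
}

int main(void)
{
        for (int level = PT32E_ROOT_LEVEL; level <= PT64_ROOT_5LEVEL; level++)
                printf("root level %d -> mmu->root.hpa = __pa(mmu->%s)\n",
                       level, root_table_for_level(level));
        return 0;
}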
3969 static bool is_unsync_root(hpa_t root)
3973 if (!VALID_PAGE(root) || kvm_mmu_is_dummy_root(root))
3989 sp = root_to_sp(root);
3993 * PDPTEs for a given PAE root need to be synchronized individually.
4012 if (!VALID_PAGE(vcpu->arch.mmu->root.hpa))
4018 hpa_t root = vcpu->arch.mmu->root.hpa;
4020 if (!is_unsync_root(root))
4023 sp = root_to_sp(root);
4034 hpa_t root = vcpu->arch.mmu->pae_root[i];
4036 if (IS_VALID_PAE_ROOT(root)) {
4037 sp = spte_to_child_sp(root);
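The cluster at 3969-4037 checks whether a root's shadow pages have gone out of sync: is_unsync_root() bails on invalid or dummy roots and otherwise inspects the root's shadow page, and because the PDPTEs of a PAE root are separate pages (3993), kvm_mmu_sync_roots() walks the four pae_root[] entries individually. The standalone model below only mirrors the shape of those checks; the struct, its fields, and root_needs_sync() are invented for illustration and are not the kernel's types.

#include <stdbool.h>
#include <stdio.h>

struct shadow_page {
        bool unsync;            /* this page's SPTEs may be stale */
        bool unsync_children;   /* some descendant page is unsync */
};

/* Rough analogue of "does this root need kvm_mmu_sync_roots() work?" */
static bool root_needs_sync(const struct shadow_page *sp)
{
        return sp && (sp->unsync || sp->unsync_children);
}

int main(void)
{
        /* With PAE paging there is no single root page, so check each of the
         * four PDPTE-backed roots on its own, as the 4034-4037 loop does. */
        struct shadow_page pae_roots[4] = {
                { .unsync = false },
                { .unsync = true },
                { .unsync = false, .unsync_children = true },
                { 0 },
        };

        for (int i = 0; i < 4; i++)
                printf("pae root %d: %s\n", i,
                       root_needs_sync(&pae_roots[i]) ? "needs sync" : "clean");
        return 0;
}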
4128 int root, leaf, level;
4131 leaf = get_sptes_lockless(vcpu, addr, sptes, &root);
4150 for (level = root; level >= leaf; level--)
4156 for (level = root; level >= leaf; level--)
4465 * root was invalidated by a memslot update or a relevant mmu_notifier fired.
4470 struct kvm_mmu_page *sp = root_to_sp(vcpu->arch.mmu->root.hpa);
4479 * only a hint that the current root _may_ be obsolete and needs to be
4481 * previous root, then __kvm_mmu_prepare_zap_page() signals all vCPUs
4482 * to reload even if no vCPU is actively using the root.
4501 if (WARN_ON_ONCE(kvm_mmu_is_dummy_root(vcpu->arch.mmu->root.hpa)))
4671 static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
4676 if (!VALID_PAGE(root->hpa))
4679 if (!role.direct && pgd != root->pgd)
4682 sp = root_to_sp(root->hpa);
4690 * Find out if a previously cached root matching the new pgd/role is available,
4691 * and insert the current root as the MRU in the cache.
4692 * If a matching root is found, it is assigned to kvm_mmu->root and
4694 * If no match is found, kvm_mmu->root is left invalid, the LRU root is
4695 * evicted to make room for the current root, and false is returned.
4703 if (is_root_usable(&mmu->root, new_pgd, new_role))
4715 swap(mmu->root, mmu->prev_roots[i]);
4716 if (is_root_usable(&mmu->root, new_pgd, new_role))
4725 * Find out if a previously cached root matching the new pgd/role is available.
4726 * On entry, mmu->root is invalid.
4727 * If a matching root is found, it is assigned to kvm_mmu->root, the LRU entry
4729 * If no match is found, kvm_mmu->root is left invalid and false is returned.
4744 swap(mmu->root, mmu->prev_roots[i]);
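The comments at 4690-4744 describe the cached-root lookup: check the current root first, then walk the prev_roots cache, swapping each candidate into mmu->root so that a hit leaves the match as the current (MRU) root and a miss leaves the LRU entry to be evicted. Here is a compact standalone model of that swap-based search; struct root_info, the pgd-only usability test, and the array size are simplifications of mine, not the kernel's definitions (the real is_root_usable() also checks the role and the shadow page, per 4671-4682).

#include <stdbool.h>
#include <stdio.h>

#define NUM_PREV_ROOTS  3
#define INVALID_PGD     0

struct root_info { unsigned long pgd; };

/* Simplified stand-in for is_root_usable(): valid and matching pgd. */
static bool root_usable(const struct root_info *root, unsigned long new_pgd)
{
        return root->pgd != INVALID_PGD && root->pgd == new_pgd;
}

static void swap_roots(struct root_info *a, struct root_info *b)
{
        struct root_info t = *a; *a = *b; *b = t;
}

/* Returns true if a usable root was found; it is then the current root. */
static bool find_cached_root(struct root_info *root,
                             struct root_info prev[NUM_PREV_ROOTS],
                             unsigned long new_pgd)
{
        if (root_usable(root, new_pgd))
                return true;

        for (int i = 0; i < NUM_PREV_ROOTS; i++) {
                swap_roots(root, &prev[i]);
                if (root_usable(root, new_pgd))
                        return true;
        }

        /* No match: the old current root has been pushed into the cache and
         * whatever ended up in *root is stale, ready to be freed/evicted. */
        return false;
}

int main(void)
{
        struct root_info root = { .pgd = 0x1000 };
        struct root_info prev[NUM_PREV_ROOTS] = { { 0x2000 }, { 0x3000 }, { 0x4000 } };

        printf("hit for 0x3000: %d\n", find_cached_root(&root, prev, 0x3000));
        printf("current pgd now: %#lx\n", root.pgd);
        return 0;
}

The repeated swap is what keeps the cache MRU-ordered without a separate shift pass: on a hit, every entry walked over so far has already been rotated one slot toward the back, so the former current root and the more recently used cached roots keep their relative order.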
4759 if (VALID_PAGE(mmu->root.hpa) && !root_to_sp(mmu->root.hpa))
4762 if (VALID_PAGE(mmu->root.hpa))
4774 * Return immediately if no usable root was found, kvm_mmu_reload()
4775 * will establish a valid root prior to the next VM-Enter.
4781 * It's possible that the cached previous root page is obsolete because
4784 * which will free the root set here and allocate a new one.
4802 * If this is a direct root page, it doesn't have a write flooding
4806 struct kvm_mmu_page *sp = root_to_sp(vcpu->arch.mmu->root.hpa);
5630 * Flush any TLB entries for the new root, the provenance of the root
5632 * for a freed root, in theory another hypervisor could have left
5634 * flush when freeing a root (see kvm_tdp_mmu_put_root()).
5646 WARN_ON_ONCE(VALID_PAGE(vcpu->arch.root_mmu.root.hpa));
5648 WARN_ON_ONCE(VALID_PAGE(vcpu->arch.guest_mmu.root.hpa));
5667 * (or any shadow paging flavor with a dummy root, see note below)
5684 if (is_obsolete_root(kvm, mmu->root.hpa))
5861 if (WARN_ON_ONCE(!VALID_PAGE(vcpu->arch.mmu->root.hpa)))
6005 __kvm_mmu_invalidate_addr(vcpu, mmu, addr, mmu->root.hpa);
6151 mmu->root.hpa = INVALID_PAGE;
6152 mmu->root.pgd = 0;
6161 * When using PAE paging, the four PDPTEs are treated as 'root' pages,
6259 * loaded a new root, i.e. the shadow pages being zapped cannot
6281 * KVM performs a local TLB flush when allocating a new root (see
6341 * Deferring the zap until the final reference to the root is put would