Lines Matching defs:root

37 	 * for zapping and thus puts the TDP MMU's reference to each root, i.e.
76 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
78 if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
82 * The TDP MMU itself holds a reference to each root until the root is
84 * put for a valid root.
86 KVM_BUG_ON(!is_tdp_mmu_page(root) || !root->role.invalid, kvm);
89 list_del_rcu(&root->link);
91 call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
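
The kvm_tdp_mmu_put_root() lines above (76-91) capture the put side of the root lifecycle: drop one reference, and only the final put unlinks the root from the RCU-protected list and defers the free to an RCU callback. Below is a minimal userspace sketch of that drop-to-zero pattern, assuming C11 atomics in place of refcount_t and a direct callback in place of call_rcu(); struct root, root_put() and free_cb are illustrative names, not the kernel's.

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct root {
        atomic_int refcount;             /* cf. refcount_t tdp_mmu_root_count */
        void (*free_cb)(struct root *);  /* cf. the call_rcu() callback */
    };

    static void root_free(struct root *r)
    {
        printf("freeing root %p\n", (void *)r);
        free(r);
    }

    /* Drop one reference; only the caller that hits zero tears the root down. */
    static void root_put(struct root *r)
    {
        /* atomic_fetch_sub() returns the old value, so 1 means "we were last". */
        if (atomic_fetch_sub(&r->refcount, 1) != 1)
            return;

        /* Last reference: this is where kvm_tdp_mmu_put_root() does
         * list_del_rcu() and hands the page to call_rcu(); the sketch just
         * invokes the callback directly. */
        r->free_cb(r);
    }

    int main(void)
    {
        struct root *r = malloc(sizeof(*r));

        atomic_init(&r->refcount, 2);   /* e.g. one vCPU ref + the TDP MMU's own */
        r->free_cb = root_free;

        root_put(r);   /* one reference remains; the root stays alive */
        root_put(r);   /* final put: unlink and (deferred) free */
        return 0;
    }
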
95 * Returns the next root after @prev_root (or the first root if @prev_root is
96 * NULL). A reference to the returned root is acquired, and the reference to
147 * recent root. (Unless keeping a live reference is desirable.)
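
The iterator contract described at lines 95-96 (take a reference on the root being returned while dropping the reference on the previous root) is what lets the yield-safe walks drop mmu_lock mid-iteration. A simplified sketch of that "pin exactly one root at a time" walk, assuming C11 atomics; next_root(), get_root() and put_root() here are stand-ins, not the kernel helpers.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct root {
        atomic_int refcount;
        struct root *next;
    };

    /* Kernel analogue: kvm_tdp_mmu_get_root() / refcount_inc_not_zero() --
     * never revive a root whose count has already hit zero. */
    static bool get_root(struct root *r)
    {
        int old = atomic_load(&r->refcount);

        while (old) {
            if (atomic_compare_exchange_weak(&r->refcount, &old, old + 1))
                return true;
        }
        return false;
    }

    static void put_root(struct root *r)
    {
        atomic_fetch_sub(&r->refcount, 1);   /* free-on-zero omitted here */
    }

    /* Return the root after @prev (or the first root when @prev is NULL),
     * taking a reference on the returned root and dropping the one on @prev.
     * The walker always pins exactly one root, which is what makes it safe
     * to drop the lock and yield between iterations. */
    static struct root *next_root(struct root *head, struct root *prev)
    {
        struct root *next = prev ? prev->next : head;

        while (next && !get_root(next))
            next = next->next;

        if (prev)
            put_root(prev);

        return next;
    }

    int main(void)
    {
        struct root a, b;

        atomic_init(&a.refcount, 1);
        atomic_init(&b.refcount, 1);
        a.next = &b;
        b.next = NULL;

        for (struct root *r = next_root(&a, NULL); r; r = next_root(&a, r))
            ;   /* each iteration holds a reference to exactly one root */
        return 0;
    }
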
232 struct kvm_mmu_page *root;
235 * Check for an existing root before acquiring the pages lock to avoid
236 * unnecessary serialization if multiple vCPUs are loading a new root.
238 * a valid root on behalf of the primary vCPU.
242 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, as_id) {
243 if (root->role.word == role.word)
250 * Recheck for an existing root after acquiring the pages lock, another
251 * vCPU may have raced ahead and created a new usable root. Manually
253 * lock is *not* held. WARN if grabbing a reference to a usable root
254 * fails, as the last reference to a root can only be put *after* the
255 * root has been invalidated, which requires holding mmu_lock for write.
257 list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
258 if (root->role.word == role.word &&
259 !WARN_ON_ONCE(!kvm_tdp_mmu_get_root(root)))
263 root = tdp_mmu_alloc_sp(vcpu);
264 tdp_mmu_init_sp(root, NULL, 0, role);
270 * the TDP MMU itself, which is held until the root is invalidated and
273 refcount_set(&root->tdp_mmu_root_count, 2);
274 list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
282 * and actually consuming the root if it's invalidated after dropping
283 * mmu_lock, and the root can't be freed as this vCPU holds a reference.
285 mmu->root.hpa = __pa(root->spt);
286 mmu->root.pgd = 0;
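
The root-allocation block above (lines 232-286) is a classic check / lock / recheck scheme: scan for a usable root without the pages lock, recheck under the lock in case another vCPU won the race, and only then allocate and publish a new root with a refcount of 2 (one for the vCPU loading it, one for the TDP MMU itself). A simplified userspace sketch of that pattern, assuming a pthread mutex in place of the pages lock and a plain linked list in place of tdp_mmu_roots; find_and_get_root() and get_or_alloc_root() are made-up names.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    struct root {
        unsigned long role;     /* stands in for union kvm_mmu_page_role */
        atomic_int refcount;
        struct root *next;
    };

    static struct root *_Atomic roots;                        /* cf. tdp_mmu_roots */
    static pthread_mutex_t roots_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Lockless scan; safe in this sketch only because roots are never freed. */
    static struct root *find_and_get_root(unsigned long role)
    {
        for (struct root *r = atomic_load(&roots); r; r = r->next) {
            if (r->role != role)
                continue;
            atomic_fetch_add(&r->refcount, 1);   /* "get" the root */
            return r;
        }
        return NULL;
    }

    static struct root *get_or_alloc_root(unsigned long role)
    {
        struct root *r;

        /* Fast path: skip the lock entirely if a usable root already exists. */
        r = find_and_get_root(role);
        if (r)
            return r;

        pthread_mutex_lock(&roots_lock);

        /* Recheck under the lock: another vCPU may have raced ahead of us. */
        r = find_and_get_root(role);
        if (!r) {
            r = calloc(1, sizeof(*r));
            r->role = role;
            /* Two references: one for the vCPU, one for the TDP MMU itself. */
            atomic_init(&r->refcount, 2);
            r->next = atomic_load(&roots);
            atomic_store(&roots, r);             /* publish, cf. list_add_rcu() */
        }

        pthread_mutex_unlock(&roots_lock);
        return r;
    }

    int main(void)
    {
        struct root *a = get_or_alloc_root(0x1);
        struct root *b = get_or_alloc_root(0x1);   /* reuses the root a created */

        return (a == b) ? 0 : 1;
    }
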
688 for_each_tdp_pte(_iter, root_to_sp(_mmu->root.hpa), _start, _end)
700 * from the paging structure root.
746 static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
754 for_each_tdp_pte_min_level(iter, root, zap_level, start, end) {
772 static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
777 * The root must have an elevated refcount so that it's reachable via
781 * callback. Dropping mmu_lock with an unreachable root would result
786 WARN_ON_ONCE(!refcount_read(&root->tdp_mmu_root_count));
794 * 4KiB=>2MiB=>1GiB=>root, in order to better honor need_resched() (all
810 __tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_4K);
811 __tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_2M);
813 __tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_1G);
814 __tdp_mmu_zap_root(kvm, root, shared, root->role.level);
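
tdp_mmu_zap_root() zaps in multiple passes of increasing granularity (4KiB, then 2MiB, then 1GiB, then the root level) so that no single SPTE write has to tear down a huge subtree between chances to reschedule or yield mmu_lock. A toy two-level analogue of that bottom-up ordering, not the kernel iterator; leaf_table, root_table and the helpers are invented for illustration.

    #include <stdlib.h>
    #include <string.h>

    #define ENTRIES 8   /* tiny tables; real page tables hold 512 entries */

    /* A toy two-level "page table": the root holds pointers to leaf tables. */
    struct leaf_table { int pte[ENTRIES]; };
    struct root_table { struct leaf_table *child[ENTRIES]; };

    /* Pass 1: clear only leaf-level entries, leaving the structure in place.
     * In the kernel this walk may yield (drop mmu_lock, reschedule) between
     * entries, so keeping the work per entry small is what matters. */
    static void zap_leaf_level(struct root_table *root)
    {
        for (int i = 0; i < ENTRIES; i++)
            if (root->child[i])
                memset(root->child[i]->pte, 0, sizeof(root->child[i]->pte));
    }

    /* Pass 2: clear the root-level entries; every child freed here is
     * already empty thanks to pass 1, so each step stays cheap. */
    static void zap_root_level(struct root_table *root)
    {
        for (int i = 0; i < ENTRIES; i++) {
            free(root->child[i]);
            root->child[i] = NULL;
        }
    }

    int main(void)
    {
        struct root_table root = { .child = { NULL } };

        for (int i = 0; i < ENTRIES; i++)
            root.child[i] = calloc(1, sizeof(struct leaf_table));

        zap_leaf_level(&root);   /* the 4KiB pass in the kernel */
        zap_root_level(&root);   /* the root-level pass */
        return 0;
    }
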
824 * This helper intentionally doesn't allow zapping a root shadow page,
847 static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
858 for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) {
875 if (!root->role.invalid)
895 struct kvm_mmu_page *root;
898 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, -1)
899 flush = tdp_mmu_zap_leafs(kvm, root, start, end, true, flush);
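
tdp_mmu_zap_leafs() only clears leaf SPTEs in a GFN range and accumulates whether the caller owes a TLB flush; per the comment above, it never tears down the root page itself. A small sketch of that shape, assuming a flat array of fake SPTEs and an invented PRESENT_BIT rather than the real SPTE layout.

    #include <stdbool.h>
    #include <stdint.h>

    /* Invented present bit; this is not the real SPTE layout. */
    #define PRESENT_BIT (UINT64_C(1) << 0)

    /* Clear every present leaf entry in [start, end) and report whether
     * anything was zapped, i.e. whether the caller owes a TLB flush.  This
     * mirrors the shape of tdp_mmu_zap_leafs(): leaf-only, range-bound, and
     * the flush requirement is accumulated into @flush. */
    static bool zap_leafs(uint64_t *sptes, uint64_t start, uint64_t end, bool flush)
    {
        for (uint64_t gfn = start; gfn < end; gfn++) {
            if (!(sptes[gfn] & PRESENT_BIT))
                continue;
            sptes[gfn] = 0;
            flush = true;
        }
        return flush;
    }

    int main(void)
    {
        uint64_t sptes[16] = { [3] = PRESENT_BIT, [7] = PRESENT_BIT };

        return zap_leafs(sptes, 0, 16, false) ? 0 : 1;
    }
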
906 struct kvm_mmu_page *root;
910 * before returning to the caller. Zap directly even if the root is
921 for_each_tdp_mmu_root_yield_safe(kvm, root)
922 tdp_mmu_zap_root(kvm, root, false);
931 struct kvm_mmu_page *root;
935 for_each_tdp_mmu_root_yield_safe(kvm, root) {
936 if (!root->tdp_mmu_scheduled_root_to_zap)
939 root->tdp_mmu_scheduled_root_to_zap = false;
940 KVM_BUG_ON(!root->role.invalid, kvm);
944 * flush when allocating a new root (see kvm_mmu_load()), and
951 tdp_mmu_zap_root(kvm, root, true);
954 * The reference needs to be put *after* zapping the root, as
955 * the root must be reachable by mmu_notifiers while it's being
958 kvm_tdp_mmu_put_root(kvm, root);
965 * Mark each TDP MMU root as invalid to prevent vCPUs from reusing a root that
976 struct kvm_mmu_page *root;
979 * mmu_lock must be held for write to ensure that a root doesn't become
980 * invalid while there are active readers (invalidating a root while
998 list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
1003 * root alive after it's been zapped.
1005 if (!root->role.invalid) {
1006 root->tdp_mmu_scheduled_root_to_zap = true;
1007 root->role.invalid = true;
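
Taken together, the zap and invalidate paths above form a two-phase protocol: under mmu_lock held for write, mark every live root invalid and flag it as scheduled for zapping; later, zap only the scheduled roots and drop the TDP MMU's reference only after the zap, so mmu_notifiers can still reach the root while it is being emptied. A condensed sketch of the two phases, with toy_root and the helper names invented for illustration and all locking omitted.

    #include <stdbool.h>
    #include <stddef.h>

    /* Toy root with only the fields this sketch needs (names are invented). */
    struct toy_root {
        bool invalid;
        bool scheduled_to_zap;
        int  refcount;
        struct toy_root *next;
    };

    /* Phase 1 (kernel: mmu_lock held for write): mark every live root invalid
     * so no vCPU can reuse it, and remember which roots this call must zap. */
    static void invalidate_all_roots(struct toy_root *roots)
    {
        for (struct toy_root *r = roots; r; r = r->next) {
            if (!r->invalid) {
                r->scheduled_to_zap = true;
                r->invalid = true;
            }
        }
    }

    static void zap_root(struct toy_root *r) { (void)r; /* tear down the tree */ }
    static void put_root(struct toy_root *r) { r->refcount--; }

    /* Phase 2 (kernel: runs later, mmu_lock held for read): zap only the roots
     * phase 1 scheduled, and drop the reference *after* zapping so the root
     * stays reachable by mmu_notifiers until it is empty. */
    static void zap_invalidated_roots(struct toy_root *roots)
    {
        for (struct toy_root *r = roots; r; r = r->next) {
            if (!r->scheduled_to_zap)
                continue;
            r->scheduled_to_zap = false;
            zap_root(r);
            put_root(r);
        }
    }

    int main(void)
    {
        struct toy_root root = { .refcount = 1 };   /* the TDP MMU's reference */

        invalidate_all_roots(&root);
        zap_invalidated_roots(&root);
        return root.refcount;   /* 0: reference dropped only after the zap */
    }
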
1190 struct kvm_mmu_page *root;
1192 __for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false)
1193 flush = tdp_mmu_zap_leafs(kvm, root, range->start, range->end,
1206 struct kvm_mmu_page *root;
1214 for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
1217 tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
1289 static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1300 for_each_tdp_pte_min_level(iter, root, min_level, start, end) {
1330 struct kvm_mmu_page *root;
1335 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
1336 spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
1442 struct kvm_mmu_page *root,
1463 for_each_tdp_pte_min_level(iter, root, target_level + 1, start, end) {
1515 struct kvm_mmu_page *root;
1519 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id) {
1520 r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared);
1522 kvm_tdp_mmu_put_root(kvm, root);
1531 * All TDP MMU shadow pages share the same role as their root, aside
1538 static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1541 const u64 dbit = tdp_mmu_need_write_protect(root) ? PT_WRITABLE_MASK :
1548 tdp_root_for_each_pte(iter, root, start, end) {
1581 struct kvm_mmu_page *root;
1585 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
1586 spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
1592 static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
1595 const u64 dbit = (wrprot || tdp_mmu_need_write_protect(root)) ? PT_WRITABLE_MASK :
1603 tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
1643 struct kvm_mmu_page *root;
1645 for_each_valid_tdp_mmu_root(kvm, root, slot->as_id)
1646 clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
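
The dirty-logging lines above show the key decision in clear_dirty_gfn_range() and clear_dirty_pt_masked(): depending on whether dirty state is tracked by write protection or by hardware dirty bits, the walk clears either the writable bit or the dirty bit, and the masked variant visits only the 4KiB pages selected by the mask starting from its lowest set bit. A compact sketch of the masked variant, assuming a flat array of fake SPTEs, invented bit positions, and __builtin_ctzll() in place of __ffs().

    #include <stdbool.h>
    #include <stdint.h>

    /* Invented bit positions; this is not the real SPTE layout. */
    #define WRITABLE_BIT (UINT64_C(1) << 1)
    #define DIRTY_BIT    (UINT64_C(1) << 9)

    /* Clear dirty state for the 4KiB pages selected by @mask, relative to
     * @base_gfn.  As in clear_dirty_pt_masked(), which bit gets cleared
     * depends on how dirtying is tracked: write protection (clear the
     * writable bit so the next write faults) or hardware dirty bits. */
    static void clear_dirty_masked(uint64_t *sptes, uint64_t base_gfn,
                                   uint64_t mask, bool wrprot)
    {
        const uint64_t clear = wrprot ? WRITABLE_BIT : DIRTY_BIT;

        while (mask) {
            /* __builtin_ctzll() plays the role of __ffs(): lowest set bit. */
            uint64_t gfn = base_gfn + (uint64_t)__builtin_ctzll(mask);

            sptes[gfn] &= ~clear;
            mask &= mask - 1;   /* drop the bit we just handled */
        }
    }

    int main(void)
    {
        uint64_t sptes[64] = {
            [0] = WRITABLE_BIT | DIRTY_BIT,
            [5] = WRITABLE_BIT | DIRTY_BIT,
        };

        /* Clear the dirty bit for gfns 0 and 5 only. */
        clear_dirty_masked(sptes, 0, (UINT64_C(1) << 0) | (UINT64_C(1) << 5), false);
        return (int)(sptes[5] & DIRTY_BIT);   /* 0 on success */
    }
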
1650 struct kvm_mmu_page *root,
1660 for_each_tdp_pte_min_level(iter, root, PG_LEVEL_2M, start, end) {
1707 struct kvm_mmu_page *root;
1710 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
1711 zap_collapsible_spte_range(kvm, root, slot);
1719 static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
1730 for_each_tdp_pte_min_level(iter, root, min_level, gfn, gfn + 1) {
1759 struct kvm_mmu_page *root;
1763 for_each_valid_tdp_mmu_root(kvm, root, slot->as_id)
1764 spte_set |= write_protect_gfn(kvm, root, gfn, min_level);