Lines Matching defs:arch

252 .efer = vcpu->arch.efer,
645 return tdp_mmu_enabled && vcpu->arch.mmu->root_role.direct;
687 r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
691 r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
696 r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadowed_info_cache,
701 return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
707 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
708 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
709 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadowed_info_cache);
710 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
794 return &slot->arch.lpage_info[level - 2][idx];
836 kvm->arch.indirect_shadow_pages++;
875 &kvm->arch.possible_nx_huge_pages);
893 kvm->arch.indirect_shadow_pages--;
1104 return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
1627 struct kvm_mmu_memory_cache *cache = &vcpu->arch.mmu_pte_list_desc_cache;
1674 * kvm->arch.n_used_mmu_pages values. We need a global,
1680 kvm->arch.n_used_mmu_pages += nr;
1874 &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)]) \
1879 union kvm_mmu_page_role root_role = vcpu->arch.mmu->root_role;
1903 if (WARN_ON_ONCE(sp->role.direct || !vcpu->arch.mmu->sync_spte ||
1916 return vcpu->arch.mmu->sync_spte(vcpu, sp, i);
1978 unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
2218 sp->mmu_valid_gen = kvm->arch.mmu_valid_gen;
2219 list_add(&sp->link, &kvm->arch.active_mmu_pages);
2242 sp_list = &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
2259 .page_header_cache = &vcpu->arch.mmu_page_header_cache,
2260 .shadow_page_cache = &vcpu->arch.mmu_shadow_page_cache,
2261 .shadowed_info_cache = &vcpu->arch.mmu_shadowed_info_cache,
2332 iterator->level = vcpu->arch.mmu->root_role.level;
2335 vcpu->arch.mmu->cpu_role.base.level < PT64_ROOT_4LEVEL &&
2336 !vcpu->arch.mmu->root_role.direct)
2344 BUG_ON(root != vcpu->arch.mmu->root.hpa);
2347 = vcpu->arch.mmu->pae_root[(addr >> 30) & 3];
2358 shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root.hpa,
2427 __link_shadow_page(vcpu->kvm, &vcpu->arch.mmu_pte_list_desc_cache, sptep, sp, true);
2637 if (list_empty(&kvm->arch.active_mmu_pages))
2641 list_for_each_entry_safe_reverse(sp, tmp, &kvm->arch.active_mmu_pages, link) {
2667 if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
2668 return kvm->arch.n_max_mmu_pages -
2669 kvm->arch.n_used_mmu_pages;
2705 if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
2706 kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages -
2709 goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
2712 kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
2740 if (vcpu->arch.mmu->root_role.direct)
2804 spin_lock(&kvm->arch.mmu_unsync_pages_lock);
2822 spin_unlock(&kvm->arch.mmu_unsync_pages_lock);
3656 union kvm_mmu_page_role role = vcpu->arch.mmu->root_role;
3673 struct kvm_mmu *mmu = vcpu->arch.mmu;
3772 smp_store_release(&kvm->arch.shadow_root_allocated, true);
3781 struct kvm_mmu *mmu = vcpu->arch.mmu;
3899 struct kvm_mmu *mmu = vcpu->arch.mmu;
4009 if (vcpu->arch.mmu->root_role.direct)
4012 if (!VALID_PAGE(vcpu->arch.mmu->root.hpa))
4017 if (vcpu->arch.mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL) {
4018 hpa_t root = vcpu->arch.mmu->root.hpa;
4034 hpa_t root = vcpu->arch.mmu->pae_root[i];
4051 if (is_unsync_root(vcpu->arch.mmu->prev_roots[i].hpa))
4055 kvm_mmu_free_roots(vcpu->kvm, vcpu->arch.mmu, roots_to_free);
4148 rsvd_check = &vcpu->arch.mmu->shadow_zero_check;
4232 u32 id = vcpu->arch.apf.id;
4235 vcpu->arch.apf.id = 1;
4237 return (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
4243 struct kvm_arch_async_pf arch;
4245 arch.token = alloc_apf_token(vcpu);
4246 arch.gfn = fault->gfn;
4247 arch.error_code = fault->error_code;
4248 arch.direct_map = vcpu->arch.mmu->root_role.direct;
4249 arch.cr3 = kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu);
4252 kvm_vcpu_gfn_to_hva(vcpu, fault->gfn), &arch);
4259 if (WARN_ON_ONCE(work->arch.error_code & PFERR_PRIVATE_ACCESS))
4262 if ((vcpu->arch.mmu->root_role.direct != work->arch.direct_map) ||
4270 if (!vcpu->arch.mmu->root_role.direct &&
4271 work->arch.cr3 != kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu))
4274 kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, work->arch.error_code, true, NULL);
4470 struct kvm_mmu_page *sp = root_to_sp(vcpu->arch.mmu->root.hpa);
4501 if (WARN_ON_ONCE(kvm_mmu_is_dummy_root(vcpu->arch.mmu->root.hpa)))
4549 u32 flags = vcpu->arch.apf.host_apf_flags;
4567 vcpu->arch.l1tf_flush_l1d = true;
4576 vcpu->arch.apf.host_apf_flags = 0;
4770 struct kvm_mmu *mmu = vcpu->arch.mmu;
4806 struct kvm_mmu_page *sp = root_to_sp(vcpu->arch.mmu->root.hpa);
4936 vcpu->arch.reserved_gpa_bits,
4985 vcpu->arch.reserved_gpa_bits, execonly,
5354 struct kvm_mmu *context = &vcpu->arch.root_mmu;
5405 struct kvm_mmu *context = &vcpu->arch.root_mmu;
5430 struct kvm_mmu *context = &vcpu->arch.guest_mmu;
5482 struct kvm_mmu *context = &vcpu->arch.guest_mmu;
5510 struct kvm_mmu *context = &vcpu->arch.root_mmu;
5522 struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
5539 * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using
5541 * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
5586 vcpu->arch.root_mmu.root_role.invalid = 1;
5587 vcpu->arch.guest_mmu.root_role.invalid = 1;
5588 vcpu->arch.nested_mmu.root_role.invalid = 1;
5589 vcpu->arch.root_mmu.cpu_role.ext.valid = 0;
5590 vcpu->arch.guest_mmu.cpu_role.ext.valid = 0;
5591 vcpu->arch.nested_mmu.cpu_role.ext.valid = 0;
5612 r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->root_role.direct);
5618 if (vcpu->arch.mmu->root_role.direct)
5645 kvm_mmu_free_roots(kvm, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
5646 WARN_ON_ONCE(VALID_PAGE(vcpu->arch.root_mmu.root.hpa));
5647 kvm_mmu_free_roots(kvm, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
5648 WARN_ON_ONCE(VALID_PAGE(vcpu->arch.guest_mmu.root.hpa));
5698 __kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.root_mmu);
5699 __kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.guest_mmu);
5820 if (!vcpu->kvm->arch.indirect_shadow_pages)
5859 bool direct = vcpu->arch.mmu->root_role.direct;
5861 if (WARN_ON_ONCE(!VALID_PAGE(vcpu->arch.mmu->root.hpa)))
5874 vcpu->kvm->arch.vm_type == KVM_X86_SW_PROTECTED_VM &&
5907 if (vcpu->arch.mmu->root_role.direct &&
5914 * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still
5960 if (WARN_ON_ONCE(mmu != vcpu->arch.mmu))
5992 /* It's actually a GPA for vcpu->arch.guest_mmu. */
5993 if (mmu != &vcpu->arch.guest_mmu) {
6026 kvm_mmu_invalidate_addr(vcpu, vcpu->arch.walk_mmu, gva, KVM_MMU_ROOTS_ALL);
6034 struct kvm_mmu *mmu = vcpu->arch.mmu;
6156 /* vcpu->arch.guest_mmu isn't used when !tdp_enabled. */
6157 if (!tdp_enabled && mmu == &vcpu->arch.guest_mmu)
6203 vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
6204 vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO;
6206 vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
6207 vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;
6209 vcpu->arch.mmu_shadow_page_cache.init_value =
6211 if (!vcpu->arch.mmu_shadow_page_cache.init_value)
6212 vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
6214 vcpu->arch.mmu = &vcpu->arch.root_mmu;
6215 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
6217 ret = __kvm_mmu_create(vcpu, &vcpu->arch.guest_mmu);
6221 ret = __kvm_mmu_create(vcpu, &vcpu->arch.root_mmu);
6227 free_mmu_pages(&vcpu->arch.guest_mmu);
6240 &kvm->arch.active_mmu_pages, link) {
6269 &kvm->arch.zapped_obsolete_pages, &nr_zapped);
6285 kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
6311 kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1;
6350 return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
6355 kvm->arch.shadow_mmio_value = shadow_mmio_value;
6356 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
6357 INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
6358 INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages);
6359 spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
6364 kvm->arch.split_page_header_cache.kmem_cache = mmu_page_header_cache;
6365 kvm->arch.split_page_header_cache.gfp_zero = __GFP_ZERO;
6367 kvm->arch.split_shadow_page_cache.gfp_zero = __GFP_ZERO;
6369 kvm->arch.split_desc_cache.kmem_cache = pte_list_desc_cache;
6370 kvm->arch.split_desc_cache.gfp_zero = __GFP_ZERO;
6375 kvm_mmu_free_memory_cache(&kvm->arch.split_desc_cache);
6376 kvm_mmu_free_memory_cache(&kvm->arch.split_page_header_cache);
6377 kvm_mmu_free_memory_cache(&kvm->arch.split_shadow_page_cache);
6489 return need_topup(&kvm->arch.split_desc_cache, SPLIT_DESC_CACHE_MIN_NR_OBJECTS) ||
6490 need_topup(&kvm->arch.split_page_header_cache, 1) ||
6491 need_topup(&kvm->arch.split_shadow_page_cache, 1);
6515 r = __kvm_mmu_topup_memory_cache(&kvm->arch.split_desc_cache, capacity,
6520 r = kvm_mmu_topup_memory_cache(&kvm->arch.split_page_header_cache, 1);
6524 return kvm_mmu_topup_memory_cache(&kvm->arch.split_shadow_page_cache, 1);
6547 caches.page_header_cache = &kvm->arch.split_page_header_cache;
6548 caches.shadow_page_cache = &kvm->arch.split_shadow_page_cache;
6559 struct kvm_mmu_memory_cache *cache = &kvm->arch.split_desc_cache;
6856 list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
6936 if (!kvm->arch.n_used_mmu_pages &&
6945 &kvm->arch.zapped_obsolete_pages);
7042 wake_up_process(kvm->arch.nx_huge_page_recovery_thread);
7127 free_mmu_pages(&vcpu->arch.root_mmu);
7128 free_mmu_pages(&vcpu->arch.guest_mmu);
7188 wake_up_process(kvm->arch.nx_huge_page_recovery_thread);
7220 if (list_empty(&kvm->arch.possible_nx_huge_pages))
7230 sp = list_first_entry(&kvm->arch.possible_nx_huge_pages,
7336 &kvm->arch.nx_huge_page_recovery_thread);
7338 kthread_unpark(kvm->arch.nx_huge_page_recovery_thread);
7345 if (kvm->arch.nx_huge_page_recovery_thread)
7346 kthread_stop(kvm->arch.nx_huge_page_recovery_thread);
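The matches at 687-710 pair the per-vCPU MMU cache topups done before a page fault is handled with the frees done when the vCPU's MMU is torn down. Below is a minimal sketch of that pairing, assembled from the calls visible in the listing; the wrapper shapes and the minimum-object counts passed to kvm_mmu_topup_memory_cache() are not shown in the matches and are assumptions here (the topup wrapper's name follows the caller at 5612).

static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
{
	int r;

	/* Descriptors for rmap and parent-PTE lists consumed while mapping a page. */
	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
				       1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
	if (r)
		return r;

	/* Pages used as the shadow page tables themselves, one per paging level. */
	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
				       PT64_ROOT_MAX_LEVEL);
	if (r)
		return r;

	/* Shadowed-translation info is only needed for indirect (shadowed) MMUs. */
	if (maybe_indirect) {
		r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadowed_info_cache,
					       PT64_ROOT_MAX_LEVEL);
		if (r)
			return r;
	}

	/* struct kvm_mmu_page headers that track each shadow page. */
	return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
					  PT64_ROOT_MAX_LEVEL);
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadowed_info_cache);
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
}

The topup runs before mmu_lock is taken so the allocations can sleep; the fault path then draws from these caches while holding the lock, which is why the caches are per vCPU rather than shared.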
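The matches at 2667-2712 are the shadow-page accounting: kvm->arch.n_used_mmu_pages counts shadow pages currently in use and kvm->arch.n_max_mmu_pages caps them, with the oldest pages zapped when a new cap is set below current usage. A short sketch of that arithmetic follows; the function boundaries and the mmu_lock acquire/release are reconstructed around the matched fragments and should be treated as approximate.

static unsigned long kvm_mmu_available_pages(struct kvm *kvm)
{
	if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
		return kvm->arch.n_max_mmu_pages -
		       kvm->arch.n_used_mmu_pages;

	return 0;
}

void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
{
	write_lock(&kvm->mmu_lock);

	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
		/* Zap the oldest shadow pages to get under the new goal. */
		kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages -
					     goal_nr_mmu_pages);
		/* Zapping may fall short; settle for whatever usage remains. */
		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
	}

	kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;

	write_unlock(&kvm->mmu_lock);
}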