Lines matching refs: gfn

287 	gfn_t gfn = kvm_mmu_page_get_gfn(sp, spte_index(sptep));
289 kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level);
292 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
295 u64 spte = make_mmio_spte(vcpu, gfn, access);
297 trace_mark_mmio_spte(sptep, gfn, spte);
720 return sp->gfn;
725 return sp->gfn + (index << ((sp->role.level - 1) * SPTE_LEVEL_BITS));
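
These matches appear to come from KVM's x86 MMU (arch/x86/kvm/mmu/mmu.c or a close derivative). Line 725 recomputes, rather than stores, the gfn mapped by a given SPTE index of a direct shadow page. A minimal userspace sketch of that arithmetic, assuming SPTE_LEVEL_BITS is 9 (512 entries per page-table page) and that level 1 is the 4KiB level; direct_sp_entry_gfn() and main() are illustrative, not kernel code:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t gfn_t;

    #define SPTE_LEVEL_BITS 9   /* assumption: 512 entries per table, as on x86 */

    /* gfn covered by SPTE 'index' of a direct shadow page based at 'base_gfn' */
    static gfn_t direct_sp_entry_gfn(gfn_t base_gfn, int index, int level)
    {
            /* each entry at 'level' spans 512^(level - 1) 4KiB pages */
            return base_gfn + ((gfn_t)index << ((level - 1) * SPTE_LEVEL_BITS));
    }

    int main(void)
    {
            /* entry 3 of a level-2 (2MiB entries) direct SP based at gfn 0x100000 */
            printf("0x%llx\n",
                   (unsigned long long)direct_sp_entry_gfn(0x100000, 3, 2));
            /* prints 0x100600: 3 * 512 pages past the base gfn */
            return 0;
    }
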
755 gfn_t gfn, unsigned int access)
758 sp->shadowed_translation[index] = (gfn << PAGE_SHIFT) | access;
765 sp->gfn, kvm_mmu_page_get_access(sp, index), access);
767 WARN_ONCE(gfn != kvm_mmu_page_get_gfn(sp, index),
768 "gfn mismatch under %s page %llx (expected %llx, got %llx)\n",
770 sp->gfn, kvm_mmu_page_get_gfn(sp, index), gfn);
776 gfn_t gfn = kvm_mmu_page_get_gfn(sp, index);
778 kvm_mmu_page_set_translation(sp, index, gfn, access);
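
Lines 755-778 show how an indirect shadow page records, per SPTE index, the guest gfn and access bits it shadows, packed into a single 64-bit slot of shadowed_translation. A small sketch of that packing and unpacking, assuming PAGE_SHIFT is 12 and that the access bits fit below it; pack_translation() and the 0x7 mask are assumptions made for the example:

    #include <assert.h>
    #include <stdint.h>

    typedef uint64_t gfn_t;

    #define PAGE_SHIFT  12          /* 4KiB pages */
    #define ACCESS_MASK 0x7ULL      /* assumption: exec/write/user bits in the low bits */

    /* pack a gfn and its guest access bits into one u64, as on line 758 */
    static uint64_t pack_translation(gfn_t gfn, unsigned int access)
    {
            return (gfn << PAGE_SHIFT) | (access & ACCESS_MASK);
    }

    static gfn_t unpack_gfn(uint64_t entry)           { return entry >> PAGE_SHIFT; }
    static unsigned int unpack_access(uint64_t entry) { return entry & ACCESS_MASK; }

    int main(void)
    {
            uint64_t entry = pack_translation(0xabcde, 0x5);

            assert(unpack_gfn(entry) == 0xabcde);
            assert(unpack_access(entry) == 0x5);
            return 0;
    }
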
782 * Return the pointer to the large page information for a given gfn,
785 static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
790 idx = gfn_to_index(gfn, slot->base_gfn, level);
798 disallowed, e.g. if KVM is shadowing a page table at the gfn.
803 gfn_t gfn, int count)
809 linfo = lpage_info_slot(gfn, slot, i);
817 void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
819 update_gfn_disallow_lpage_count(slot, gfn, 1);
822 void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
824 update_gfn_disallow_lpage_count(slot, gfn, -1);
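
Lines 785-824 index per-memslot, per-level metadata (the lpage_info array here, the rmaps at line 1091) by scaling a gfn relative to the slot's base gfn. A minimal sketch of what gfn_to_index() appears to compute, assuming 9 bits per paging level and 4KiB base pages; slot_level_index() is an illustrative name:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t gfn_t;

    /* assumption: 9 bits per level and level 1 == 4KiB, as on x86 */
    #define HPAGE_GFN_SHIFT(level) (((level) - 1) * 9)

    /*
     * Index into a slot's per-level array for a gfn: scale both the gfn and
     * the slot base down to 'level'-sized units, then subtract.
     */
    static unsigned long slot_level_index(gfn_t gfn, gfn_t base_gfn, int level)
    {
            return (gfn >> HPAGE_GFN_SHIFT(level)) -
                   (base_gfn >> HPAGE_GFN_SHIFT(level));
    }

    int main(void)
    {
            /* gfn 0x1234 in a slot starting at gfn 0x1000: 2MiB-level index 1 */
            printf("%lu\n", slot_level_index(0x1234, 0x1000, 2));
            return 0;
    }
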
831 gfn_t gfn;
834 gfn = sp->gfn;
836 slot = __gfn_to_memslot(slots, gfn);
840 return __kvm_write_track_add_gfn(kvm, slot, gfn);
842 kvm_mmu_gfn_disallow_lpage(slot, gfn);
844 if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K))
845 kvm_flush_remote_tlbs_gfn(kvm, gfn, PG_LEVEL_4K);
879 gfn_t gfn;
882 gfn = sp->gfn;
884 slot = __gfn_to_memslot(slots, gfn);
886 return __kvm_write_track_remove_gfn(kvm, slot, gfn);
888 kvm_mmu_gfn_allow_lpage(slot, gfn);
908 gfn_t gfn,
913 slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1086 static struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level,
1091 idx = gfn_to_index(gfn, slot->base_gfn, level);
1100 gfn_t gfn;
1104 gfn = kvm_mmu_page_get_gfn(sp, spte_index(spte));
1113 slot = __gfn_to_memslot(slots, gfn);
1114 rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1415 struct kvm_memory_slot *slot, u64 gfn,
1424 rmap_head = gfn_to_rmap(gfn, i, slot);
1431 kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, min_level);
1436 static bool kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn)
1440 slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1441 return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
1451 struct kvm_memory_slot *slot, gfn_t gfn, int level,
1458 struct kvm_memory_slot *slot, gfn_t gfn, int level,
1487 kvm_flush_remote_tlbs_gfn(kvm, gfn, level);
1503 gfn_t gfn;
1515 iterator->gfn = iterator->start_gfn;
1516 iterator->rmap = gfn_to_rmap(iterator->gfn, level, iterator->slot);
1542 iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level));
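
Line 1542 advances the rmap walk by one huge-page-sized chunk of gfns per step before the handler is invoked at line 1576. A standalone sketch of that stride, again assuming 9 bits per paging level; the gfn range and output are invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t gfn_t;

    #define HPAGE_GFN_SHIFT(level) (((level) - 1) * 9)  /* assumption: x86 */

    int main(void)
    {
            /*
             * Walk gfns [0x400, 0x1000) at the 2MiB level (level 2) the way the
             * iterator advances: one rmap bucket per 512-gfn chunk.
             */
            int level = 2;
            gfn_t gfn, start = 0x400, end = 0x1000;

            for (gfn = start; gfn < end; gfn += 1ULL << HPAGE_GFN_SHIFT(level))
                    printf("rmap bucket for gfn 0x%llx\n", (unsigned long long)gfn);
            return 0;
    }
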
1564 struct kvm_memory_slot *slot, gfn_t gfn,
1576 ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn,
1613 struct kvm_memory_slot *slot, gfn_t gfn, int level,
1627 struct kvm_memory_slot *slot, gfn_t gfn,
1644 u64 *spte, gfn_t gfn, unsigned int access)
1651 kvm_mmu_page_set_translation(sp, spte_index(spte), gfn, access);
1654 rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1661 kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level);
1666 u64 *spte, gfn_t gfn, unsigned int access)
1670 __rmap_add(vcpu->kvm, cache, slot, spte, gfn, access);
1706 pr_err_ratelimited("SPTE %llx (@ %p) for gfn %llx shadow-present at free",
1749 static unsigned kvm_page_table_hashfn(gfn_t gfn)
1751 return hash_64(gfn, KVM_MMU_HASH_SHIFT);
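
kvm_page_table_hashfn() at line 1751 buckets shadow pages by gfn; the bucket list is looked up again at line 2282. A userspace sketch of a hash_64()-style multiplicative hash, assuming the usual GOLDEN_RATIO_64 constant and a 2^12-entry table, both of which are assumptions rather than values taken from this listing:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t gfn_t;

    #define GOLDEN_RATIO_64 0x61C8864680B583EBull  /* assumed hash_64() multiplier */
    #define MMU_HASH_SHIFT  12                     /* assumed table size: 4096 buckets */

    /* multiplicative hash in the style of hash_64(gfn, bits) */
    static unsigned int page_table_hash(gfn_t gfn)
    {
            return (unsigned int)((gfn * GOLDEN_RATIO_64) >> (64 - MMU_HASH_SHIFT));
    }

    int main(void)
    {
            /* bucket a shadow page for this gfn would land in (cf. line 2282) */
            printf("bucket %u of %u\n", page_table_hash(0x1234), 1u << MMU_HASH_SHIFT);
            return 0;
    }
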
1916 if ((_sp)->gfn != (_gfn) || !sp_has_gptes(_sp)) {} else
2107 protected |= kvm_vcpu_write_protect_gfn(vcpu, sp->gfn);
2153 gfn_t gfn,
2163 if (sp->gfn != gfn) {
2171 * unsync pages for the same gfn. While it's possible
2199 * SPs for a single gfn to be unsync.
2238 gfn_t gfn,
2262 sp->gfn = gfn;
2275 gfn_t gfn,
2282 sp_list = &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
2284 sp = kvm_mmu_find_shadow_page(kvm, vcpu, gfn, sp_list, role);
2287 sp = kvm_mmu_alloc_shadow_page(kvm, caches, gfn, sp_list, role);
2295 gfn_t gfn,
2304 return __kvm_mmu_get_shadow_page(vcpu->kvm, vcpu, &caches, gfn, role);
2354 u64 *sptep, gfn_t gfn,
2363 return kvm_mmu_get_shadow_page(vcpu, gfn, role);
2454 * kvm_mmu_find_shadow_page() without write-protecting the gfn,
2757 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
2765 for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn) {
2800 * Attempt to unsync any shadow pages that can be reached by the specified gfn,
2801 as KVM is creating a writable mapping for said gfn. Returns 0 if all pages
2806 gfn_t gfn, bool can_unsync, bool prefetch)
2816 if (kvm_gfn_is_write_tracked(kvm, slot, gfn))
2825 for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn) {
2907 u64 *sptep, unsigned int pte_access, gfn_t gfn,
2925 mark_mmio_spte(vcpu, sptep, gfn, pte_access);
2948 wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, *sptep, prefetch,
2955 trace_kvm_mmu_set_spte(level, gfn, sptep);
2964 kvm_flush_remote_tlbs_gfn(vcpu->kvm, gfn, level);
2968 rmap_add(vcpu, slot, sptep, gfn, pte_access);
2985 gfn_t gfn;
2987 gfn = kvm_mmu_page_get_gfn(sp, spte_index(start));
2988 slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
2992 ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
2996 for (i = 0; i < ret; i++, gfn++, start++) {
2997 mmu_set_spte(vcpu, slot, start, access, gfn,
3058 * Lookup the mapping level for @gfn in the current mm.
3082 static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
3101 hva = __gfn_to_hva_memslot(slot, gfn);
3148 gfn_t gfn, int max_level, bool is_private)
3155 linfo = lpage_info_slot(gfn, slot, max_level);
3166 host_level = host_pfn_mapping_level(kvm, gfn, slot);
3171 const struct kvm_memory_slot *slot, gfn_t gfn,
3175 kvm_mem_is_private(kvm, gfn);
3177 return __kvm_mmu_max_mapping_level(kvm, slot, gfn, max_level, is_private);
3201 fault->gfn, fault->max_level,
3212 VM_BUG_ON((fault->gfn & mask) != (fault->pfn & mask));
3232 fault->pfn |= fault->gfn & page_mask;
3242 gfn_t base_gfn = fault->gfn;
3255 base_gfn = gfn_round_for_level(fault->gfn, it.level);
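
Lines 3242 and 3255 recompute the base gfn of the mapping at each level the fault handler walks. A sketch of that rounding, assuming gfn_round_for_level() masks the gfn down to a huge-page boundary; round_gfn_for_level() is an illustrative name:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t gfn_t;

    #define PAGES_PER_HPAGE(level) (1ULL << (((level) - 1) * 9))  /* assumption: x86 */

    /* round a gfn down to the first gfn of the huge page containing it */
    static gfn_t round_gfn_for_level(gfn_t gfn, int level)
    {
            return gfn & ~(PAGES_PER_HPAGE(level) - 1);
    }

    int main(void)
    {
            /* gfn 0x12345 rounded to its 2MiB (level 2) and 1GiB (level 3) base */
            printf("0x%llx 0x%llx\n",
                   (unsigned long long)round_gfn_for_level(0x12345, 2),
                   (unsigned long long)round_gfn_for_level(0x12345, 3));
            /* prints 0x12200 0x0: masks of 512 and 262144 pages respectively */
            return 0;
    }
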
3281 static void kvm_send_hwpoison_signal(struct kvm_memory_slot *slot, gfn_t gfn)
3283 unsigned long hva = gfn_to_hva_memslot(slot, gfn);
3296 * Do not cache the mmio info caused by writing the readonly gfn
3297 * into the spte otherwise read access on readonly gfn also can
3304 kvm_send_hwpoison_signal(fault->slot, fault->gfn);
3317 vcpu_cache_mmio_info(vcpu, gva, fault->gfn,
3329 * Do not create an MMIO SPTE for a gfn greater than host.MAXPHYADDR,
3331 * tricked by L0 userspace (you can observe gfn > L1.MAXPHYADDR if and
3335 if (unlikely(fault->gfn > kvm_mmu_max_gfn()))
3401 mark_page_dirty_in_slot(vcpu->kvm, fault->slot, fault->gfn);
3542 * since the gfn is not stable for indirect shadow page. See
3683 static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, int quadrant,
3695 sp = kvm_mmu_get_shadow_page(vcpu, gfn, role);
4200 gfn_t gfn = get_mmio_spte_gfn(spte);
4209 trace_handle_mmio_page_fault(addr, gfn, access);
4210 vcpu_cache_mmio_info(vcpu, addr, gfn, access);
4234 if (kvm_gfn_is_write_tracked(vcpu->kvm, fault->slot, fault->gfn))
4263 gfn_t gfn)
4268 arch.gfn = gfn;
4273 kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
4315 kvm_prepare_memory_fault_exit(vcpu, fault->gfn << PAGE_SHIFT,
4330 r = kvm_gmem_get_pfn(vcpu->kvm, fault->slot, fault->gfn, &fault->pfn,
4350 * Retry the page fault if the gfn hit a memslot that is being deleted
4352 * be zapped before KVM inserts a new MMIO SPTE for the gfn.
4376 if (fault->is_private != kvm_mem_is_private(vcpu->kvm, fault->gfn)) {
4385 fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, false, &async,
4392 trace_kvm_try_async_get_page(fault->addr, fault->gfn);
4393 if (kvm_find_async_pf_gfn(vcpu, fault->gfn)) {
4394 trace_kvm_async_pf_repeated_fault(fault->addr, fault->gfn);
4397 } else if (kvm_arch_setup_async_pf(vcpu, fault->addr, fault->gfn)) {
4407 fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, true, NULL,
4443 mmu_invalidate_retry_gfn_unsafe(vcpu->kvm, fault->mmu_seq, fault->gfn))
4463 if (mmu_invalidate_retry_gfn_unsafe(vcpu->kvm, fault->mmu_seq, fault->gfn)) {
4501 mmu_invalidate_retry_gfn(vcpu->kvm, fault->mmu_seq, fault->gfn);
4646 gfn_t base = gfn_round_for_level(fault->gfn,
4812 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
4816 if (gfn != get_mmio_spte_gfn(*sptep)) {
4821 mark_mmio_spte(vcpu, sptep, gfn, access);
5797 gfn_t gfn = gpa >> PAGE_SHIFT;
5817 for_each_gfn_valid_sp_with_gptes(vcpu->kvm, sp, gfn) {
6075 iterator.gfn - start_gfn + 1);
6462 * more than one rmap entry for a gfn, i.e. requires an L1 gfn to be
6470 * encounters an aliased gfn or two.
6496 gfn_t gfn;
6498 gfn = kvm_mmu_page_get_gfn(huge_sp, spte_index(huge_sptep));
6514 return __kvm_mmu_get_shadow_page(kvm, NULL, &caches, gfn, role);
6527 gfn_t gfn;
6534 gfn = kvm_mmu_page_get_gfn(sp, index);
6540 * gfn-to-pfn translation since the SP is direct, so no need to
6557 __rmap_add(kvm, cache, slot, sptep, gfn, sp->role.access);
6569 gfn_t gfn;
6573 gfn = kvm_mmu_page_get_gfn(huge_sp, spte_index(huge_sptep));
6598 trace_kvm_mmu_split_huge_page(gfn, spte, level, r);
6738 sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn,
7224 slot = __gfn_to_memslot(slots, sp->gfn);
7333 static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
7336 return lpage_info_slot(gfn, slot, level)->disallow_lpage & KVM_LPAGE_MIXED_FLAG;
7339 static void hugepage_clear_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
7342 lpage_info_slot(gfn, slot, level)->disallow_lpage &= ~KVM_LPAGE_MIXED_FLAG;
7345 static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
7348 lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG;
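
Lines 7333-7348 fold a "mixed attributes" flag into the same word that counts the reasons a huge page is disallowed. A toy sketch of that encoding, assuming the flag occupies bit 31 above the count; struct lpage_info below is a stand-in, not the kernel's struct kvm_lpage_info:

    #include <stdbool.h>
    #include <stdio.h>

    #define LPAGE_MIXED_FLAG (1u << 31)  /* assumption: flag lives above the refcount */

    struct lpage_info {
            unsigned int disallow_lpage;  /* low bits: count, top bit: mixed flag */
    };

    static bool test_mixed(struct lpage_info *li)  { return li->disallow_lpage & LPAGE_MIXED_FLAG; }
    static void set_mixed(struct lpage_info *li)   { li->disallow_lpage |= LPAGE_MIXED_FLAG; }
    static void clear_mixed(struct lpage_info *li) { li->disallow_lpage &= ~LPAGE_MIXED_FLAG; }

    int main(void)
    {
            struct lpage_info li = { .disallow_lpage = 3 };  /* 3 reasons lpage is disallowed */

            set_mixed(&li);
            printf("mixed=%d count=%u\n", test_mixed(&li), li.disallow_lpage & ~LPAGE_MIXED_FLAG);
            clear_mixed(&li);
            printf("mixed=%d count=%u\n", test_mixed(&li), li.disallow_lpage & ~LPAGE_MIXED_FLAG);
            return 0;
    }
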
7352 gfn_t gfn, int level, unsigned long attrs)
7354 const unsigned long start = gfn;
7360 for (gfn = start; gfn < end; gfn += KVM_PAGES_PER_HPAGE(level - 1)) {
7361 if (hugepage_test_mixed(slot, gfn, level - 1) ||
7362 attrs != kvm_get_memory_attributes(kvm, gfn))
7393 gfn_t gfn = gfn_round_for_level(range->start, level);
7396 if (gfn != range->start || gfn + nr_pages > range->end) {
7398 * Skip mixed tracking if the aligned gfn isn't covered
7402 if (gfn >= slot->base_gfn &&
7403 gfn + nr_pages <= slot->base_gfn + slot->npages) {
7404 if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
7405 hugepage_clear_mixed(slot, gfn, level);
7407 hugepage_set_mixed(slot, gfn, level);
7409 gfn += nr_pages;
7416 for ( ; gfn + nr_pages <= range->end; gfn += nr_pages)
7417 hugepage_clear_mixed(slot, gfn, level);
7424 if (gfn < range->end &&
7425 (gfn + nr_pages) <= (slot->base_gfn + slot->npages)) {
7426 if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
7427 hugepage_clear_mixed(slot, gfn, level);
7429 hugepage_set_mixed(slot, gfn, level);
7452 gfn_t gfn;
7461 for (gfn = start; gfn < end; gfn += nr_pages) {
7462 unsigned long attrs = kvm_get_memory_attributes(kvm, gfn);
7464 if (hugepage_has_attrs(kvm, slot, gfn, level, attrs))
7465 hugepage_clear_mixed(slot, gfn, level);
7467 hugepage_set_mixed(slot, gfn, level);
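
Lines 7352-7362 and 7452-7467 decide whether a huge page spans gfns with uniform memory attributes, and record the result in the mixed flag shown above. A simplified userspace sketch of that scan; it models only the attribute comparison (not the lower-level mixed flag), and toy_attrs() with its 0x100 boundary is invented for the example:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t gfn_t;

    #define PAGES_PER_HPAGE(level) (1ULL << (((level) - 1) * 9))  /* assumption: x86 */

    /* toy attribute lookup: gfns below 0x100 are private (1), the rest shared (0) */
    static unsigned long toy_attrs(gfn_t gfn)
    {
            return gfn < 0x100 ? 1 : 0;
    }

    /* a huge page is uniform only if every chunk one level down has 'attrs' */
    static bool range_has_uniform_attrs(gfn_t start, int level, unsigned long attrs)
    {
            gfn_t gfn, end = start + PAGES_PER_HPAGE(level);

            for (gfn = start; gfn < end; gfn += PAGES_PER_HPAGE(level - 1))
                    if (toy_attrs(gfn) != attrs)
                            return false;
            return true;
    }

    int main(void)
    {
            /* the 2MiB page at gfn 0 straddles the 0x100 boundary: mixed (prints 0) */
            printf("uniform: %d\n", range_has_uniform_attrs(0x000, 2, 1));
            /* the 2MiB page at gfn 0x200 is entirely shared: uniform (prints 1) */
            printf("uniform: %d\n", range_has_uniform_attrs(0x200, 2, 0));
            return 0;
    }
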