Lines Matching refs:gfn

102  * error pfns indicate that the gfn is in slot but failed to
120 * error_noslot pfns indicate that the gfn cannot be
129 /* noslot pfn indicates that the gfn is not in slot. */
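
These comment fragments describe the three classes of pfn a gfn translation can produce. A minimal sketch of how a caller would tell them apart, assuming the is_error_pfn()/is_noslot_pfn() helpers this header declares alongside these comments (the example_ name is hypothetical):

        /* Hedged sketch: classify a pfn returned by the gfn_to_pfn() family. */
        static int example_classify_pfn(kvm_pfn_t pfn)
        {
                if (is_noslot_pfn(pfn))
                        return -ENOENT; /* gfn is not covered by any memslot */
                if (is_error_pfn(pfn))
                        return -EFAULT; /* gfn is in a slot, but translation failed */
                return 0;               /* valid host pfn */
        }
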
300 kvm_pfn_t gfn;
1072 /* Iterator used for walking memslots that overlap a gfn range. */
1100 * its key strictly greater than the searched one (the start gfn in our case).
1114 * Find the slot with the lowest gfn that can possibly intersect with
1120 * already has a higher start gfn.
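
The fragments above describe the memslot lookup strategy: slots are ordered by base gfn, the search finds the first node whose start gfn is strictly greater than the target, then steps back one entry, since only that neighbour can still contain the target. An illustrative sketch of the same idea over a plain sorted array (the in-tree code walks a gfn tree; the example_ name is hypothetical):

        static struct kvm_memory_slot *
        example_first_candidate(struct kvm_memory_slot *slots, int nr, gfn_t start)
        {
                int lo = 0, hi = nr;

                if (!nr)
                        return NULL;
                /* Find the first slot whose base_gfn is strictly greater than start. */
                while (lo < hi) {
                        int mid = lo + (hi - lo) / 2;

                        if (slots[mid].base_gfn > start)
                                hi = mid;
                        else
                                lo = mid + 1;
                }
                /* The previous slot is the only earlier one that can contain start. */
                return lo ? &slots[lo - 1] : &slots[0];
        }
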
1204 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
1207 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
1208 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
1209 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
1210 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
1211 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
1216 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
1217 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
1219 kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn);
1220 kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn);
1221 kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
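
Typical use of the gfn_to_pfn() family, checking the error encodings described at the top of this listing (the example_ wrapper is hypothetical):

        static int example_translate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t *pfn)
        {
                *pfn = gfn_to_pfn(kvm, gfn);    /* may sleep; takes a page reference */
                if (is_error_noslot_pfn(*pfn))
                        return -EFAULT;         /* no slot, or translation failed */
                return 0;
        }
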
1231 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
1239 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
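
kvm_read_guest_page()/kvm_write_guest_page() copy a byte range that must stay within the page backing the gfn; both return 0 on success and a negative errno otherwise. A minimal sketch (hypothetical wrapper):

        static int example_read_u32(struct kvm *kvm, gfn_t gfn, int offset, u32 *val)
        {
                /* offset + sizeof(*val) must not cross the page boundary */
                return kvm_read_guest_page(kvm, gfn, val, offset, sizeof(*val));
        }
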
1251 #define __kvm_get_guest(kvm, gfn, offset, v) \
1253 unsigned long __addr = gfn_to_hva(kvm, gfn); \
1271 #define __kvm_put_guest(kvm, gfn, offset, v) \
1273 unsigned long __addr = gfn_to_hva(kvm, gfn); \
1280 mark_page_dirty(kvm, gfn); \
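
The fragments above come from the type-generic __kvm_get_guest()/__kvm_put_guest() macros. Conceptually they resolve the gfn to a host virtual address, access it with get_user()/put_user(), and mark the page dirty on a successful store. A non-generic sketch for a u64 (hypothetical helper, assuming the kvm_is_error_hva() check this header provides):

        static int example_put_u64(struct kvm *kvm, gfn_t gfn, int offset, u64 v)
        {
                unsigned long addr = gfn_to_hva(kvm, gfn);
                int ret = -EFAULT;

                if (!kvm_is_error_hva(addr))
                        ret = put_user(v, (u64 __user *)(addr + offset));
                if (!ret)
                        mark_page_dirty(kvm, gfn);      /* keep dirty logging coherent */
                return ret;
        }
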
1294 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
1295 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
1296 bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
1297 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
1298 void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn);
1299 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
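
Sketch of a lookup built on gfn_to_memslot(), e.g. to bound an operation to the slot containing a gfn (hypothetical helper; gfn_to_memslot() yields no slot for unmapped gfns):

        static u64 example_pages_left_in_slot(struct kvm *kvm, gfn_t gfn)
        {
                struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

                if (!slot)
                        return 0;       /* gfn not backed by any slot */
                return slot->base_gfn + slot->npages - gfn;
        }
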
1302 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
1303 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
1304 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
1307 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
1308 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
1309 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
1315 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
1319 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
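
The kvm_vcpu_*() variants above resolve the gfn through the memslots of the address space the vCPU is currently in (e.g. SMM vs. normal on x86), rather than always using address space 0. A hypothetical sketch:

        static int example_vcpu_read(struct kvm_vcpu *vcpu, gfn_t gfn,
                                     void *buf, int len)
        {
                /* Same contract as kvm_read_guest_page(), vCPU-scoped slots. */
                return kvm_vcpu_read_guest_page(vcpu, gfn, buf, 0, len);
        }
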
1436 void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages);
1568 gfn_t gfn, u64 nr_pages)
1573 int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages);
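
kvm_flush_remote_tlbs_range() lets callers flush only the gfn range they touched; on architectures that do not implement kvm_arch_flush_remote_tlbs_range(), KVM is expected to fall back to a full remote flush. A hypothetical caller:

        static void example_zap_and_flush(struct kvm *kvm,
                                          struct kvm_memory_slot *slot)
        {
                /* ... zap the mappings backing the slot under mmu_lock ... */
                kvm_flush_remote_tlbs_range(kvm, slot->base_gfn, slot->npages);
        }
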
1698 * Returns a pointer to the memslot if it contains gfn.
1702 try_get_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
1707 if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages)
1714 * Returns a pointer to the memslot that contains gfn. Otherwise returns NULL.
1721 search_memslots(struct kvm_memslots *slots, gfn_t gfn, bool approx)
1730 if (gfn >= slot->base_gfn) {
1731 if (gfn < slot->base_gfn + slot->npages)
1742 ____gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn, bool approx)
1747 slot = try_get_memslot(slot, gfn);
1751 slot = search_memslots(slots, gfn, approx);
1766 __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
1768 return ____gfn_to_memslot(slots, gfn, false);
1772 __gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
1780 unsigned long offset = gfn - slot->base_gfn;
1785 static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
1787 return gfn_to_memslot(kvm, gfn)->id;
1798 static inline gpa_t gfn_to_gpa(gfn_t gfn)
1800 return (gpa_t)gfn << PAGE_SHIFT;
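
gfn_to_gpa() is a plain shift; composing a full guest physical address adds the intra-page offset back in. A hypothetical helper, mirroring the inverse gpa_to_gfn() defined nearby:

        static inline gpa_t example_gfn_to_gpa_offset(gfn_t gfn, unsigned int offset)
        {
                return ((gpa_t)gfn << PAGE_SHIFT) | (offset & ~PAGE_MASK);
        }
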
2039 gfn_t gfn)
2057 if (gfn >= kvm->mmu_invalidate_range_start &&
2058 gfn < kvm->mmu_invalidate_range_end)
2075 gfn_t gfn)
2086 gfn >= kvm->mmu_invalidate_range_start &&
2087 gfn < kvm->mmu_invalidate_range_end)
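
The two range checks above underpin the page-fault retry protocol against in-flight mmu_notifier invalidations: a fault handler snapshots mmu_invalidate_seq before translating the gfn and, under mmu_lock, backs off if the gfn landed in an invalidation window. A hedged sketch of the pattern, assuming an architecture where mmu_lock is an rwlock and the mmu_invalidate_retry_gfn() helper these checks appear to belong to:

        static kvm_pfn_t example_fault(struct kvm *kvm,
                                       struct kvm_memory_slot *slot, gfn_t gfn)
        {
                unsigned long mmu_seq;
                kvm_pfn_t pfn;

        retry:
                mmu_seq = kvm->mmu_invalidate_seq;
                smp_rmb();      /* order the snapshot before the translation */

                pfn = gfn_to_pfn_memslot(slot, gfn);
                if (is_error_noslot_pfn(pfn))
                        return pfn;

                write_lock(&kvm->mmu_lock);
                if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn)) {
                        write_unlock(&kvm->mmu_lock);
                        kvm_release_pfn_clean(pfn);
                        goto retry;     /* raced with an invalidation of this gfn */
                }
                /* ... install the mapping ... */
                write_unlock(&kvm->mmu_lock);
                return pfn;
        }
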
2411 static inline unsigned long kvm_get_memory_attributes(struct kvm *kvm, gfn_t gfn)
2413 return xa_to_value(xa_load(&kvm->mem_attr_array, gfn));
2423 static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
2426 kvm_get_memory_attributes(kvm, gfn) & KVM_MEMORY_ATTRIBUTE_PRIVATE;
2429 static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
2437 gfn_t gfn, kvm_pfn_t *pfn, int *max_order);
2440 struct kvm_memory_slot *slot, gfn_t gfn,
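
Putting the private-memory pieces together: kvm_mem_is_private() consults the xarray-backed attribute store shown above, and kvm_gmem_get_pfn() resolves a private gfn through guest_memfd. A hedged sketch of a private fault path, assuming the signature listed above (the example_ name is hypothetical):

        static int example_private_fault(struct kvm *kvm,
                                         struct kvm_memory_slot *slot, gfn_t gfn,
                                         kvm_pfn_t *pfn)
        {
                int max_order;

                if (!kvm_mem_is_private(kvm, gfn))
                        return -EINVAL; /* caller should take the shared path */

                /* On success *pfn is valid and max_order bounds the mapping size. */
                return kvm_gmem_get_pfn(kvm, slot, gfn, pfn, &max_order);
        }
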