/linux-master/arch/powerpc/kvm/
e500_mmu_host.c
    323  u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
    353  slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
    354  hva = gfn_to_hva_memslot(slot, gfn);
    381  slot_start = pfn - (gfn - slot->base_gfn);
    400  * requested) that will cover gfn, stay within the
    401  * range, and for which gfn and pfn are mutually
    409  gfn_start = gfn & ~(tsize_pages - 1);
    412  if (gfn_start + pfn - gfn < start)
    414  if (gfn_end + pfn - gfn > end)
    416  if ((gfn
    322  kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe, struct tlbe_ref *ref)  argument
    559  kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500, u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, struct kvm_book3e_206_tlb_entry *stlbe, int esel)  argument
    612  gfn_t gfn = gpaddr >> PAGE_SHIFT;  local
    [all...]
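The e500 hunk above searches for the largest TLB mapping size that covers gfn, stays inside the backing range, and keeps gfn and pfn mutually aligned. A minimal sketch of that size-selection loop, with hypothetical [start, end) pfn bounds and a max_pages parameter standing in for the guest-requested size:

/*
 * Illustrative only: find the largest power-of-two page count that
 * covers @gfn, stays within [@start, @end) in pfn space, and keeps
 * @gfn and @pfn mutually aligned.  Names and bounds are not the
 * kernel's; they just restate the checks visible in the hits above.
 */
static unsigned long pick_tsize_pages(unsigned long gfn, unsigned long pfn,
                                      unsigned long start, unsigned long end,
                                      unsigned long max_pages)
{
    unsigned long tsize_pages;

    for (tsize_pages = max_pages; tsize_pages > 1; tsize_pages >>= 1) {
        unsigned long gfn_start = gfn & ~(tsize_pages - 1);
        unsigned long gfn_end = gfn_start + tsize_pages;

        if (gfn_start + pfn - gfn < start)      /* underruns the range */
            continue;
        if (gfn_end + pfn - gfn > end)          /* overruns the range */
            continue;
        if ((gfn ^ pfn) & (tsize_pages - 1))    /* gfn/pfn misaligned */
            continue;
        return tsize_pages;
    }
    return 1;
}

The XOR test is equivalent to requiring gfn and pfn to be congruent modulo tsize_pages, which is what lets a single TLB entry translate the whole naturally aligned window.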
book3s_hv_rm_mmu.c
    97   unsigned long gfn, unsigned long psize)
    104  gfn -= memslot->base_gfn;
    105  set_dirty_bits_atomic(memslot->dirty_bitmap, gfn, npages);
    113  unsigned long gfn;  local
    117  gfn = hpte_rpn(hpte_gr, psize);
    118  memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
    120  kvmppc_update_dirty_map(memslot, gfn, psize);
    131  unsigned long gfn;  local
    133  gfn = hpte_rpn(hpte_gr, kvmppc_actual_pgsz(hpte_v, hpte_gr));
    134  memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
    96   kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot, unsigned long gfn, unsigned long psize)  argument
    156  unsigned long gfn;  local
    188  unsigned long i, pa, gpa, gfn, psize;  local
    889  unsigned long gfn, hva, pa, psize = PAGE_SHIFT;  local
    [all...]
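kvmppc_update_dirty_map() above shows the slot-relative dirty-tracking idiom: subtract the memslot's base_gfn to get a bitmap index, then set one bit per page covered by the mapping. A sketch under the same assumptions (psize is the mapping size in bytes; set_bit_le() stands in for the atomic helper used here):

#include <linux/kvm_host.h>

/*
 * Illustrative only: mark a @psize-byte mapping at @gfn dirty in the
 * memslot's bitmap, mirroring the pattern in the hits above.
 */
static void sketch_update_dirty_map(const struct kvm_memory_slot *memslot,
                                    unsigned long gfn, unsigned long psize)
{
    unsigned long npages = psize >> PAGE_SHIFT;

    if (!memslot || !memslot->dirty_bitmap)
        return;

    gfn -= memslot->base_gfn;   /* absolute gfn -> slot-relative index */
    while (npages--)
        set_bit_le(gfn++, memslot->dirty_bitmap);
}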
book3s_64_mmu_radix.c
    430  unsigned long gfn = gpa >> PAGE_SHIFT;  local
    442  memslot = gfn_to_memslot(kvm, gfn);
    459  kvmppc_update_dirty_map(memslot, gfn, page_size);
    830  unsigned long hva, gfn = gpa >> PAGE_SHIFT;  local
    848  hva = gfn_to_hva_memslot(memslot, gfn);
    855  pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL,
    949  unsigned long gpa, gfn;  local
    972  gfn = gpa >> PAGE_SHIFT;
    977  return kvmppc_send_page_to_uv(kvm, gfn);
    980  memslot = gfn_to_memslot(kvm, gfn);
    1032 kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn)  argument
    1051 kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn)  argument
    1079 kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn)  argument
    1101 unsigned long gfn = memslot->base_gfn + pagenum;  local
    [all...]
book3s_hv_nested.c
    1033 unsigned long gfn, end_gfn;  local
    1038 gfn = (gpa >> PAGE_SHIFT) - memslot->base_gfn;
    1039 end_gfn = gfn + (nbytes >> PAGE_SHIFT);
    1044 for (; gfn < end_gfn; gfn++) {
    1045 unsigned long *rmap = &memslot->arch.rmap[gfn];
    1527 unsigned long n_gpa, gpa, gfn, perm = 0UL;  local
    1585 gfn = gpa >> PAGE_SHIFT;
    1589 memslot = gfn_to_memslot(kvm, gfn);
    1639 /* Align gfn t
    [all...]
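The nested-HV hits iterate one rmap entry per guest page of a gpa range, indexing memslot->arch.rmap (a powerpc-specific per-slot array) with slot-relative gfns. A sketch of that walk with a hypothetical visitor callback:

/*
 * Illustrative walk over the reverse-map entries covering
 * [@gpa, @gpa + @nbytes), one per guest page, as in the hits above.
 * @visit is a made-up callback; the real code operates on each
 * rmap word inline.
 */
static void sketch_walk_rmap(struct kvm_memory_slot *memslot,
                             unsigned long gpa, unsigned long nbytes,
                             void (*visit)(unsigned long *rmap))
{
    unsigned long gfn = (gpa >> PAGE_SHIFT) - memslot->base_gfn;
    unsigned long end_gfn = gfn + (nbytes >> PAGE_SHIFT);

    for (; gfn < end_gfn; gfn++)
        visit(&memslot->arch.rmap[gfn]);
}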
/linux-master/include/trace/events/
kvm.h
    261  TP_PROTO(u64 gva, u64 gfn),
    263  TP_ARGS(gva, gfn),
    267  __field(u64, gfn)
    272  __entry->gfn = gfn;
    275  TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
    280  TP_PROTO(u64 gva, u64 gfn),
    282  TP_ARGS(gva, gfn)
    287  TP_PROTO(u64 gva, u64 gfn),
    [all...]
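These hits are the pieces of a tracepoint class that logs a gva/gfn pair. Assembled into one standalone definition, the usual shape looks like this (the event name sketch_gva_gfn is invented for illustration):

/* Illustrative TRACE_EVENT recording a gva/gfn pair. */
TRACE_EVENT(sketch_gva_gfn,
    TP_PROTO(u64 gva, u64 gfn),

    TP_ARGS(gva, gfn),

    TP_STRUCT__entry(
        __field(u64, gva)
        __field(u64, gfn)
    ),

    TP_fast_assign(
        __entry->gva = gva;
        __entry->gfn = gfn;
    ),

    TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
);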
/linux-master/virt/kvm/
kvm_main.c
    367  void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)  argument
    369  if (!kvm_arch_flush_remote_tlbs_range(kvm, gfn, nr_pages))
    639  * {gfn(page) | page intersects with [hva_start, hva_end)} =
    1579 * If the memslot gfn is unchanged, rb_replace_node() can be used to
    1580 * switch the node in the gfn tree instead of removing the old and
    1835 * memslot will be created. Validation of sp->gfn happens in:
    2541 /* Set @attributes for the gfn range [@start, @end). */
    2627 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)  argument
    2629 return __gfn_to_memslot(kvm_memslots(kvm), gfn);
    2633 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)  argument
    2666 kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)  argument
    2674 kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)  argument
    2682 kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)  argument
    2711 __gfn_to_hva_many(const struct kvm_memory_slot *slot, gfn_t gfn, gfn_t *nr_pages, bool write)  argument
    2726 gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, gfn_t *nr_pages)  argument
    2732 gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)  argument
    2739 gfn_to_hva(struct kvm *kvm, gfn_t gfn)  argument
    2745 kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)  argument
    2759 gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn, bool *writable)  argument
    2770 gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)  argument
    2777 kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)  argument
    3028 __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn, bool atomic, bool interruptible, bool *async, bool write_fault, bool *writable, hva_t *hva)  argument
    3060 gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, bool *writable)  argument
    3068 gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)  argument
    3075 gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn)  argument
    3082 kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)  argument
    3088 gfn_to_pfn(struct kvm *kvm, gfn_t gfn)  argument
    3094 kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)  argument
    3100 gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn, struct page **pages, int nr_pages)  argument
    3123 gfn_to_page(struct kvm *kvm, gfn_t gfn)  argument
    3149 kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)  argument
    3310 __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn, void *data, int offset, int len)  argument
    3325 kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, int len)  argument
    3334 kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset, int len)  argument
    3345 gfn_t gfn = gpa >> PAGE_SHIFT;  local
    3365 gfn_t gfn = gpa >> PAGE_SHIFT;  local
    3383 __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn, void *data, int offset, unsigned long len)  argument
    3403 gfn_t gfn = gpa >> PAGE_SHIFT;  local
    3411 __kvm_write_guest_page(struct kvm *kvm, struct kvm_memory_slot *memslot, gfn_t gfn, const void *data, int offset, int len)  argument
    3428 kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data, int offset, int len)  argument
    3437 kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data, int offset, int len)  argument
    3449 gfn_t gfn = gpa >> PAGE_SHIFT;  local
    3470 gfn_t gfn = gpa >> PAGE_SHIFT;  local
    3615 gfn_t gfn = gpa >> PAGE_SHIFT;  local
    3632 mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn)  argument
    3657 mark_page_dirty(struct kvm *kvm, gfn_t gfn)  argument
    3666 kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)  argument
    [all...]
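kvm_main.c is where the generic translation chain lives: gpa >> PAGE_SHIFT yields the gfn, the gfn selects a memslot, the memslot yields an hva or pfn, and writers finish with mark_page_dirty(). The repeated "gfn_t gfn = gpa >> PAGE_SHIFT" locals above belong to the kvm_read_guest()/kvm_write_guest() paths, which split their work on page boundaries roughly like this sketch:

#include <linux/kvm_host.h>

/*
 * Illustrative cross-page read of @len bytes at guest physical
 * address @gpa, built on kvm_read_guest_page() from the listing
 * above; each iteration handles at most one page.
 */
static int sketch_read_guest(struct kvm *kvm, gpa_t gpa, void *data,
                             unsigned long len)
{
    while (len) {
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int offset = offset_in_page(gpa);
        int seg = min_t(unsigned long, len, PAGE_SIZE - offset);
        int ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);

        if (ret < 0)
            return ret;
        gpa += seg;
        data += seg;
        len -= seg;
    }
    return 0;
}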
pfncache.c
    291  gfn_t gfn = gpa_to_gfn(gpa);  local
    295  gpc->memslot = __gfn_to_memslot(slots, gfn);
    296  gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);
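The pfncache refresh resolves a gpa to a host userspace address through the memslot; since the helpers work at page granularity, the gpa's page offset has to be re-applied afterwards. A sketch (helper name hypothetical):

/*
 * Illustrative gpa -> host virtual address lookup following the
 * pfncache pattern above; returns a bad hva on failure.
 */
static unsigned long sketch_gpa_to_uhva(struct kvm_memslots *slots, gpa_t gpa)
{
    gfn_t gfn = gpa_to_gfn(gpa);
    struct kvm_memory_slot *memslot = __gfn_to_memslot(slots, gfn);
    unsigned long uhva;

    if (!memslot)
        return KVM_HVA_ERR_BAD;

    uhva = gfn_to_hva_memslot(memslot, gfn);
    if (kvm_is_error_hva(uhva))
        return uhva;
    return uhva + offset_in_page(gpa);  /* restore the byte offset */
}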
/linux-master/arch/loongarch/kvm/
mmu.c
    68   offset = (addr >> PAGE_SHIFT) - ctx->gfn;
    354  * @gfn_offset: The gfn offset in memory slot
    372  ctx.gfn = base_gfn;
    572  gfn_t gfn = gpa >> PAGE_SHIFT;  local
    602  slot = gfn_to_memslot(kvm, gfn);
    628  mark_page_dirty(kvm, gfn);
    671  * Lookup the mapping level for @gfn in the current mm.
    695  static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,  argument
    714  hva = __gfn_to_hva_memslot(slot, gfn);
    757  static kvm_pte_t *kvm_split_huge(struct kvm_vcpu *vcpu, kvm_pte_t *ptep, gfn_t gfn)  argument
    807  gfn_t gfn = gpa >> PAGE_SHIFT;  local
    [all...]
/linux-master/arch/x86/kvm/mmu/
spte.c
    71   u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)  argument
    75   u64 gpa = gfn << PAGE_SHIFT;
    139  unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
    193  spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn,
    223  if (mmu_try_to_unsync_pages(vcpu->kvm, slot, gfn, can_unsync, prefetch)) {
    244  mark_page_dirty_in_slot(vcpu->kvm, slot, gfn);
    137  make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, const struct kvm_memory_slot *slot, unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool prefetch, bool can_unsync, bool host_writable, u64 *new_spte)  argument
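make_mmio_spte() rebuilds the gpa with a plain shift, which is all a gfn is: a gpa with the page-offset bits stripped. The conversions are equivalent to KVM's gfn_to_gpa()/gpa_to_gfn() inline helpers:

#include <linux/kvm_host.h>     /* gpa_t, gfn_t, PAGE_SHIFT */

/* Equivalent to KVM's gfn_to_gpa()/gpa_to_gfn() helpers. */
static inline gpa_t sketch_gfn_to_gpa(gfn_t gfn)
{
    return (gpa_t)gfn << PAGE_SHIFT;    /* lossless */
}

static inline gfn_t sketch_gpa_to_gfn(gpa_t gpa)
{
    return (gfn_t)(gpa >> PAGE_SHIFT);  /* drops the page offset */
}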
spte.h
    476  unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
    482  u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access);
/linux-master/arch/riscv/kvm/
vcpu_sbi_sta.c
    36   gfn_t gfn;  local
    46   gfn = shmem >> PAGE_SHIFT;
    47   hva = kvm_vcpu_gfn_to_hva(vcpu, gfn);
    78   kvm_vcpu_mark_page_dirty(vcpu, gfn);
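The SBI steal-time handler shows the vCPU-scoped variant of the chain: translate the guest-supplied shared-memory address to an hva, reject unmapped gfns, and mark the page dirty once the host has written to it. A condensed sketch (error handling is illustrative):

/*
 * Illustrative validation of a guest-supplied shared-memory
 * address @shmem, following the vcpu_sbi_sta.c pattern above.
 */
static int sketch_map_shmem(struct kvm_vcpu *vcpu, gpa_t shmem)
{
    gfn_t gfn = shmem >> PAGE_SHIFT;
    unsigned long hva = kvm_vcpu_gfn_to_hva(vcpu, gfn);

    if (kvm_is_error_hva(hva))
        return -EINVAL;

    /* ... host writes into the shared page via @hva ... */

    kvm_vcpu_mark_page_dirty(vcpu, gfn);
    return 0;
}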
mmu.c
    617  gfn_t gfn = gpa >> PAGE_SHIFT;  local
    650  gfn = (gpa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
    670  hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writable);
    693  mark_page_dirty(kvm, gfn);
/linux-master/drivers/xen/xenbus/
xenbus_client.c
    411  unsigned long gfn;  local
    414  gfn = pfn_to_gfn(vmalloc_to_pfn(addr));
    416  gfn = virt_to_gfn(addr);
    420  gfn, 0);
    653  static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,  argument
    659  unsigned long vaddr = (unsigned long)gfn_to_virt(gfn);
    871  static void xenbus_unmap_ring_setup_grant_hvm(unsigned long gfn,  argument
    878  info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn);
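On the Xen side, gfn names the frame number the guest itself uses, and xenbus derives it differently depending on whether the buffer lives in vmalloc space, since virt_to_gfn() is only valid for direct-mapped addresses. A sketch of that choice (helper name hypothetical):

#include <linux/mm.h>       /* is_vmalloc_addr() */
#include <linux/vmalloc.h>  /* vmalloc_to_pfn() */
#include <xen/page.h>       /* pfn_to_gfn(), virt_to_gfn() */

/*
 * Illustrative helper deriving the gfn backing a kernel virtual
 * address, as in xenbus_client.c above: vmalloc pages need
 * vmalloc_to_pfn(), direct-mapped ones can use virt_to_gfn().
 */
static unsigned long sketch_addr_to_gfn(void *addr)
{
    if (is_vmalloc_addr(addr))
        return pfn_to_gfn(vmalloc_to_pfn(addr));
    return virt_to_gfn(addr);
}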
/linux-master/arch/x86/include/asm/xen/
page.h
    251  static inline unsigned long gfn_to_pfn(unsigned long gfn)  argument
    254  return gfn;
    256  return mfn_to_pfn(gfn);
/linux-master/drivers/gpu/drm/i915/gvt/
gtt.c
    664  spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
    693  spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
    749  if (spt->guest_page.gfn) {
    753  intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
    799  /* Find a spt by guest gfn. */
    801  struct intel_vgpu *vgpu, unsigned long gfn)
    805  track = intel_vgpu_find_page_track(vgpu, gfn);
    871  /* Allocate shadow page table associated with specific gfn. */
    874  unsigned long gfn, bool guest_pde_ips)
    886  ret = intel_vgpu_register_page_track(vgpu, gfn,
    800  intel_vgpu_find_spt_by_gfn(struct intel_vgpu *vgpu, unsigned long gfn)  argument
    872  ppgtt_alloc_spt_gfn(struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type, unsigned long gfn, bool guest_pde_ips)  argument
    1241 unsigned long gfn;  local
    2213 unsigned long gma, gfn;  local
    [all...]
trace.h
    64   TP_printk("VM%d [alloc] spt %p type %d mfn 0x%lx gfn 0x%lx\n",
    153  TP_PROTO(int id, char *action, void *spt, unsigned long gfn,
    156  TP_ARGS(id, action, spt, gfn, type),
    164  "VM%d [%s] spt %p gfn 0x%lx type %d\n",
    165  id, action, spt, gfn, type);
opregion.c
    275  vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
    420  scic_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
    422  parm_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
gvt.h
    122  u32 gfn[INTEL_GVT_OPREGION_PAGES];  member in struct:intel_vgpu_opregion
    753  int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn);
    754  int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn);
    756  int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
/linux-master/arch/s390/kvm/
gaccess.c
    874  * caller needs to ensure that gfn is accessible, so we can
    985  const gfn_t gfn = gpa_to_gfn(gpa);  local
    989  rc = kvm_write_guest_page(kvm, gfn, data, offset, len);
    991  rc = kvm_read_guest_page(kvm, gfn, data, offset, len);
    1001 gfn_t gfn;  local
    1005 gfn = gpa >> PAGE_SHIFT;
    1006 slot = gfn_to_memslot(kvm, gfn);
    1007 hva = gfn_to_hva_memslot_prot(slot, gfn, &writable);
    1025 mark_page_dirty_in_slot(kvm, slot, gfn);
    1176 gfn_t gfn  local
    [all...]
/linux-master/arch/loongarch/include/asm/
kvm_mmu.h
    34   unsigned long gfn;  member in struct:kvm_ptw_ctx
/linux-master/arch/x86/kvm/
x86.h
    219  gva_t gva, gfn_t gfn, unsigned access)
    232  vcpu->arch.mmio_gfn = gfn;
    313  u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
    316  bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
    218  vcpu_cache_mmio_info(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, unsigned access)  argument
mmu.h
    284  static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)  argument
    287  return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
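gfn_to_index() maps a gfn to an index in a memslot's per-level metadata arrays: both the gfn and the slot's base gfn are scaled down by the level's huge-page shift before subtracting. A worked sketch, assuming x86's KVM_HPAGE_GFN_SHIFT(level) expands to (level - 1) * 9:

/*
 * Illustrative re-statement of gfn_to_index(); the shift values
 * assume x86's 9-bits-per-level paging (4K/2M/1G = level 1/2/3).
 */
#define SKETCH_HPAGE_GFN_SHIFT(level)   (((level) - 1) * 9)

static inline unsigned long sketch_gfn_to_index(unsigned long gfn,
                                                unsigned long base_gfn,
                                                int level)
{
    return (gfn >> SKETCH_HPAGE_GFN_SHIFT(level)) -
           (base_gfn >> SKETCH_HPAGE_GFN_SHIFT(level));
}

/*
 * Example: for a slot at base_gfn 0x400 and gfn 0x7ff at level 2
 * (2M pages, shift 9): 0x7ff >> 9 == 3, 0x400 >> 9 == 2, so the
 * index is 1, i.e. the second 2M region of the slot.
 */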
mtrr.c
    614  u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)  argument
    623  start = gfn_to_gpa(gfn);
    690  bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,  argument
    698  start = gfn_to_gpa(gfn);
    699  end = gfn_to_gpa(gfn + page_num);
/linux-master/arch/mips/kvm/
mmu.c
    407  * @gfn_offset: The gfn offset in memory slot
    515  gfn_t gfn = gpa >> PAGE_SHIFT;  local
    546  mark_page_dirty(kvm, gfn);
    592  gfn_t gfn = gpa >> PAGE_SHIFT;  local
    632  pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writeable);
    660  mark_page_dirty(kvm, gfn);
/linux-master/tools/testing/selftests/kvm/
dirty_log_test.c
    164  * recorded the dirty gfn with the old contents.
    169  * dirty gfn we've collected, so that if a mismatch of data found later in the
    305  static inline bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn)  argument
    307  return smp_load_acquire(&gfn->flags) == KVM_DIRTY_GFN_F_DIRTY;
    310  static inline void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn)  argument
    312  smp_store_release(&gfn->flags, KVM_DIRTY_GFN_F_RESET);
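The selftest polls KVM's dirty ring with acquire/release ordering: an entry counts as harvestable only once the kernel's store of KVM_DIRTY_GFN_F_DIRTY is observed, and KVM_DIRTY_GFN_F_RESET is published only after the entry has been read. A sketch of the collection loop built from the two helpers above (ring sizing and index bookkeeping are illustrative; the barriers come from the selftests' tools headers):

#include <stdint.h>
#include <linux/kvm.h>  /* struct kvm_dirty_gfn, KVM_DIRTY_GFN_F_* */

/*
 * Illustrative harvest of dirty entries from a mapped dirty ring of
 * @size entries, starting at *@fetch_index; returns the count collected.
 */
static uint32_t sketch_collect_dirty_ring(struct kvm_dirty_gfn *ring,
                                          uint32_t size,
                                          uint32_t *fetch_index)
{
    uint32_t collected = 0;

    for (;;) {
        struct kvm_dirty_gfn *entry = &ring[*fetch_index % size];

        /* Pairs with the kernel's release store of F_DIRTY. */
        if (smp_load_acquire(&entry->flags) != KVM_DIRTY_GFN_F_DIRTY)
            break;

        /* entry->slot and entry->offset identify the dirty gfn. */

        /* Publish consumption only after reading the entry. */
        smp_store_release(&entry->flags, KVM_DIRTY_GFN_F_RESET);
        (*fetch_index)++;
        collected++;
    }
    return collected;
}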