Searched refs:gfn (Results 1 - 25 of 84) sorted by relevance


/linux-master/drivers/gpu/drm/i915/gvt/
page_track.h
45 struct intel_vgpu *vgpu, unsigned long gfn);
48 unsigned long gfn, gvt_page_track_handler_t handler,
51 unsigned long gfn);
53 int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
54 int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
page_track.c
29 * @gfn: the gfn of guest page
35 struct intel_vgpu *vgpu, unsigned long gfn)
37 return radix_tree_lookup(&vgpu->page_track_tree, gfn);
43 * @gfn: the gfn of guest page
50 int intel_vgpu_register_page_track(struct intel_vgpu *vgpu, unsigned long gfn, argument
56 track = intel_vgpu_find_page_track(vgpu, gfn);
67 ret = radix_tree_insert(&vgpu->page_track_tree, gfn, track);
79 * @gfn: the gfn of guest page
34 intel_vgpu_find_page_track( struct intel_vgpu *vgpu, unsigned long gfn) argument
82 intel_vgpu_unregister_page_track(struct intel_vgpu *vgpu, unsigned long gfn) argument
103 intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn) argument
130 intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn) argument
[all...]
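
The page_track.c matches above sketch GVT-g's per-vGPU tracking store: intel_vgpu_find_page_track() is a radix_tree_lookup() keyed by gfn, and intel_vgpu_register_page_track() allocates a tracker and radix_tree_insert()s it, refusing a gfn that is already tracked. A minimal sketch of that pattern, assuming illustrative names (my_handler_t, my_track, my_register) and an illustrative handler signature rather than the driver's own:

#include <linux/errno.h>
#include <linux/radix-tree.h>
#include <linux/slab.h>
#include <linux/types.h>

typedef int (*my_handler_t)(void *priv, u64 gpa, void *data, int bytes);

struct my_track {
	my_handler_t handler;
	void *priv;
};

/* NULL means the gfn is not tracked, as in intel_vgpu_find_page_track(). */
static struct my_track *my_find(struct radix_tree_root *tree, unsigned long gfn)
{
	return radix_tree_lookup(tree, gfn);
}

/* One tracker per gfn, as in intel_vgpu_register_page_track(). */
static int my_register(struct radix_tree_root *tree, unsigned long gfn,
		       my_handler_t handler, void *priv)
{
	struct my_track *track;

	if (my_find(tree, gfn))
		return -EEXIST;

	track = kzalloc(sizeof(*track), GFP_KERNEL);
	if (!track)
		return -ENOMEM;

	track->handler = handler;
	track->priv = priv;
	return radix_tree_insert(tree, gfn, track);
}
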
kvmgt.c
92 gfn_t gfn; member in struct:kvmgt_pgfn
100 gfn_t gfn; member in struct:gvt_dma
111 static void kvmgt_page_track_remove_region(gfn_t gfn, unsigned long nr_pages,
128 static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, argument
131 vfio_unpin_pages(&vgpu->vfio_device, gfn << PAGE_SHIFT,
136 static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, argument
149 dma_addr_t cur_iova = (gfn + npage) << PAGE_SHIFT;
173 gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
177 static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn, argument
184 ret = gvt_pin_guest_page(vgpu, gfn, size, &page);
200 gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn, dma_addr_t dma_addr, unsigned long size) argument
228 __gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn) argument
246 __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, dma_addr_t dma_addr, unsigned long size) argument
349 __kvmgt_protect_table_find(struct intel_vgpu *info, gfn_t gfn) argument
365 kvmgt_gfn_is_write_protected(struct intel_vgpu *info, gfn_t gfn) argument
373 kvmgt_protect_table_add(struct intel_vgpu *info, gfn_t gfn) argument
388 kvmgt_protect_table_del(struct intel_vgpu *info, gfn_t gfn) argument
1545 intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn) argument
1563 intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn) argument
1596 kvmgt_page_track_remove_region(gfn_t gfn, unsigned long nr_pages, struct kvm_page_track_notifier_node *node) argument
1629 intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size, dma_addr_t *dma_addr) argument
[all...]
/linux-master/arch/x86/include/asm/
kvm_page_track.h
39 * @gfn: base gfn of the region being removed
43 void (*track_remove_region)(gfn_t gfn, unsigned long nr_pages,
52 int kvm_write_track_add_gfn(struct kvm *kvm, gfn_t gfn);
53 int kvm_write_track_remove_gfn(struct kvm *kvm, gfn_t gfn);
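The kvm_page_track.h matches above show the two halves of the x86 write-tracking API that an external user such as kvmgt consumes: a notifier node whose track_remove_region callback fires when a tracked region disappears, and kvm_write_track_add_gfn()/kvm_write_track_remove_gfn() to toggle write protection on a single gfn. A hedged usage sketch; the demo_* names are ours, and the registration call is recalled from the same header rather than shown in the matches:

#include <asm/kvm_page_track.h>

static void demo_remove_region(gfn_t gfn, unsigned long nr_pages,
			       struct kvm_page_track_notifier_node *node)
{
	/* drop any shadow state covering [gfn, gfn + nr_pages) */
}

static struct kvm_page_track_notifier_node demo_node = {
	.track_remove_region = demo_remove_region,
};

static int demo_track_one(struct kvm *kvm, gfn_t gfn)
{
	int ret = kvm_page_track_register_notifier(kvm, &demo_node);

	if (ret)
		return ret;
	/* Write-protect the page so guest writes trap to the tracker. */
	return kvm_write_track_add_gfn(kvm, gfn);
}
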
sev-common.h
86 #define GHCB_MSR_PSC_REQ_GFN(gfn, op) \
90 ((u64)((gfn) & GENMASK_ULL(39, 0)) << 12) | \
125 gfn : 40, member in struct:psc_entry
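In sev-common.h the gfn is a 40-bit quantity in both places matched above: masked with GENMASK_ULL(39, 0) and shifted into GHCBData[51:12] for the MSR protocol, and a 40-bit bitfield in struct psc_entry. A round-trip illustration of that packing; the op position in bits 55:52 and the 0x014 request code are our reading of the rest of the macro, not visible in the match:

#include <linux/bits.h>
#include <linux/types.h>

static inline u64 psc_req_pack(u64 gfn, u8 op)
{
	return ((u64)(op & 0xf) << 52) |		/* GHCBData[55:52] */
	       ((gfn & GENMASK_ULL(39, 0)) << 12) |	/* GHCBData[51:12] */
	       0x014;					/* PSC request code */
}

static inline u64 psc_req_gfn(u64 val)
{
	return (val >> 12) & GENMASK_ULL(39, 0);
}
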
/linux-master/arch/x86/kvm/mmu/
mmutrace.h
13 __field(__u64, gfn) \
20 __entry->gfn = sp->gfn; \
34 trace_seq_printf(p, "sp gen %u gfn %llx l%u %u-byte q%u%s %s%s" \
37 __entry->gfn, role.level, \
212 TP_PROTO(u64 *sptep, gfn_t gfn, u64 spte),
213 TP_ARGS(sptep, gfn, spte),
217 __field(gfn_t, gfn)
224 __entry->gfn = gfn;
[all...]
tdp_iter.c
15 SPTE_INDEX(iter->gfn << PAGE_SHIFT, iter->level);
29 iter->gfn = gfn_round_for_level(iter->next_last_level_gfn, iter->level);
97 iter->gfn = gfn_round_for_level(iter->next_last_level_gfn, iter->level);
116 if (SPTE_INDEX(iter->gfn << PAGE_SHIFT, iter->level) ==
120 iter->gfn += KVM_PAGES_PER_HPAGE(iter->level);
121 iter->next_last_level_gfn = iter->gfn;
139 iter->gfn = gfn_round_for_level(iter->gfn, iter->level);
page_track.c
75 static void update_gfn_write_track(struct kvm_memory_slot *slot, gfn_t gfn, argument
80 index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K);
91 gfn_t gfn)
101 update_gfn_write_track(slot, gfn, 1);
107 kvm_mmu_gfn_disallow_lpage(slot, gfn);
109 if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K))
114 struct kvm_memory_slot *slot, gfn_t gfn)
124 update_gfn_write_track(slot, gfn, -1);
130 kvm_mmu_gfn_allow_lpage(slot, gfn);
137 const struct kvm_memory_slot *slot, gfn_t gfn)
90 __kvm_write_track_add_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn) argument
113 __kvm_write_track_remove_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn) argument
136 kvm_gfn_is_write_tracked(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t gfn) argument
319 kvm_write_track_add_gfn(struct kvm *kvm, gfn_t gfn) argument
349 kvm_write_track_remove_gfn(struct kvm *kvm, gfn_t gfn) argument
[all...]
mmu_internal.h
81 gfn_t gfn; member in struct:kvm_mmu_page
115 * e.g. because KVM is shadowing a PTE at the same gfn, the memslot
161 static inline gfn_t gfn_round_for_level(gfn_t gfn, int level) argument
163 return gfn & -KVM_PAGES_PER_HPAGE(level);
167 gfn_t gfn, bool can_unsync, bool prefetch);
169 void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
170 void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
172 struct kvm_memory_slot *slot, u64 gfn,
176 static inline void kvm_flush_remote_tlbs_gfn(struct kvm *kvm, gfn_t gfn, int level) argument
178 kvm_flush_remote_tlbs_range(kvm, gfn_round_for_level(gfn, level), KVM_PAGES_PER_HPAGE(level));
233 gfn_t gfn; member in struct:kvm_page_fault
[all...]
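
Worth pausing on gfn_round_for_level() (lines 161-163 above): KVM_PAGES_PER_HPAGE(level) is a power of two, so ANDing the gfn with its negation clears the low bits and rounds the gfn down to the base of its huge-page region. A standalone demonstration in plain C, with the x86 value of 512 4KiB pages per 2MiB mapping assumed:

#include <stdint.h>
#include <stdio.h>

#define PAGES_PER_2M 512ULL	/* stand-in for KVM_PAGES_PER_HPAGE(2M) */

int main(void)
{
	uint64_t gfn = 0x12345;
	/* -512 == ~511 as a 64-bit value: a mask clearing the low 9 bits */
	uint64_t base = gfn & -PAGES_PER_2M;

	printf("gfn 0x%llx -> base 0x%llx\n",	/* prints 0x12345 -> 0x12200 */
	       (unsigned long long)gfn, (unsigned long long)base);
	return 0;
}
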
page_track.h
19 gfn_t gfn);
21 struct kvm_memory_slot *slot, gfn_t gfn);
24 const struct kvm_memory_slot *slot, gfn_t gfn);
tdp_mmu.c
198 gfn_t gfn, union kvm_mmu_page_role role)
205 sp->gfn = gfn;
223 tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role);
290 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
346 gfn_t base_gfn = sp->gfn;
355 gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level); local
418 handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
429 * @gfn: the base GFN that was mapped by the SPTE
441 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, argument
197 tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep, gfn_t gfn, union kvm_mmu_page_role role) argument
625 tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep, u64 old_spte, u64 new_spte, gfn_t gfn, int level) argument
1615 clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root, gfn_t gfn, unsigned long mask, bool wrprot) argument
1661 kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, unsigned long mask, bool wrprot) argument
1742 write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root, gfn_t gfn, int min_level) argument
1778 kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, int min_level) argument
1803 gfn_t gfn = addr >> PAGE_SHIFT; local
1832 gfn_t gfn = addr >> PAGE_SHIFT; local
[all...]
mmu.c
287 gfn_t gfn = kvm_mmu_page_get_gfn(sp, spte_index(sptep)); local
289 kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level);
292 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn, argument
295 u64 spte = make_mmio_spte(vcpu, gfn, access);
297 trace_mark_mmio_spte(sptep, gfn, spte);
720 return sp->gfn;
725 return sp->gfn + (index << ((sp->role.level - 1) * SPTE_LEVEL_BITS));
755 gfn_t gfn, unsigned int access)
758 sp->shadowed_translation[index] = (gfn << PAGE_SHIFT) | access;
765 sp->gfn, kvm_mmu_page_get_access(sp, index), access);
754 kvm_mmu_page_set_translation(struct kvm_mmu_page *sp, int index, gfn_t gfn, unsigned int access) argument
776 gfn_t gfn = kvm_mmu_page_get_gfn(sp, index); local
785 lpage_info_slot(gfn_t gfn, const struct kvm_memory_slot *slot, int level) argument
802 update_gfn_disallow_lpage_count(const struct kvm_memory_slot *slot, gfn_t gfn, int count) argument
817 kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn) argument
822 kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn) argument
831 gfn_t gfn; local
879 gfn_t gfn; local
907 gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn, bool no_dirty_log) argument
1086 gfn_to_rmap(gfn_t gfn, int level, const struct kvm_memory_slot *slot) argument
1100 gfn_t gfn; local
1414 kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, struct kvm_memory_slot *slot, u64 gfn, int min_level) argument
1436 kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn) argument
1450 kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, struct kvm_memory_slot *slot, gfn_t gfn, int level, pte_t unused) argument
1457 kvm_set_pte_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, struct kvm_memory_slot *slot, gfn_t gfn, int level, pte_t pte) argument
1503 gfn_t gfn; member in struct:slot_rmap_walk_iterator
1612 kvm_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, struct kvm_memory_slot *slot, gfn_t gfn, int level, pte_t unused) argument
1626 kvm_test_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, struct kvm_memory_slot *slot, gfn_t gfn, int level, pte_t unused) argument
1641 __rmap_add(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, const struct kvm_memory_slot *slot, u64 *spte, gfn_t gfn, unsigned int access) argument
1665 rmap_add(struct kvm_vcpu *vcpu, const struct kvm_memory_slot *slot, u64 *spte, gfn_t gfn, unsigned int access) argument
1749 kvm_page_table_hashfn(gfn_t gfn) argument
2151 kvm_mmu_find_shadow_page(struct kvm *kvm, struct kvm_vcpu *vcpu, gfn_t gfn, struct hlist_head *sp_list, union kvm_mmu_page_role role) argument
2236 kvm_mmu_alloc_shadow_page(struct kvm *kvm, struct shadow_page_caches *caches, gfn_t gfn, struct hlist_head *sp_list, union kvm_mmu_page_role role) argument
2272 __kvm_mmu_get_shadow_page(struct kvm *kvm, struct kvm_vcpu *vcpu, struct shadow_page_caches *caches, gfn_t gfn, union kvm_mmu_page_role role) argument
2294 kvm_mmu_get_shadow_page(struct kvm_vcpu *vcpu, gfn_t gfn, union kvm_mmu_page_role role) argument
2353 kvm_mmu_get_child_sp(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, bool direct, unsigned int access) argument
2757 kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) argument
2805 mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t gfn, bool can_unsync, bool prefetch) argument
2906 mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, u64 *sptep, unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn, struct kvm_page_fault *fault) argument
2985 gfn_t gfn; local
3082 host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, const struct kvm_memory_slot *slot) argument
3146 __kvm_mmu_max_mapping_level(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t gfn, int max_level, bool is_private) argument
3170 kvm_mmu_max_mapping_level(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t gfn, int max_level) argument
3281 kvm_send_hwpoison_signal(struct kvm_memory_slot *slot, gfn_t gfn) argument
3683 mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, int quadrant, u8 level) argument
4200 gfn_t gfn = get_mmio_spte_gfn(spte); local
4262 kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, gfn_t gfn) argument
4812 sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, unsigned int access) argument
5797 gfn_t gfn = gpa >> PAGE_SHIFT; local
6496 gfn_t gfn; local
6527 gfn_t gfn; local
6569 gfn_t gfn; local
7333 hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn, int level) argument
7339 hugepage_clear_mixed(struct kvm_memory_slot *slot, gfn_t gfn, int level) argument
7345 hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn, int level) argument
7351 hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, int level, unsigned long attrs) argument
7393 gfn_t gfn = gfn_round_for_level(range->start, level); local
7452 gfn_t gfn; local
[all...]
paging_tmpl.h
91 gfn_t gfn; member in struct:guest_walker
322 gfn_t gfn; local
440 gfn = gpte_to_gfn_lvl(pte, walker->level);
441 gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT;
445 gfn += pse36_gfn_delta(pte);
448 real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(gfn), access, &walker->fault);
452 walker->gfn = real_gpa >> PAGE_SHIFT;
538 gfn_t gfn; local
544 gfn = gpte_to_gfn(gpte);
548 slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, pte_access & ACC_WRITE_MASK);
912 gfn_t gfn; local
[all...]
tdp_iter.h
95 gfn_t gfn; member in struct:tdp_iter
125 iter.valid && iter.gfn < end; \
tdp_mmu.h
42 gfn_t gfn, unsigned long mask,
48 struct kvm_memory_slot *slot, gfn_t gfn,
/linux-master/arch/powerpc/kvm/
book3s_hv_uvmem.c
289 static void kvmppc_mark_gfn(unsigned long gfn, struct kvm *kvm, argument
295 if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
296 unsigned long index = gfn - p->base_pfn;
308 static void kvmppc_gfn_secure_uvmem_pfn(unsigned long gfn, argument
311 kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_UVMEM_PFN, uvmem_pfn);
315 static void kvmppc_gfn_secure_mem_pfn(unsigned long gfn, struct kvm *kvm) argument
317 kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_MEM_PFN, 0);
321 static void kvmppc_gfn_shared(unsigned long gfn, struct kvm *kvm) argument
323 kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_SHARED, 0);
327 kvmppc_gfn_remove(unsigned long gfn, struct kvm *kvm) argument
333 kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm, unsigned long *uvmem_pfn) argument
361 kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot *memslot, struct kvm *kvm, unsigned long *gfn) argument
394 unsigned long gfn = memslot->base_gfn; local
617 unsigned long uvmem_pfn, gfn; local
797 unsigned long gfn = memslot->base_gfn; local
885 unsigned long gfn = gpa >> page_shift; local
943 unsigned long gfn = gpa >> page_shift; local
1049 unsigned long gfn = gpa >> page_shift; local
1084 kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn) argument
[all...]
book3s_64_mmu_hv.c
515 unsigned long gpa, gfn, hva, pfn, hpa; local
578 gfn = gpa >> PAGE_SHIFT;
579 memslot = gfn_to_memslot(kvm, gfn);
604 hva = gfn_to_hva_memslot(memslot, gfn);
616 pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL,
781 unsigned long *rmapp, unsigned long gfn)
805 hpte_rpn(ptel, psize) == gfn) {
813 kvmppc_update_dirty_map(memslot, gfn, psize);
822 unsigned long gfn)
828 rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
779 kvmppc_unmap_hpte(struct kvm *kvm, unsigned long i, struct kvm_memory_slot *memslot, unsigned long *rmapp, unsigned long gfn) argument
821 kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn) argument
859 gfn_t gfn; local
875 unsigned long gfn; local
899 kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn) argument
956 gfn_t gfn; local
970 kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn) argument
1119 unsigned long gfn; local
1160 unsigned long gfn = gpa >> PAGE_SHIFT; local
1192 unsigned long gfn; local
1276 unsigned long gfn = hpte_rpn(guest_rpte, apsize); local
[all...]
/linux-master/include/xen/
xen-ops.h
66 xen_pfn_t *gfn, int nr,
79 xen_pfn_t *gfn, int nr,
98 * xen_remap_domain_gfn_array() - map an array of foreign frames by gfn
101 * @gfn: Array of GFNs to map
108 * @gfn and @err_ptr may point to the same buffer, the GFNs will be
116 xen_pfn_t *gfn, int nr,
122 return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
130 return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,
165 * @gfn: First GFN to map.
176 xen_pfn_t gfn, int nr,
77 xen_xlate_remap_gfn_array(struct vm_area_struct *vma, unsigned long addr, xen_pfn_t *gfn, int nr, int *err_ptr, pgprot_t prot, unsigned int domid, struct page **pages) argument
114 xen_remap_domain_gfn_array(struct vm_area_struct *vma, unsigned long addr, xen_pfn_t *gfn, int nr, int *err_ptr, pgprot_t prot, unsigned int domid, struct page **pages) argument
174 xen_remap_domain_gfn_range(struct vm_area_struct *vma, unsigned long addr, xen_pfn_t gfn, int nr, pgprot_t prot, unsigned int domid, struct page **pages) argument
[all...]
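
Per the xen-ops.h comments above, xen_remap_domain_gfn_array() maps nr foreign frames into a VMA and reports per-frame status through err_ptr, which may alias the gfn array itself; the return value is the count of successfully mapped frames or a negative errno. A hedged caller sketch that reuses the gfn buffer for the error codes, as the comment allows (map_foreign_frames and its surrounding context are hypothetical):

#include <linux/errno.h>
#include <linux/mm.h>
#include <xen/xen-ops.h>

static int map_foreign_frames(struct vm_area_struct *vma, xen_pfn_t *gfns,
			      int nr, unsigned int domid, struct page **pages)
{
	int mapped = xen_remap_domain_gfn_array(vma, vma->vm_start, gfns, nr,
						(int *)gfns, vma->vm_page_prot,
						domid, pages);

	if (mapped < 0)
		return mapped;		/* nothing was mapped */
	/* mapped < nr: the aliased buffer now holds per-frame error codes */
	return mapped == nr ? 0 : -EFAULT;
}
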
/linux-master/virt/kvm/
dirty_ring.c
89 static inline void kvm_dirty_gfn_set_invalid(struct kvm_dirty_gfn *gfn) argument
91 smp_store_release(&gfn->flags, 0);
94 static inline void kvm_dirty_gfn_set_dirtied(struct kvm_dirty_gfn *gfn) argument
96 gfn->flags = KVM_DIRTY_GFN_F_DIRTY;
99 static inline bool kvm_dirty_gfn_harvested(struct kvm_dirty_gfn *gfn) argument
101 return smp_load_acquire(&gfn->flags) & KVM_DIRTY_GFN_F_RESET;
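The dirty_ring.c matches show KVM's side of the dirty-ring handshake on kvm_dirty_gfn.flags: entries are reset with smp_store_release(), and harvesting is detected with smp_load_acquire() on KVM_DIRTY_GFN_F_RESET. A sketch of the matching userspace half, assuming a power-of-two ring indexed by a free-running fetch counter (harvest_dirty_ring and that geometry are our assumptions, not shown above):

#include <linux/kvm.h>	/* struct kvm_dirty_gfn, KVM_DIRTY_GFN_F_* */

static int harvest_dirty_ring(struct kvm_dirty_gfn *ring, __u32 size,
			      __u32 *fetch)
{
	int harvested = 0;

	for (;;) {
		struct kvm_dirty_gfn *e = &ring[*fetch % size];
		__u32 flags = __atomic_load_n(&e->flags, __ATOMIC_ACQUIRE);

		if (!(flags & KVM_DIRTY_GFN_F_DIRTY))
			break;	/* no more published entries */

		/* ... consume e->slot and e->offset (the dirty gfn) ... */

		/* Release pairs with KVM's smp_load_acquire() above. */
		__atomic_store_n(&e->flags, flags | KVM_DIRTY_GFN_F_RESET,
				 __ATOMIC_RELEASE);
		(*fetch)++;
		harvested++;
	}
	/* Caller follows up with ioctl(vm_fd, KVM_RESET_DIRTY_RINGS, 0). */
	return harvested;
}
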
/linux-master/include/linux/
kvm_host.h
102 * error pfns indicate that the gfn is in slot but failed to
120 * error_noslot pfns indicate that the gfn can not be
129 /* noslot pfn indicates that the gfn is not in slot. */
300 kvm_pfn_t gfn; member in struct:kvm_host_map
1072 /* Iterator used for walking memslots that overlap a gfn range. */
1100 * its key strictly greater than the searched one (the start gfn in our case).
1114 * Find the slot with the lowest gfn that can possibly intersect with
1120 * already has a higher start gfn.
1204 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
1207 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
1567 kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages) argument
1702 try_get_memslot(struct kvm_memory_slot *slot, gfn_t gfn) argument
1721 search_memslots(struct kvm_memslots *slots, gfn_t gfn, bool approx) argument
1742 ____gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn, bool approx) argument
1766 __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn) argument
1772 __gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn) argument
1785 memslot_id(struct kvm *kvm, gfn_t gfn) argument
1798 gfn_to_gpa(gfn_t gfn) argument
2037 mmu_invalidate_retry_gfn(struct kvm *kvm, unsigned long mmu_seq, gfn_t gfn) argument
2073 mmu_invalidate_retry_gfn_unsafe(struct kvm *kvm, unsigned long mmu_seq, gfn_t gfn) argument
2411 kvm_get_memory_attributes(struct kvm *kvm, gfn_t gfn) argument
2423 kvm_mem_is_private(struct kvm *kvm, gfn_t gfn) argument
2429 kvm_mem_is_private(struct kvm *kvm, gfn_t gfn) argument
2439 kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, kvm_pfn_t *pfn, int *max_order) argument
[all...]
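
The kvm_host.h comments above (lines 1100-1120) spell out the gfn-to-memslot lookup strategy: binary-search for the first node whose key is strictly greater than the searched gfn, then step back one to reach the slot with the lowest gfn that can possibly intersect; if that slot does not contain the gfn, no slot does, since slots do not overlap and every earlier slot ends below the candidate's start. A self-contained restatement over a sorted array standing in for KVM's gfn rb-tree (struct slot and find_slot are ours):

#include <stddef.h>
#include <stdint.h>

struct slot { uint64_t base_gfn; uint64_t npages; };

static struct slot *find_slot(struct slot *slots, int n, uint64_t gfn)
{
	int lo = 0, hi = n;

	/* Find the first slot with base_gfn strictly greater than gfn. */
	while (lo < hi) {
		int mid = lo + (hi - lo) / 2;

		if (slots[mid].base_gfn > gfn)
			hi = mid;
		else
			lo = mid + 1;
	}
	if (lo == 0)
		return NULL;	/* every slot starts above gfn */

	/* Step back one: the only candidate that can contain gfn. */
	if (gfn < slots[lo - 1].base_gfn + slots[lo - 1].npages)
		return &slots[lo - 1];
	return NULL;
}
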
/linux-master/drivers/xen/
xlate_mmu.c
45 typedef void (*xen_gfn_fn_t)(unsigned long gfn, void *data);
47 /* Break down the pages in 4KB chunk and call fn for each gfn */
65 xen_pfn_t *fgfn; /* foreign domain's gfn */
66 int nr_fgfn; /* Number of foreign gfn left to map */
84 static void setup_hparams(unsigned long gfn, void *data) argument
89 info->h_gpfns[info->h_iter] = gfn;
145 xen_pfn_t *gfn, int nr,
158 data.fgfn = gfn;
174 static void unmap_gfn(unsigned long gfn, void *data) argument
179 xrp.gpfn = gfn;
143 xen_xlate_remap_gfn_array(struct vm_area_struct *vma, unsigned long addr, xen_pfn_t *gfn, int nr, int *err_ptr, pgprot_t prot, unsigned domid, struct page **pages) argument
197 setup_balloon_gfn(unsigned long gfn, void *data) argument
[all...]
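
The xlate_mmu.c comment above ("Break down the pages in 4KB chunk and call fn for each gfn") covers the case of a kernel whose page size exceeds Xen's 4KB frame size, so each struct page spans several Xen frames. A hedged sketch of such an iterator; the loop shape and for_each_xen_gfn name are assumptions, and on ARM gfn == pfn as the page.h match below notes:

#include <xen/page.h>	/* XEN_PFN_PER_PAGE, page_to_xen_pfn() */

static void for_each_xen_gfn(struct page **pages, unsigned int nr_frames,
			     xen_gfn_fn_t fn, void *data)
{
	unsigned int i;

	for (i = 0; i < nr_frames; i++) {
		/* One struct page covers XEN_PFN_PER_PAGE 4KB frames. */
		unsigned long xen_pfn =
			page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);

		fn(xen_pfn + (i % XEN_PFN_PER_PAGE), data);
	}
}
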
/linux-master/arch/x86/kvm/
kvm_onhyperv.h
10 int hv_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, gfn_t nr_pages);
/linux-master/arch/powerpc/include/asm/
kvm_book3s_uvmem.h
22 int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn);
81 static inline int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn) argument
/linux-master/arch/riscv/kvm/
vcpu_exit.c
19 gfn_t gfn; local
23 gfn = fault_addr >> PAGE_SHIFT;
24 memslot = gfn_to_memslot(vcpu->kvm, gfn);
25 hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
/linux-master/include/xen/arm/
page.h
52 static inline unsigned long gfn_to_pfn(unsigned long gfn) argument
54 return gfn;

Completed in 309 milliseconds
