Searched refs:gfn (Results 76 - 85 of 85) sorted by path


/linux-master/include/xen/arm/
  page.h
     52  static inline unsigned long gfn_to_pfn(unsigned long gfn)   [argument]
     54      return gfn;
/linux-master/include/xen/
  grant_table.h
    298  * gfn: guest frame number
    303  typedef void (*xen_grant_fn_t)(unsigned long gfn, unsigned int offset,
  xen-ops.h
     66  xen_pfn_t *gfn, int nr,
     79  xen_pfn_t *gfn, int nr,
     98  * xen_remap_domain_gfn_array() - map an array of foreign frames by gfn
    101  * @gfn: Array of GFNs to map
    108  * @gfn and @err_ptr may point to the same buffer, the GFNs will be
    116  xen_pfn_t *gfn, int nr,
    122  return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
    130  return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,
    165  * @gfn: First GFN to map.
    176  xen_pfn_t gfn, int nr,
     77  xen_xlate_remap_gfn_array(struct vm_area_struct *vma, unsigned long addr, xen_pfn_t *gfn, int nr, int *err_ptr, pgprot_t prot, unsigned int domid, struct page **pages)   [argument]
    114  xen_remap_domain_gfn_array(struct vm_area_struct *vma, unsigned long addr, xen_pfn_t *gfn, int nr, int *err_ptr, pgprot_t prot, unsigned int domid, struct page **pages)   [argument]
    174  xen_remap_domain_gfn_range(struct vm_area_struct *vma, unsigned long addr, xen_pfn_t gfn, int nr, pgprot_t prot, unsigned int domid, struct page **pages)   [argument]
    [all...]
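
Note: the wrappers defined at lines 114 and 174 above are what drivers such as privcmd use to map foreign guest frames into a userspace VMA. A minimal sketch of a caller follows; only the xen_remap_domain_gfn_array() signature comes from the hits above, while the surrounding wrapper, gfns/errs buffers and dom_id are illustrative assumptions.

    #include <xen/xen-ops.h>

    /*
     * Illustrative sketch only: map @nr foreign GFNs owned by domain
     * @dom_id into the caller's VMA.  @gfns and @errs are caller-provided
     * arrays (per the comment at line 108 they may even alias).
     */
    static int example_map_foreign_frames(struct vm_area_struct *vma,
                                          xen_pfn_t *gfns, int *errs, int nr,
                                          unsigned int dom_id)
    {
            /* Real callers (e.g. privcmd) pass a pre-allocated page array on
             * auto-translated guests; NULL keeps the sketch short. */
            return xen_remap_domain_gfn_array(vma, vma->vm_start, gfns, nr,
                                              errs, vma->vm_page_prot,
                                              dom_id, NULL);
    }
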
/linux-master/tools/arch/x86/include/uapi/asm/
  kvm.h
    572  __u64 gfn;   [member in union:kvm_xen_hvm_attr::__anon140::__anon141]
/linux-master/tools/testing/selftests/kvm/
  dirty_log_test.c
    161  * recorded the dirty gfn with the old contents.
    166  * dirty gfn we've collected, so that if a mismatch of data found later in the
    302  static inline bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn)   [argument]
    304      return smp_load_acquire(&gfn->flags) == KVM_DIRTY_GFN_F_DIRTY;
    307  static inline void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn)   [argument]
    309      smp_store_release(&gfn->flags, KVM_DIRTY_GFN_F_RESET);
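
Note: the two selftest helpers at lines 302 and 307 are the userspace half of the dirty-ring handshake: the flags word is read with acquire semantics before the entry's payload is trusted, and written back with release semantics once the entry has been copied out. A rough equivalent using compiler __atomic builtins in place of the selftest's smp_load_acquire()/smp_store_release() wrappers (struct kvm_dirty_gfn and the flag values are from the KVM uapi; the helper names here are illustrative):

    #include <linux/kvm.h>          /* struct kvm_dirty_gfn, KVM_DIRTY_GFN_F_* */
    #include <stdbool.h>

    /* Entry contents are only valid once KVM has published the DIRTY flag. */
    static inline bool example_gfn_is_dirtied(struct kvm_dirty_gfn *gfn)
    {
            return __atomic_load_n(&gfn->flags, __ATOMIC_ACQUIRE) ==
                   KVM_DIRTY_GFN_F_DIRTY;
    }

    /* After copying gfn->slot/gfn->offset, hand the entry back for reset. */
    static inline void example_gfn_set_collected(struct kvm_dirty_gfn *gfn)
    {
            __atomic_store_n(&gfn->flags, KVM_DIRTY_GFN_F_RESET,
                             __ATOMIC_RELEASE);
    }
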
/linux-master/tools/testing/selftests/kvm/x86_64/
  xen_shinfo_test.c
    402  .u.shared_info.gfn = SHINFO_REGION_GPA / PAGE_SIZE
    407  .u.shared_info.gfn = KVM_XEN_INVALID_GFN
    507  ha.u.shared_info.gfn = SHINFO_ADDR / PAGE_SIZE;
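
Note: these assignments feed the gfn member shown in kvm.h above: the Xen shared-info page is registered by GFN through the KVM_XEN_HVM_SET_ATTR ioctl and unregistered by writing KVM_XEN_INVALID_GFN. A minimal sketch, with the VM file descriptor and the chosen GFN left as caller-supplied placeholders:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /*
     * Register (or tear down) the Xen shared_info page by GFN.  vm_fd and
     * shinfo_gfn are caller-supplied; the attr type, the union member and
     * KVM_XEN_INVALID_GFN come from the uapi header.
     */
    static int example_set_shinfo(int vm_fd, __u64 shinfo_gfn)
    {
            struct kvm_xen_hvm_attr ha = {
                    .type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
                    .u.shared_info.gfn = shinfo_gfn,  /* KVM_XEN_INVALID_GFN unmaps */
            };

            return ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &ha);
    }
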
/linux-master/virt/kvm/
  dirty_ring.c
     89  static inline void kvm_dirty_gfn_set_invalid(struct kvm_dirty_gfn *gfn)   [argument]
     91      smp_store_release(&gfn->flags, 0);
     94  static inline void kvm_dirty_gfn_set_dirtied(struct kvm_dirty_gfn *gfn)   [argument]
     96      gfn->flags = KVM_DIRTY_GFN_F_DIRTY;
     99  static inline bool kvm_dirty_gfn_harvested(struct kvm_dirty_gfn *gfn)   [argument]
    101      return smp_load_acquire(&gfn->flags) & KVM_DIRTY_GFN_F_RESET;
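
Note: together with the selftest helpers above, these three kernel helpers describe the life cycle of a ring entry: KVM fills in slot/offset and marks the entry KVM_DIRTY_GFN_F_DIRTY, userspace harvests it and marks it KVM_DIRTY_GFN_F_RESET, and the kernel clears the flags again when the rings are reset. A hedged sketch of the userspace side of that loop; the ring pointer, ring size and fetch index are assumed to come from the caller's mmap of the vCPU dirty ring, and KVM_RESET_DIRTY_RINGS is the VM ioctl that completes the cycle:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /*
     * Walk one vCPU's dirty ring from *fetch_index, collect every entry KVM
     * has published, and flag it for reset.  'ring' is assumed to be the
     * mmap'ed kvm_dirty_gfn array and 'ring_size' its entry count.
     */
    static int example_harvest_ring(struct kvm_dirty_gfn *ring, __u32 ring_size,
                                    __u32 *fetch_index, int vm_fd)
    {
            int collected = 0;

            for (;;) {
                    struct kvm_dirty_gfn *e = &ring[*fetch_index % ring_size];

                    if (__atomic_load_n(&e->flags, __ATOMIC_ACQUIRE) !=
                        KVM_DIRTY_GFN_F_DIRTY)
                            break;          /* no more published entries */

                    /* e->slot and e->offset identify the dirty GFN. */

                    __atomic_store_n(&e->flags, KVM_DIRTY_GFN_F_RESET,
                                     __ATOMIC_RELEASE);
                    (*fetch_index)++;
                    collected++;
            }

            /* Ask KVM to recycle everything marked RESET. */
            if (collected)
                    ioctl(vm_fd, KVM_RESET_DIRTY_RINGS, 0);

            return collected;
    }
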
  guest_memfd.c
    486  gfn_t gfn, kvm_pfn_t *pfn, int *max_order)
    488      pgoff_t index = gfn - slot->base_gfn + slot->gmem.pgoff;
    485  kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, kvm_pfn_t *pfn, int *max_order)   [argument]
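
Note: the index computed at line 488 is plain offset arithmetic: the GFN's distance from the start of the memslot, shifted by the slot's starting page offset inside the guest_memfd file. A tiny worked example (all values invented):

    #include <linux/kvm_types.h>    /* gfn_t */
    #include <linux/types.h>        /* pgoff_t */

    /* Mirror of the mapping at line 488: GFN offset within the memslot plus
     * the slot's starting page index inside the guest_memfd file. */
    static inline pgoff_t example_gmem_index(gfn_t gfn, gfn_t base_gfn,
                                             pgoff_t pgoff)
    {
            return gfn - base_gfn + pgoff;
    }

    /* e.g. base_gfn = 0x100000, pgoff = 0x200, gfn = 0x100042
     *   -> index = 0x100042 - 0x100000 + 0x200 = 0x242
     */
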
  kvm_main.c
    358  void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)   [argument]
    360      if (!kvm_arch_flush_remote_tlbs_range(kvm, gfn, nr_pages))
    640  * {gfn(page) | page intersects with [hva_start, hva_end)} =
   1540  * If the memslot gfn is unchanged, rb_replace_node() can be used to
   1541  * switch the node in the gfn tree instead of removing the old and
   1796  * memslot will be created. Validation of sp->gfn happens in:
   2502  /* Set @attributes for the gfn range [@start, @end). */
   2588  struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)   [argument]
   2590      return __gfn_to_memslot(kvm_memslots(kvm), gfn);
   2594  struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)   [argument]
   2627  kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)   [argument]
   2635  kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)   [argument]
   2643  kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)   [argument]
   2672  __gfn_to_hva_many(const struct kvm_memory_slot *slot, gfn_t gfn, gfn_t *nr_pages, bool write)   [argument]
   2687  gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, gfn_t *nr_pages)   [argument]
   2693  gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)   [argument]
   2700  gfn_to_hva(struct kvm *kvm, gfn_t gfn)   [argument]
   2706  kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)   [argument]
   2720  gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn, bool *writable)   [argument]
   2731  gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)   [argument]
   2738  kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)   [argument]
   2989  __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn, bool atomic, bool interruptible, bool *async, bool write_fault, bool *writable, hva_t *hva)   [argument]
   3017  gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, bool *writable)   [argument]
   3025  gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)   [argument]
   3032  gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn)   [argument]
   3039  kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)   [argument]
   3045  gfn_to_pfn(struct kvm *kvm, gfn_t gfn)   [argument]
   3051  kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)   [argument]
   3057  gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn, struct page **pages, int nr_pages)   [argument]
   3080  gfn_to_page(struct kvm *kvm, gfn_t gfn)   [argument]
   3106  kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)   [argument]
   3268  __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn, void *data, int offset, int len)   [argument]
   3283  kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, int len)   [argument]
   3292  kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset, int len)   [argument]
   3303  gfn_t gfn = gpa >> PAGE_SHIFT;   [local]
   3323  gfn_t gfn = gpa >> PAGE_SHIFT;   [local]
   3341  __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn, void *data, int offset, unsigned long len)   [argument]
   3361  gfn_t gfn = gpa >> PAGE_SHIFT;   [local]
   3370  __kvm_write_guest_page(struct kvm *kvm, struct kvm_memory_slot *memslot, gfn_t gfn, const void *data, int offset, int len)   [argument]
   3387  kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data, int offset, int len)   [argument]
   3396  kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data, int offset, int len)   [argument]
   3408  gfn_t gfn = gpa >> PAGE_SHIFT;   [local]
   3429  gfn_t gfn = gpa >> PAGE_SHIFT;   [local]
   3574  gfn_t gfn = gpa >> PAGE_SHIFT;   [local]
   3591  mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn)   [argument]
   3616  mark_page_dirty(struct kvm *kvm, gfn_t gfn)   [argument]
   3625  kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)   [argument]
   [all...]
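
Note: most of the helpers in this block form a single translation chain: a GPA is shifted down to a GFN, the GFN is checked against and resolved through the memslots, and guest memory is then accessed via kvm_read_guest_page()/kvm_write_guest_page(), whose write path logs the GFN through mark_page_dirty_in_slot(). A hedged kernel-side sketch that strings a few of the listed helpers together (the wrapper name and error handling are illustrative only):

    #include <linux/kvm_host.h>

    /*
     * Illustrative only: read a u32 at guest physical address @gpa, bump it,
     * and write it back, using helpers that appear in the hits above.  The
     * write path dirties the page internally via mark_page_dirty_in_slot().
     */
    static int example_bump_guest_u32(struct kvm *kvm, gpa_t gpa)
    {
            gfn_t gfn = gpa >> PAGE_SHIFT;   /* same idiom as the 'local' hits */
            int offset = offset_in_page(gpa);
            u32 val;
            int ret;

            if (!kvm_is_visible_gfn(kvm, gfn))
                    return -EINVAL;

            ret = kvm_read_guest_page(kvm, gfn, &val, offset, sizeof(val));
            if (ret)
                    return ret;

            val++;

            return kvm_write_guest_page(kvm, gfn, &val, offset, sizeof(val));
    }
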
  pfncache.c
    291  gfn_t gfn = gpa_to_gfn(gpa);   [local]
    295  gpc->memslot = __gfn_to_memslot(slots, gfn);
    296  gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);
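
Note: the cache refresh shown here resolves a GPA once, to a memslot and a userspace HVA, so later accesses can skip the lookup. A minimal sketch of that gpa -> gfn -> memslot -> uhva resolution using the same helpers (illustrative only; the real cache also revalidates against memslot generation changes):

    #include <linux/kvm_host.h>

    /*
     * Resolve a guest physical address to the host virtual address mapping
     * it, the way the cache refresh above does.  Illustrative only.
     */
    static unsigned long example_gpa_to_uhva(struct kvm *kvm, gpa_t gpa)
    {
            gfn_t gfn = gpa_to_gfn(gpa);
            struct kvm_memory_slot *slot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
            unsigned long uhva = gfn_to_hva_memslot(slot, gfn);

            if (kvm_is_error_hva(uhva))
                    return 0;

            /* Add the sub-page offset back in. */
            return uhva + offset_in_page(gpa);
    }
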

Completed in 306 milliseconds
