Searched refs:gfn (Results 1 - 4 of 4) sorted by relevance

/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/drivers/kvm/
paging_tmpl.h
65 gfn_t gfn; member in struct:guest_walker
137 walker->gfn = (*ptep & PT_BASE_ADDR_MASK)
145 walker->gfn = (*ptep & PT_DIR_BASE_ADDR_MASK)
147 walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
196 u64 *shadow_pte, u64 access_bits, gfn_t gfn)
202 guest_pte & PT_DIRTY_MASK, access_bits, gfn);
206 u64 *shadow_pte, u64 access_bits, gfn_t gfn)
212 gaddr = (gpa_t)gfn << PAGE_SHIFT;
218 guest_pde & PT_DIRTY_MASK, access_bits, gfn);
267 walker->gfn);
195 set_pte(struct kvm_vcpu *vcpu, u64 guest_pte, u64 *shadow_pte, u64 access_bits, gfn_t gfn) argument
205 set_pde(struct kvm_vcpu *vcpu, u64 guest_pde, u64 *shadow_pte, u64 access_bits, gfn_t gfn) argument
316 gfn_t gfn; local
[all...]
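
The paging_tmpl.h matches show both directions of the gfn conversion: the guest walker pulls the frame number out of a PTE/PDE by masking the base-address bits (lines 137, 145-147; the right-hand sides are truncated in the snippet), and set_pde() rebuilds a guest physical address by shifting the gfn back up (line 212). A minimal standalone sketch of that arithmetic; the typedefs and the mask value are assumptions for illustration, only the shift/mask pattern follows the matched lines:

#include <stdint.h>

typedef uint64_t gpa_t;                 /* guest physical address */
typedef uint64_t gfn_t;                 /* guest frame number     */

#define PAGE_SHIFT        12
#define PT_BASE_ADDR_MASK 0x000ffffffffff000ULL   /* assumed: bits 12-51 */

/* paging_tmpl.h line 137 (truncated there): leaf PTE -> gfn */
static gfn_t pte_to_gfn(uint64_t pte)
{
        return (pte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

/* paging_tmpl.h line 212: gfn -> guest physical address, as in set_pde() */
static gpa_t gfn_to_gpa(gfn_t gfn)
{
        return (gpa_t)gfn << PAGE_SHIFT;
}

The same gpa >> PAGE_SHIFT relationship appears again at mmu.c line 1142 and kvm_main.c line 239 below.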
mmu.c
412 static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn) argument
419 page = gfn_to_page(kvm, gfn);
468 static unsigned kvm_page_table_hashfn(gfn_t gfn) argument
470 return gfn;
568 gfn_t gfn)
575 pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
576 index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
579 if (page->gfn == gfn
567 kvm_mmu_lookup_page(struct kvm_vcpu *vcpu, gfn_t gfn) argument
587 kvm_mmu_get_page(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gaddr, unsigned level, int metaphysical, unsigned hugepage_access, u64 *parent_pte) argument
698 kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn) argument
954 set_pte_common(struct kvm_vcpu *vcpu, u64 *shadow_pte, gpa_t gaddr, int dirty, u64 access_bits, gfn_t gfn) argument
1142 gfn_t gfn = gpa >> PAGE_SHIFT; local
[all...]
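
The mmu.c matches outline the shadow-page hash lookup: kvm_page_table_hashfn() is an identity hash (line 470), the caller reduces it modulo KVM_NUM_MMU_PAGES to pick a bucket (line 576), and the bucket is scanned for an exact gfn match (line 579). A simplified, self-contained sketch of that pattern; the struct layout, chaining, and bucket count here are assumptions, not the kernel's hlist-based code:

#include <stddef.h>
#include <stdint.h>

typedef uint64_t gfn_t;

#define KVM_NUM_MMU_PAGES 1024          /* assumed bucket count */

struct mmu_page {
        gfn_t gfn;                      /* guest frame this shadow page maps */
        struct mmu_page *hash_next;     /* simplified chaining                */
};

static unsigned page_table_hashfn(gfn_t gfn)
{
        return gfn;                     /* mmu.c line 470: identity hash */
}

static struct mmu_page *lookup_page(struct mmu_page **hash, gfn_t gfn)
{
        unsigned index = page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
        struct mmu_page *page;

        for (page = hash[index]; page; page = page->hash_next)
                if (page->gfn == gfn)   /* mmu.c line 579 */
                        return page;
        return NULL;
}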
kvm.h
88 * gfn - guest frame number
115 * bit 16 - "metaphysical" - gfn is not a real page (huge page/real mode)
138 gfn_t gfn; member in struct:kvm_mmu_page
465 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
466 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
467 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
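
kvm.h names the concept (line 88, "gfn - guest frame number") and declares the helpers that translate a gfn into host-side state (lines 465-467). A hypothetical caller sketch of how those declarations are typically combined, for example when emulated code writes to guest memory; the scenario, the function name, and the NULL-return assumption are illustrative, not code from this tree:

struct kvm;                             /* opaque for this sketch */
struct kvm_memory_slot;
struct page;
typedef unsigned long gfn_t;            /* assumed width */

/* declarations matched at kvm.h lines 465-467 */
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);

#define PAGE_SHIFT 12

/* hypothetical caller: record that the guest wrote to a physical address */
static void note_guest_write(struct kvm *kvm, unsigned long long gpa)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;  /* same pattern as kvm_main.c line 239 */

        if (!gfn_to_page(kvm, gfn))     /* assumed: NULL when no memslot backs the frame */
                return;
        mark_page_dirty(kvm, gfn);      /* dirty-log bookkeeping, as at line 240 */
}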
kvm_main.c
232 gfn_t gfn; local
239 gfn = vcpu->mmu.gva_to_gpa(vcpu, addr) >> PAGE_SHIFT;
240 mark_page_dirty(vcpu->kvm, gfn);
922 static gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) argument
929 if (gfn >= alias->base_gfn
930 && gfn < alias->base_gfn + alias->npages)
931 return alias->target_gfn + gfn - alias->base_gfn;
933 return gfn;
936 static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn) argument
943 if (gfn >
950 gfn_to_memslot(struct kvm *kvm, gfn_t gfn) argument
956 gfn_to_page(struct kvm *kvm, gfn_t gfn) argument
968 mark_page_dirty(struct kvm *kvm, gfn_t gfn) argument
[all...]
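
The kvm_main.c matches spell out the alias translation almost completely (lines 922-933): a gfn that falls inside a registered alias window is remapped into the alias's target range, and any other gfn is returned unchanged; __gfn_to_memslot() then performs a similar range test against the memory slots (the snippet at line 943 is truncated). A standalone sketch of the translation arithmetic, with simplified alias bookkeeping in place of the kernel's structures:

#include <stddef.h>
#include <stdint.h>

typedef uint64_t gfn_t;

struct mem_alias {
        gfn_t base_gfn;         /* first gfn covered by the alias */
        gfn_t npages;           /* number of frames in the window */
        gfn_t target_gfn;       /* where those frames really live */
};

static gfn_t unalias_gfn(const struct mem_alias *aliases, size_t naliases,
                         gfn_t gfn)
{
        for (size_t i = 0; i < naliases; i++) {
                const struct mem_alias *alias = &aliases[i];

                /* lines 929-931: inside the aliased window -> translate */
                if (gfn >= alias->base_gfn &&
                    gfn < alias->base_gfn + alias->npages)
                        return alias->target_gfn + gfn - alias->base_gfn;
        }
        return gfn;             /* line 933: no alias applies */
}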
