Searched refs:memslot (Results 1 - 25 of 37) sorted by path

/linux-master/arch/arm64/kvm/
arm.c 1656 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) argument
mmu.c 158 static bool memslot_is_logging(struct kvm_memory_slot *memslot) argument
160 return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
337 struct kvm_memory_slot *memslot)
339 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
340 phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
355 struct kvm_memory_slot *memslot; local
362 kvm_for_each_memslot(memslot, bkt, slots)
363 stage2_flush_memslot(kvm, memslot);
944 struct kvm_memory_slot *memslot)
336 stage2_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot) argument
943 stage2_unmap_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot) argument
995 struct kvm_memory_slot *memslot; local
1131 struct kvm_memory_slot *memslot = id_to_memslot(slots, slot); local
1158 struct kvm_memory_slot *memslot; local
1214 fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot, unsigned long hva, unsigned long map_size) argument
1284 transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long hva, kvm_pfn_t *pfnp, phys_addr_t *ipap) argument
1377 user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, struct kvm_memory_slot *memslot, unsigned long hva, bool fault_is_perm) argument
1629 struct kvm_memory_slot *memslot; local
[all...]
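
The arm64 hits above revolve around two recurring memslot idioms: deriving the guest-physical range a slot covers (base_gfn shifted by PAGE_SHIFT, spanning npages pages, as in stage2_flush_memslot) and detecting dirty-page logging (memslot_is_logging: a dirty_bitmap exists and the slot is writable). A minimal userspace sketch of both, using a stripped-down stand-in for struct kvm_memory_slot rather than the real kernel type:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE (1UL << PAGE_SHIFT)
    #define KVM_MEM_READONLY 2u            /* matches the UAPI flag bit */

    /* Stripped-down stand-in for the kernel's struct kvm_memory_slot. */
    struct kvm_memory_slot {
        uint64_t base_gfn;                 /* first guest frame in the slot */
        uint64_t npages;                   /* slot size in pages */
        unsigned long *dirty_bitmap;       /* non-NULL while logging is on */
        uint32_t flags;
    };

    /* Mirrors memslot_is_logging(): dirty logging requires a bitmap and a
     * writable slot; read-only slots never produce dirty pages. */
    static bool memslot_is_logging(const struct kvm_memory_slot *slot)
    {
        return slot->dirty_bitmap && !(slot->flags & KVM_MEM_READONLY);
    }

    int main(void)
    {
        struct kvm_memory_slot slot = { .base_gfn = 0x80000, .npages = 256 };

        /* Same range computation as stage2_flush_memslot(). */
        uint64_t start = slot.base_gfn << PAGE_SHIFT;
        uint64_t end = start + slot.npages * PAGE_SIZE;

        printf("IPA range 0x%llx-0x%llx, logging=%d\n",
               (unsigned long long)start, (unsigned long long)end,
               memslot_is_logging(&slot));
        return 0;
    }
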
/linux-master/arch/loongarch/include/asm/
kvm_host.h 239 void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, const struct kvm_memory_slot *memslot);
/linux-master/arch/loongarch/kvm/
mmu.c 410 * memslot->userspace_addr:
415 * memslot->base_gfn << PAGE_SIZE:
637 static bool fault_supports_huge_mapping(struct kvm_memory_slot *memslot, argument
643 if (kvm_slot_dirty_track_enabled(memslot) && write)
646 if (kvm_hugepage_capable(memslot))
649 if (kvm_hugepage_incapable(memslot))
652 start = memslot->userspace_addr;
653 end = start + memslot->npages * PAGE_SIZE;
657 * by the memslot. This means we have to prohibit block size mappings
707 * Note, using the already-retrieved memslot and __gfn_to_hva_memslot()
809 struct kvm_memory_slot *memslot; local
948 kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) argument
952 kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, const struct kvm_memory_slot *memslot) argument
[all...]
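
fault_supports_huge_mapping() above derives the slot's host-virtual range from userspace_addr and npages (lines 652-653) and then refuses block mappings that would extend past it. A sketch of that containment test, assuming the same check shape as the arm64 counterpart fault_supports_stage2_huge_mapping(); block_fits_in_slot is an illustrative name:

    #include <stdbool.h>

    #define PAGE_SIZE 4096UL

    struct kvm_memory_slot {                /* illustrative subset */
        unsigned long userspace_addr;       /* host VA backing the slot */
        unsigned long npages;
    };

    /* The map_size-aligned block around hva must lie entirely inside the
     * slot's host-VA range, otherwise a block mapping would map something
     * outside the memslot. */
    static bool block_fits_in_slot(const struct kvm_memory_slot *slot,
                                   unsigned long hva, unsigned long map_size)
    {
        unsigned long start = slot->userspace_addr;            /* line 652 */
        unsigned long end = start + slot->npages * PAGE_SIZE;  /* line 653 */

        return (hva & ~(map_size - 1)) >= start &&
               (hva & ~(map_size - 1)) + map_size <= end;
    }
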
/linux-master/arch/mips/kvm/
mips.c 979 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) argument
/linux-master/arch/powerpc/include/asm/
kvm_book3s.h 198 const struct kvm_memory_slot *memslot,
205 struct kvm_memory_slot *memslot,
214 extern void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
216 extern bool kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
218 extern bool kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
221 struct kvm_memory_slot *memslot, unsigned long *map);
223 const struct kvm_memory_slot *memslot);
241 extern void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot,
258 struct kvm_memory_slot *memslot, unsigned long *map);
260 struct kvm_memory_slot *memslot,
[all...]
kvm_book3s_64.h 66 * rmap entry in the memslot. The list is always terminated by a "single entry"
68 * a single entry then this is itself in the rmap entry of the memslot, not a
488 static inline bool slot_is_aligned(struct kvm_memory_slot *memslot, argument
495 return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
635 const struct kvm_memory_slot *memslot,
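
slot_is_aligned() above is the entire test for whether a memslot can be backed by large pages of order porder: both the starting gfn and the page count must be multiples of the large page's span in base pages. A self-contained version with a worked example (the mask arithmetic matches the snippet at line 495; a PAGE_SHIFT of 12 is assumed):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    struct kvm_memory_slot {               /* illustrative subset */
        uint64_t base_gfn;
        uint64_t npages;
    };

    /* Same test as the snippet: a slot can be mapped with pages of
     * (1 << porder) bytes only if its start gfn and its length are both
     * multiples of that page size, measured in base pages. */
    static bool slot_is_aligned(const struct kvm_memory_slot *slot,
                                unsigned int porder)
    {
        uint64_t mask = (1ULL << (porder - PAGE_SHIFT)) - 1;

        return !(slot->base_gfn & mask) && !(slot->npages & mask);
    }

    int main(void)
    {
        /* 0x4100 pages starting at gfn 0x1000 */
        struct kvm_memory_slot slot = { .base_gfn = 0x1000, .npages = 0x4100 };

        printf("64KB ok: %d\n", slot_is_aligned(&slot, 16)); /* 1: mask 0xf */
        printf("16MB ok: %d\n", slot_is_aligned(&slot, 24)); /* 0: npages & 0xfff != 0 */
        return 0;
    }
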
kvm_ppc.h 174 struct kvm_memory_slot *memslot, unsigned long porder);
217 struct kvm_memory_slot *memslot);
278 void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
/linux-master/arch/powerpc/kvm/
book3s.c 851 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) argument
866 void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot) argument
868 kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
book3s.h 11 struct kvm_memory_slot *memslot);
book3s_64_mmu_hv.c 206 void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot, argument
220 npages = memslot->npages >> (porder - PAGE_SHIFT);
516 struct kvm_memory_slot *memslot; local
579 memslot = gfn_to_memslot(kvm, gfn);
581 trace_kvm_page_fault_enter(vcpu, hpte, memslot, ea, dsisr);
583 /* No memslot means it's an emulated MMIO region */
584 if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
592 if (gfn_base < memslot->base_gfn)
604 hva = gfn_to_hva_memslot(memslot, gfn);
759 struct kvm_memory_slot *memslot; local
779 kvmppc_unmap_hpte(struct kvm *kvm, unsigned long i, struct kvm_memory_slot *memslot, unsigned long *rmapp, unsigned long gfn) argument
821 kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn) argument
872 kvmppc_core_flush_memslot_hv(struct kvm *kvm, struct kvm_memory_slot *memslot) argument
899 kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn) argument
970 kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn) argument
1115 kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa, struct kvm_memory_slot *memslot, unsigned long *map) argument
1133 kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long *map) argument
1159 struct kvm_memory_slot *memslot; local
1191 struct kvm_memory_slot *memslot; local
1278 struct kvm_memory_slot *memslot = local
[all...]
book3s_64_mmu_radix.c 425 const struct kvm_memory_slot *memslot,
441 if (!memslot) {
442 memslot = gfn_to_memslot(kvm, gfn);
443 if (!memslot)
456 kvmhv_remove_nest_rmap_range(kvm, memslot, gpa, hpa, page_size);
458 if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap)
459 kvmppc_update_dirty_map(memslot, gfn, page_size);
470 * turned off for a memslot while the VM is running. The new memslot
471 becomes visible to page faults before the memslot commit
423 kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa, unsigned int shift, const struct kvm_memory_slot *memslot, u64 lpid) argument
821 kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu, unsigned long gpa, struct kvm_memory_slot *memslot, bool writing, bool kvm_ro, pte_t *inserted_pte, unsigned int *levelp) argument
950 struct kvm_memory_slot *memslot; local
1032 kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn) argument
1051 kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn) argument
1079 kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn) argument
1098 kvm_radix_test_clear_dirty(struct kvm *kvm, struct kvm_memory_slot *memslot, int pagenum) argument
1153 kvmppc_hv_get_dirty_log_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long *map) argument
1178 kvmppc_radix_flush_memslot(struct kvm *kvm, const struct kvm_memory_slot *memslot) argument
[all...]
book3s_64_vio.c 359 struct kvm_memory_slot *memslot; local
361 memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
362 if (!memslot)
365 *ua = __gfn_to_hva_memslot(memslot, gfn) |
book3s_hv.c 966 /* Copy guest memory in place - must reside within a single memslot */
5099 struct kvm_memory_slot *memslot; local
5112 memslot = id_to_memslot(slots, log->slot);
5114 if (!memslot || !memslot->dirty_bitmap)
5121 n = kvm_dirty_bitmap_bytes(memslot);
5122 buf = memslot->dirty_bitmap + n / sizeof(long);
5126 r = kvmppc_hv_get_dirty_log_radix(kvm, memslot, buf);
5128 r = kvmppc_hv_get_dirty_log_hpt(kvm, memslot, buf);
5134 * memslot's
5308 struct kvm_memory_slot *memslot; local
6267 struct kvm_memory_slot *memslot; local
[all...]
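
The dirty-log path above leans on KVM's doubled bitmap: the slot's dirty_bitmap allocation is twice kvm_dirty_bitmap_bytes(), and the second half (dirty_bitmap + n / sizeof(long), line 5122) is scratch space that kvmppc_hv_get_dirty_log_radix()/..._hpt() fill before the result reaches userspace. A sketch of that layout; bitmap_bytes() is a simplified stand-in for kvm_dirty_bitmap_bytes():

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    struct kvm_memory_slot {               /* illustrative subset */
        uint64_t npages;
        unsigned long *dirty_bitmap;
    };

    /* Simplified stand-in for kvm_dirty_bitmap_bytes(): one bit per page,
     * rounded up to whole longs. */
    static size_t bitmap_bytes(const struct kvm_memory_slot *slot)
    {
        size_t bits_per_long = 8 * sizeof(unsigned long);
        return (slot->npages + bits_per_long - 1) / bits_per_long
                * sizeof(unsigned long);
    }

    int main(void)
    {
        struct kvm_memory_slot slot = { .npages = 1024 };
        size_t n = bitmap_bytes(&slot);

        /* KVM allocates twice n: the first half is the user-visible log,
         * the second half is the scratch area the hits above index with
         * "dirty_bitmap + n / sizeof(long)". */
        slot.dirty_bitmap = calloc(2, n);
        if (!slot.dirty_bitmap)
            return 1;
        unsigned long *scratch = slot.dirty_bitmap + n / sizeof(unsigned long);

        scratch[0] |= 1UL;                     /* page 0 reported dirty */
        memcpy(slot.dirty_bitmap, scratch, n); /* harvest into the log */

        free(slot.dirty_bitmap);
        return 0;
    }
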
book3s_hv_nested.c 797 struct kvm_memory_slot *memslot; local
817 kvm_for_each_memslot(memslot, bkt, kvm_memslots(kvm))
818 kvmhv_free_memslot_nest_rmap(memslot);
1029 const struct kvm_memory_slot *memslot,
1036 if (!memslot)
1038 gfn = (gpa >> PAGE_SHIFT) - memslot->base_gfn;
1045 unsigned long *rmap = &memslot->arch.rmap[gfn];
1519 struct kvm_memory_slot *memslot; local
1587 /* 1. Get the corresponding host memslot */
memslot = gfn_to_memslot(kvm, gfn);
1028 kvmhv_remove_nest_rmap_range(struct kvm *kvm, const struct kvm_memory_slot *memslot, unsigned long gpa, unsigned long hpa, unsigned long nbytes) argument
[all...]
book3s_hv_rm_mmu.c 95 /* Update the dirty bitmap of a memslot */
96 void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot, argument
101 if (!psize || !memslot->dirty_bitmap)
104 gfn -= memslot->base_gfn;
105 set_dirty_bits_atomic(memslot->dirty_bitmap, gfn, npages);
112 struct kvm_memory_slot *memslot; local
118 memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
119 if (memslot && memslot->dirty_bitmap)
120 kvmppc_update_dirty_map(memslot, gfn, psize);
129 struct kvm_memory_slot *memslot; local
155 struct kvm_memory_slot *memslot; local
193 struct kvm_memory_slot *memslot; local
888 struct kvm_memory_slot *memslot; local
928 struct kvm_memory_slot *memslot; local
[all...]
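
kvmppc_update_dirty_map() above shows the indexing convention shared by every dirty-bitmap writer: the absolute gfn is rebased against base_gfn (line 104) so that bit i of the bitmap always means "page i of this slot". A sketch with a plain, non-atomic set_bits() standing in for the kernel's set_dirty_bits_atomic():

    #include <stdint.h>

    struct kvm_memory_slot {               /* illustrative subset */
        uint64_t base_gfn;
        unsigned long *dirty_bitmap;
    };

    /* Non-atomic stand-in for set_dirty_bits_atomic(). */
    static void set_bits(unsigned long *map, unsigned long i, unsigned long n)
    {
        for (; n; n--, i++)
            map[i / (8 * sizeof(long))] |= 1UL << (i % (8 * sizeof(long)));
    }

    /* Mirrors kvmppc_update_dirty_map(): mark npages pages dirty,
     * starting at absolute frame gfn, in the slot-relative bitmap. */
    static void update_dirty_map(struct kvm_memory_slot *slot,
                                 unsigned long gfn, unsigned long npages)
    {
        if (!npages || !slot->dirty_bitmap)
            return;
        gfn -= slot->base_gfn;     /* rebase to a slot-relative index */
        set_bits(slot->dirty_bitmap, gfn, npages);
    }
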
book3s_hv_uvmem.c 361 static bool kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot *memslot, argument
377 * kvmppc_uvmem_slot and memslot.
392 const struct kvm_memory_slot *memslot, bool merge)
394 unsigned long gfn = memslot->base_gfn;
404 end = start + (memslot->npages << PAGE_SHIFT);
431 const struct kvm_memory_slot *memslot)
433 uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);
434 kvmppc_uvmem_slot_free(kvm, memslot);
435 kvmppc_memslot_page_merge(kvm, memslot, true);
439 const struct kvm_memory_slot *memslot)
391 kvmppc_memslot_page_merge(struct kvm *kvm, const struct kvm_memory_slot *memslot, bool merge) argument
430 __kvmppc_uvmem_memslot_delete(struct kvm *kvm, const struct kvm_memory_slot *memslot) argument
438 __kvmppc_uvmem_memslot_create(struct kvm *kvm, const struct kvm_memory_slot *memslot) argument
468 struct kvm_memory_slot *memslot, *m; local
662 struct kvm_memory_slot *memslot; local
794 kvmppc_uv_migrate_mem_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot) argument
833 struct kvm_memory_slot *memslot; local
[all...]
book3s_pr.c 1874 struct kvm_memory_slot *memslot; local
1883 r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
1889 ga = memslot->base_gfn << PAGE_SHIFT;
1890 ga_end = ga + (memslot->npages << PAGE_SHIFT);
1895 n = kvm_dirty_bitmap_bytes(memslot);
1896 memset(memslot->dirty_bitmap, 0, n);
1906 struct kvm_memory_slot *memslot)
1905 kvmppc_core_flush_memslot_pr(struct kvm *kvm, struct kvm_memory_slot *memslot) argument
booke.c 1817 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) argument
1846 void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot) argument
trace_hv.h 294 struct kvm_memory_slot *memslot, unsigned long ea,
297 TP_ARGS(vcpu, hptep, memslot, ea, dsisr),
317 __entry->base_gfn = memslot ? memslot->base_gfn : -1UL;
318 __entry->slot_flags = memslot ? memslot->flags : 0;
/linux-master/arch/riscv/include/asm/
kvm_host.h 333 struct kvm_memory_slot *memslot,
/linux-master/arch/riscv/kvm/
mmu.c 336 struct kvm_memory_slot *memslot = id_to_memslot(slots, slot); local
337 phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
338 phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
405 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) argument
439 * At this point memslot has been committed and there is an
610 struct kvm_memory_slot *memslot,
621 bool logging = (memslot->dirty_bitmap &&
622 !(memslot->flags & KVM_MEM_READONLY)) ? true : false;
609 kvm_riscv_gstage_map(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot, gpa_t gpa, unsigned long hva, bool is_write) argument
vcpu_exit.c 16 struct kvm_memory_slot *memslot; local
24 memslot = gfn_to_memslot(vcpu->kvm, gfn);
25 hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
43 ret = kvm_riscv_gstage_map(vcpu, memslot, fault_addr, hva,
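
The riscv fault path above runs gfn → gfn_to_memslot() → gfn_to_hva_memslot_prot() → kvm_riscv_gstage_map(). A sketch of the two lookups underneath it, assuming a plain linear scan instead of the kernel's gfn-sorted memslot structures and a fixed 4 KiB page size:

    #include <stddef.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12

    struct kvm_memory_slot {               /* illustrative subset */
        uint64_t base_gfn;
        uint64_t npages;
        unsigned long userspace_addr;      /* host VA backing the slot */
    };

    /* Linear stand-in for gfn_to_memslot(): find the slot covering gfn. */
    static struct kvm_memory_slot *
    gfn_to_slot(struct kvm_memory_slot *slots, size_t n, uint64_t gfn)
    {
        for (size_t i = 0; i < n; i++)
            if (gfn >= slots[i].base_gfn &&
                gfn < slots[i].base_gfn + slots[i].npages)
                return &slots[i];
        return NULL;                       /* no slot: emulated MMIO or bad access */
    }

    /* Same arithmetic as the kernel's __gfn_to_hva_memslot(): offset of
     * the gfn within the slot, added to the backing host VA. */
    static unsigned long slot_gfn_to_hva(const struct kvm_memory_slot *slot,
                                         uint64_t gfn)
    {
        return slot->userspace_addr +
               ((unsigned long)(gfn - slot->base_gfn) << PAGE_SHIFT);
    }
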
/linux-master/arch/s390/kvm/
kvm-s390.c 669 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) argument
678 cur_gfn = memslot->base_gfn;
679 last_gfn = memslot->base_gfn + memslot->npages;
682 vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
710 struct kvm_memory_slot *memslot; local
722 r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
728 n = kvm_dirty_bitmap_bytes(memslot);
729 memset(memslot->dirty_bitmap, 0, n);
2289 /* Reached the end of the current memslot, take the next one. */
[all...]
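
s390's kvm_arch_sync_dirty_log() above walks every gfn of the slot from base_gfn to base_gfn + npages, translating each to an hva (line 682) and pulling dirty state from the host side. A sketch of the walk's shape; peek_dirty() is a hypothetical stand-in for the gmap/page-table query the real code performs:

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    struct kvm_memory_slot {               /* illustrative subset */
        uint64_t base_gfn;
        uint64_t npages;
        unsigned long userspace_addr;
    };

    /* Hypothetical stand-in for the per-page host-side dirty query. */
    static bool peek_dirty(unsigned long hva) { (void)hva; return false; }

    static void sync_dirty_log(const struct kvm_memory_slot *slot,
                               unsigned long *bitmap)
    {
        uint64_t cur_gfn = slot->base_gfn;
        uint64_t last_gfn = slot->base_gfn + slot->npages;

        for (; cur_gfn < last_gfn; cur_gfn++) {
            /* gfn_to_hva_memslot(): slot offset plus backing host VA */
            unsigned long hva = slot->userspace_addr +
                ((unsigned long)(cur_gfn - slot->base_gfn) << PAGE_SHIFT);

            if (peek_dirty(hva)) {
                uint64_t i = cur_gfn - slot->base_gfn;
                bitmap[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
            }
        }
    }
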
/linux-master/arch/x86/include/asm/
kvm_host.h 1298 * memslot, etc... Note, zapping shadow pages on this list doesn't
1468 * is used as one input when determining whether certain memslot
1921 const struct kvm_memory_slot *memslot,
1924 const struct kvm_memory_slot *memslot,
1927 const struct kvm_memory_slot *memslot,
1931 const struct kvm_memory_slot *memslot);
1933 const struct kvm_memory_slot *memslot);

Completed in 379 milliseconds
