Searched refs:memslot (Results 1 - 25 of 37) sorted by relevance

/linux-master/arch/powerpc/kvm/
book3s_64_mmu_hv.c
206 void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot, argument
220 npages = memslot->npages >> (porder - PAGE_SHIFT);
516 struct kvm_memory_slot *memslot; local
579 memslot = gfn_to_memslot(kvm, gfn);
581 trace_kvm_page_fault_enter(vcpu, hpte, memslot, ea, dsisr);
583 /* No memslot means it's an emulated MMIO region */
584 if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
592 if (gfn_base < memslot->base_gfn)
604 hva = gfn_to_hva_memslot(memslot, gf
759 struct kvm_memory_slot *memslot; local
779 kvmppc_unmap_hpte(struct kvm *kvm, unsigned long i, struct kvm_memory_slot *memslot, unsigned long *rmapp, unsigned long gfn) argument
821 kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn) argument
872 kvmppc_core_flush_memslot_hv(struct kvm *kvm, struct kvm_memory_slot *memslot) argument
899 kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn) argument
970 kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn) argument
1115 kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa, struct kvm_memory_slot *memslot, unsigned long *map) argument
1133 kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long *map) argument
1159 struct kvm_memory_slot *memslot; local
1191 struct kvm_memory_slot *memslot; local
1278 struct kvm_memory_slot *memslot = local
[all...]
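The fault path quoted at lines 579-584 above shows the canonical classification step: look the faulting gfn up in the memslots, and treat a missing or KVM_MEMSLOT_INVALID slot as an emulated MMIO access. Below is a minimal standalone C sketch of that pattern; the struct, flag value, and find_slot() helper are illustrative stand-ins, not the kernel's types.

/* Standalone model of the lookup-and-classify step above; all names
 * and the flag encoding are illustrative, not the kernel's. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define SLOT_INVALID (1u << 0)          /* stand-in for KVM_MEMSLOT_INVALID */

struct slot_model {
        uint64_t base_gfn;
        uint64_t npages;
        unsigned flags;
};

/* Linear scan; the kernel uses a gfn-ordered structure plus an LRU cache. */
static const struct slot_model *find_slot(const struct slot_model *slots,
                                          size_t n, uint64_t gfn)
{
        for (size_t i = 0; i < n; i++)
                if (gfn >= slots[i].base_gfn &&
                    gfn < slots[i].base_gfn + slots[i].npages)
                        return &slots[i];
        return NULL;
}

int main(void)
{
        const struct slot_model slots[] = {
                { .base_gfn = 0x000, .npages = 256 },
                { .base_gfn = 0x400, .npages = 256, .flags = SLOT_INVALID },
        };
        const uint64_t gfns[] = { 0x010, 0x410, 0x900 };

        for (int i = 0; i < 3; i++) {
                const struct slot_model *s = find_slot(slots, 2, gfns[i]);

                /* No slot, or an invalid one: emulated MMIO, as at line 584. */
                printf("gfn 0x%llx -> %s\n", (unsigned long long)gfns[i],
                       (!s || (s->flags & SLOT_INVALID)) ? "emulated MMIO"
                                                         : "RAM slot");
        }
        return 0;
}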
book3s_hv_uvmem.c
361 static bool kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot *memslot, argument
377 * kvmppc_uvmem_slot and memslot.
392 const struct kvm_memory_slot *memslot, bool merge)
394 unsigned long gfn = memslot->base_gfn;
404 end = start + (memslot->npages << PAGE_SHIFT);
431 const struct kvm_memory_slot *memslot)
433 uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);
434 kvmppc_uvmem_slot_free(kvm, memslot);
435 kvmppc_memslot_page_merge(kvm, memslot, true);
439 const struct kvm_memory_slot *memslot)
391 kvmppc_memslot_page_merge(struct kvm *kvm, const struct kvm_memory_slot *memslot, bool merge) argument
430 __kvmppc_uvmem_memslot_delete(struct kvm *kvm, const struct kvm_memory_slot *memslot) argument
438 __kvmppc_uvmem_memslot_create(struct kvm *kvm, const struct kvm_memory_slot *memslot) argument
468 struct kvm_memory_slot *memslot, *m; local
662 struct kvm_memory_slot *memslot; local
794 kvmppc_uv_migrate_mem_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot) argument
833 struct kvm_memory_slot *memslot; local
[all...]
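kvmppc_memslot_page_merge() (lines 392-404 above) derives the slot's userspace span from base_gfn and npages and toggles page merging across it. Here is a userspace approximation of that range computation using madvise(MADV_MERGEABLE) on an anonymous mapping; the kernel path instead walks VMAs and calls ksm_madvise(), and MADV_MERGEABLE only succeeds on kernels built with CONFIG_KSM.

#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>

#define PAGE_SHIFT 12

struct slot_model {
        uint64_t npages;
        void *userspace_addr;
};

/* Toggle KSM merging across the slot's hva span; cf. the
 * "end = start + (memslot->npages << PAGE_SHIFT)" computation above.
 * Returns -1 with EINVAL on kernels built without CONFIG_KSM. */
static int slot_set_merge(const struct slot_model *s, int merge)
{
        size_t len = (size_t)s->npages << PAGE_SHIFT;

        return madvise(s->userspace_addr, len,
                       merge ? MADV_MERGEABLE : MADV_UNMERGEABLE);
}

int main(void)
{
        struct slot_model s = { .npages = 16 };

        s.userspace_addr = mmap(NULL, (size_t)s.npages << PAGE_SHIFT,
                                PROT_READ | PROT_WRITE,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (s.userspace_addr == MAP_FAILED)
                return 1;
        printf("merge on: %d\n", slot_set_merge(&s, 1));   /* 0 on success */
        printf("merge off: %d\n", slot_set_merge(&s, 0));
        return 0;
}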
book3s_hv_rm_mmu.c
95 /* Update the dirty bitmap of a memslot */
96 void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot, argument
101 if (!psize || !memslot->dirty_bitmap)
104 gfn -= memslot->base_gfn;
105 set_dirty_bits_atomic(memslot->dirty_bitmap, gfn, npages);
112 struct kvm_memory_slot *memslot; local
118 memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
119 if (memslot && memslot->dirty_bitmap)
120 kvmppc_update_dirty_map(memslot, gf
129 struct kvm_memory_slot *memslot; local
155 struct kvm_memory_slot *memslot; local
193 struct kvm_memory_slot *memslot; local
888 struct kvm_memory_slot *memslot; local
928 struct kvm_memory_slot *memslot; local
[all...]
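kvmppc_update_dirty_map() (lines 96-105 above) encodes two details worth seeing in isolation: the dirty bitmap is indexed relative to the slot's base_gfn, and a large backing page dirties psize / PAGE_SIZE consecutive bits. A minimal userspace model follows; set_dirty_bits() here is a plain loop standing in for the kernel's atomic helper.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define BITS_PER_LONG (8 * sizeof(unsigned long))

static void set_dirty_bits(unsigned long *map, uint64_t first, uint64_t n)
{
        for (uint64_t b = first; b < first + n; b++)
                map[b / BITS_PER_LONG] |= 1UL << (b % BITS_PER_LONG);
}

static void update_dirty_map(unsigned long *map, uint64_t base_gfn,
                             uint64_t gfn, unsigned long psize)
{
        unsigned long npages = (psize + PAGE_SIZE - 1) / PAGE_SIZE;

        if (!psize || !map)                             /* cf. line 101 */
                return;
        set_dirty_bits(map, gfn - base_gfn, npages);    /* cf. line 104 */
}

int main(void)
{
        unsigned long map[4] = { 0 };

        /* One 64 KiB backing page dirties 16 bitmap bits at offset 4. */
        update_dirty_map(map, 0x100, 0x104, 16 * PAGE_SIZE);
        printf("map[0] = 0x%lx\n", map[0]);     /* 0xffff0 on 64-bit */
        return 0;
}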
book3s_64_mmu_radix.c
425 const struct kvm_memory_slot *memslot,
441 if (!memslot) {
442 memslot = gfn_to_memslot(kvm, gfn);
443 if (!memslot)
456 kvmhv_remove_nest_rmap_range(kvm, memslot, gpa, hpa, page_size);
458 if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap)
459 kvmppc_update_dirty_map(memslot, gfn, page_size);
470 * turned off for a memslot while the VM is running. The new memslot
471 * becomes visible to page faults before the memslot commi
423 kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa, unsigned int shift, const struct kvm_memory_slot *memslot, u64 lpid) argument
821 kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu, unsigned long gpa, struct kvm_memory_slot *memslot, bool writing, bool kvm_ro, pte_t *inserted_pte, unsigned int *levelp) argument
950 struct kvm_memory_slot *memslot; local
1032 kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn) argument
1051 kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn) argument
1079 kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn) argument
1098 kvm_radix_test_clear_dirty(struct kvm *kvm, struct kvm_memory_slot *memslot, int pagenum) argument
1153 kvmppc_hv_get_dirty_log_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long *map) argument
1178 kvmppc_radix_flush_memslot(struct kvm *kvm, const struct kvm_memory_slot *memslot) argument
[all...]
book3s.h
11 struct kvm_memory_slot *memslot);
trace_hv.h
294 struct kvm_memory_slot *memslot, unsigned long ea,
297 TP_ARGS(vcpu, hptep, memslot, ea, dsisr),
317 __entry->base_gfn = memslot ? memslot->base_gfn : -1UL;
318 __entry->slot_flags = memslot ? memslot->flags : 0;
book3s_hv_nested.c
797 struct kvm_memory_slot *memslot; local
817 kvm_for_each_memslot(memslot, bkt, kvm_memslots(kvm))
818 kvmhv_free_memslot_nest_rmap(memslot);
1029 const struct kvm_memory_slot *memslot,
1036 if (!memslot)
1038 gfn = (gpa >> PAGE_SHIFT) - memslot->base_gfn;
1045 unsigned long *rmap = &memslot->arch.rmap[gfn];
1519 struct kvm_memory_slot *memslot; local
1587 /* 1. Get the corresponding host memslot */
1589 memslot
1028 kvmhv_remove_nest_rmap_range(struct kvm *kvm, const struct kvm_memory_slot *memslot, unsigned long gpa, unsigned long hpa, unsigned long nbytes) argument
[all...]
book3s_hv.c
966 /* Copy guest memory in place - must reside within a single memslot */
5099 struct kvm_memory_slot *memslot; local
5112 memslot = id_to_memslot(slots, log->slot);
5114 if (!memslot || !memslot->dirty_bitmap)
5121 n = kvm_dirty_bitmap_bytes(memslot);
5122 buf = memslot->dirty_bitmap + n / sizeof(long);
5126 r = kvmppc_hv_get_dirty_log_radix(kvm, memslot, buf);
5128 r = kvmppc_hv_get_dirty_log_hpt(kvm, memslot, buf);
5134 * memslot'
5308 struct kvm_memory_slot *memslot; local
6267 struct kvm_memory_slot *memslot; local
[all...]
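Lines 5121-5122 above point buf at the second half of the slot's dirty bitmap, implying the allocation is twice kvm_dirty_bitmap_bytes() so one half can be harvested while the other collects new bits. A sketch of that layout and pointer arithmetic, assuming a calloc'd double-size buffer; the kernel's allocator and synchronization are not modeled.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* One bit per page, rounded up to whole longs. */
static size_t dirty_bitmap_bytes(uint64_t npages)
{
        return ((npages + BITS_PER_LONG - 1) / BITS_PER_LONG) *
               sizeof(unsigned long);
}

int main(void)
{
        uint64_t npages = 1 << 20;              /* 4 GiB of 4 KiB pages */
        size_t n = dirty_bitmap_bytes(npages);
        unsigned long *bitmap = calloc(2, n);   /* both halves */
        unsigned long *buf;

        if (!bitmap)
                return 1;
        /* cf. "buf = memslot->dirty_bitmap + n / sizeof(long)" above */
        buf = bitmap + n / sizeof(unsigned long);
        printf("%zu bytes per half, buf starts %td longs in\n",
               n, buf - bitmap);
        free(bitmap);
        return 0;
}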
book3s_64_vio.c
359 struct kvm_memory_slot *memslot; local
361 memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
362 if (!memslot)
365 *ua = __gfn_to_hva_memslot(memslot, gfn) |
/linux-master/include/linux/
kvm_types.h
57 struct kvm_memory_slot *memslot; member in struct:gfn_to_hva_cache
64 struct kvm_memory_slot *memslot; member in struct:gfn_to_pfn_cache
kvm_host.h
60 * Bit 63 of the memslot generation number is an "update in-progress flag",
63 * mark cached memslot data, e.g. MMIO accesses, as potentially being stale,
74 * memslot update is in-progress, and to prevent cache hits *after* updating
393 * The most recently used memslot by this vCPU and the slots generation
396 * thousands of years, even assuming 1M memslot operations per second.
569 * Since at idle each memslot belongs to two memslot sets it has to contain
572 * Two memslot sets (one active and one inactive) are necessary so the VM
573 * continues to run on one memslot set while the other is being modified.
575 * These two memslot set
615 kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot) argument
620 kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot) argument
2288 kvm_is_visible_memslot(struct kvm_memory_slot *memslot) argument
[all...]
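The kvm_host.h comments above (lines 60-74) describe the protocol this sketch models: bit 63 of the memslot generation marks an update in progress, so cached translations must be rejected both when the generation has moved on and while the flag is set. A standalone model under hypothetical names; only the bit-63 convention is taken from the source.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GEN_UPDATE_IN_PROGRESS (1ULL << 63)     /* bit 63, per line 60 */

struct cached_xlat {
        uint64_t generation;    /* slots generation when cached */
};

/* Reject hits while an update is in flight or after any update. */
static bool cache_valid(const struct cached_xlat *c, uint64_t slots_gen)
{
        if (slots_gen & GEN_UPDATE_IN_PROGRESS)
                return false;
        return c->generation == slots_gen;
}

int main(void)
{
        struct cached_xlat c = { .generation = 4 };

        printf("%d\n", cache_valid(&c, 4));                             /* 1 */
        printf("%d\n", cache_valid(&c, 4 | GEN_UPDATE_IN_PROGRESS));    /* 0 */
        printf("%d\n", cache_valid(&c, 6));                             /* 0 */
        return 0;
}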
/linux-master/virt/kvm/
dirty_ring.c
55 struct kvm_memory_slot *memslot; local
64 memslot = id_to_memslot(__kvm_memslots(kvm, as_id), id);
66 if (!memslot || (offset + __fls(mask)) >= memslot->npages)
70 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, offset, mask);
kvm_main.c
381 const struct kvm_memory_slot *memslot)
384 * All current use cases for flushing the TLBs for a specific memslot
386 * mmu_lock. The interaction between the various operations on memslot
388 * operation is observed by any other operation on the same memslot.
391 kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages);
563 * least one memslot was found, i.e. if the handler found guest memory.
588 /* Iterate over each memslot intersecting [start, last] (inclusive) range */
714 * unmap the memslot instead of skipping the memslot t
380 kvm_flush_remote_tlbs_memslot(struct kvm *kvm, const struct kvm_memory_slot *memslot) argument
1018 kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot) argument
1043 struct kvm_memory_slot *memslot; local
1451 kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot) argument
2171 kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, int *is_dirty, struct kvm_memory_slot **memslot) argument
2237 struct kvm_memory_slot *memslot; local
2348 struct kvm_memory_slot *memslot; local
2669 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn); local
2677 struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); local
3412 __kvm_write_guest_page(struct kvm *kvm, struct kvm_memory_slot *memslot, gfn_t gfn, const void *data, int offset, int len) argument
3633 mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn) argument
3660 struct kvm_memory_slot *memslot; local
3669 struct kvm_memory_slot *memslot; local
[all...]
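The comment at line 588 above says mmu-notifier handlers visit every memslot intersecting an inclusive [start, last] gfn range. The predicate is easy to get wrong at the boundaries, so here it is in isolation; the array scan is a stand-in for the kernel's gfn-ordered tree walk.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct slot_model {
        uint64_t base_gfn;
        uint64_t npages;
};

/* True iff [base_gfn, base_gfn + npages - 1] overlaps [start, last]. */
static bool slot_intersects(const struct slot_model *s,
                            uint64_t start, uint64_t last)
{
        return s->base_gfn <= last && s->base_gfn + s->npages - 1 >= start;
}

int main(void)
{
        const struct slot_model slots[] = {
                { 0x000, 0x100 }, { 0x200, 0x100 }, { 0x400, 0x100 },
        };

        for (int i = 0; i < 3; i++)     /* expect hit, hit, miss */
                printf("slot %d: %s\n", i,
                       slot_intersects(&slots[i], 0x080, 0x280) ?
                       "hit" : "miss");
        return 0;
}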
pfncache.c
68 * If the page was cached from a memslot, make sure the memslots have
274 gpc->memslot = NULL;
288 gpc->memslot = __gfn_to_memslot(slots, gfn);
289 gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);
297 * Even if the GPA and/or the memslot generation changed, the
330 * Some/all of the uhva, gpa, and memslot generation info may still be
436 * memslot generation. The PFN lookup needs to be redone every
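When the pfncache refreshes (lines 288-289 above), it redoes the gfn-to-hva translation against the current memslots. The translation itself is simple offset arithmetic, sketched here; the field names follow struct kvm_memory_slot, but the helper is a stand-in for __gfn_to_hva_memslot().

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct slot_model {
        uint64_t base_gfn;
        uint64_t npages;
        uint64_t userspace_addr;
};

/* hva = slot's userspace base + byte offset of gfn within the slot */
static uint64_t gfn_to_hva_model(const struct slot_model *s, uint64_t gfn)
{
        return s->userspace_addr + ((gfn - s->base_gfn) << PAGE_SHIFT);
}

int main(void)
{
        const struct slot_model s = {
                .base_gfn = 0x100,
                .npages = 512,
                .userspace_addr = 0x7f0000000000ULL,
        };

        /* 5 pages past the slot base: 0x7f0000005000 */
        printf("hva = 0x%llx\n",
               (unsigned long long)gfn_to_hva_model(&s, 0x105));
        return 0;
}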
/linux-master/arch/arm64/kvm/
mmu.c
158 static bool memslot_is_logging(struct kvm_memory_slot *memslot) argument
160 return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
337 struct kvm_memory_slot *memslot)
339 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
340 phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
355 struct kvm_memory_slot *memslot; local
362 kvm_for_each_memslot(memslot, bkt, slots)
363 stage2_flush_memslot(kvm, memslot);
944 struct kvm_memory_slot *memslot)
336 stage2_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot) argument
943 stage2_unmap_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot) argument
995 struct kvm_memory_slot *memslot; local
1131 struct kvm_memory_slot *memslot = id_to_memslot(slots, slot); local
1158 struct kvm_memory_slot *memslot; local
1214 fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot, unsigned long hva, unsigned long map_size) argument
1284 transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long hva, kvm_pfn_t *pfnp, phys_addr_t *ipap) argument
1377 user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, struct kvm_memory_slot *memslot, unsigned long hva, bool fault_is_perm) argument
1629 struct kvm_memory_slot *memslot; local
[all...]
/linux-master/arch/riscv/kvm/
vcpu_exit.c
16 struct kvm_memory_slot *memslot; local
24 memslot = gfn_to_memslot(vcpu->kvm, gfn);
25 hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
43 ret = kvm_riscv_gstage_map(vcpu, memslot, fault_addr, hva,
mmu.c
336 struct kvm_memory_slot *memslot = id_to_memslot(slots, slot); local
337 phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
338 phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
405 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) argument
439 * At this point memslot has been committed and there is an
610 struct kvm_memory_slot *memslot,
621 bool logging = (memslot->dirty_bitmap &&
622 !(memslot->flags & KVM_MEM_READONLY)) ? true : false;
609 kvm_riscv_gstage_map(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot, gpa_t gpa, unsigned long hva, bool is_write) argument
/linux-master/arch/x86/include/asm/uv/
uv_geo.h
57 char memslot; /* The memory slot on the bus */ member in struct:geo_mem_s
/linux-master/arch/loongarch/kvm/
mmu.c
410 * memslot->userspace_addr:
415 * memslot->base_gfn << PAGE_SIZE:
637 static bool fault_supports_huge_mapping(struct kvm_memory_slot *memslot, argument
643 if (kvm_slot_dirty_track_enabled(memslot) && write)
646 if (kvm_hugepage_capable(memslot))
649 if (kvm_hugepage_incapable(memslot))
652 start = memslot->userspace_addr;
653 end = start + memslot->npages * PAGE_SIZE;
657 * by the memslot. This means we have to prohibit block size mappings
707 * Note, using the already-retrieved memslot an
809 struct kvm_memory_slot *memslot; local
948 kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) argument
952 kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, const struct kvm_memory_slot *memslot) argument
[all...]
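fault_supports_huge_mapping() (line 637 above) gates block-size mappings on the slot's geometry. As best as can be reconstructed from the surrounding comments, the rule is: the slot's userspace address and guest-physical base must share their offset within the block size, and the block around the faulting hva must sit wholly inside the slot. The sketch below encodes that rule under assumed names; treat it as an approximation, not the exact loongarch check.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL

struct slot_model {
        uint64_t base_gfn;
        uint64_t npages;
        uint64_t userspace_addr;
};

static bool supports_huge_mapping(const struct slot_model *s,
                                  uint64_t hva, uint64_t map_size)
{
        uint64_t start = s->userspace_addr;             /* cf. line 652 */
        uint64_t end = start + s->npages * PAGE_SIZE;   /* cf. line 653 */
        uint64_t gpa_start = s->base_gfn * PAGE_SIZE;

        /* hva and gpa must be co-aligned within the block size... */
        if ((start & (map_size - 1)) != (gpa_start & (map_size - 1)))
                return false;
        /* ...and the block around hva must not spill past the slot. */
        return (hva & ~(map_size - 1)) >= start &&
               (hva & ~(map_size - 1)) + map_size <= end;
}

int main(void)
{
        const struct slot_model s = {
                .base_gfn = 0x80000,            /* gpa 0x80000000 */
                .npages = 0x4000,               /* 64 MiB slot */
                .userspace_addr = 0x7f2000000000ULL,
        };

        printf("%d\n", supports_huge_mapping(&s, s.userspace_addr + 0x10000,
                                             2 * 1024 * 1024));  /* 1 */
        return 0;
}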
/linux-master/arch/powerpc/include/asm/
kvm_book3s.h
198 const struct kvm_memory_slot *memslot,
205 struct kvm_memory_slot *memslot,
214 extern void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
216 extern bool kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
218 extern bool kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
221 struct kvm_memory_slot *memslot, unsigned long *map);
223 const struct kvm_memory_slot *memslot);
241 extern void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot,
258 struct kvm_memory_slot *memslot, unsigned long *map);
260 struct kvm_memory_slot *memslot,
[all...]
kvm_book3s_64.h
66 * rmap entry in the memslot. The list is always terminated by a "single entry"
68 * a single entry then this is itself in the rmap entry of the memslot, not a
488 static inline bool slot_is_aligned(struct kvm_memory_slot *memslot, argument
495 return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
635 const struct kvm_memory_slot *memslot,
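The comment at lines 66-68 above describes a space-saving convention: a slot's rmap word either holds a single entry inline or heads a list whose terminator is again a single entry. Below is a toy model with an invented low-tag-bit encoding, purely to make the traversal shape concrete; the kernel's actual rmap layout differs.

#include <stdio.h>

#define SINGLE_TAG 1UL  /* invented: low bit set means "inline entry" */

struct rmap_node {
        unsigned long value;
        unsigned long next;     /* node pointer, or tagged single entry */
};

static void rmap_walk(unsigned long word)
{
        while (!(word & SINGLE_TAG)) {  /* follow real node pointers */
                const struct rmap_node *n = (const struct rmap_node *)word;

                printf("entry %lu\n", n->value);
                word = n->next;
        }
        /* list always ends on a single entry, per the comment above */
        printf("entry %lu (terminating single entry)\n", word >> 1);
}

int main(void)
{
        struct rmap_node n = { .value = 7, .next = (42UL << 1) | SINGLE_TAG };

        rmap_walk((unsigned long)&n);   /* prints 7, then 42 */
        return 0;
}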
/linux-master/arch/loongarch/include/asm/
kvm_host.h
239 void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, const struct kvm_memory_slot *memslot);
/linux-master/tools/testing/selftests/kvm/lib/
kvm_util.c
430 * read-only memslots as MMIO, and creating a read-only memslot for the
1087 * Install a unique fd for each memslot so that the fd
1153 * memslot - KVM memory slot ID
1159 * using kvm memory slot ID given by memslot. TEST_ASSERT failure
1160 * on error (e.g. currently no memory region using memslot as a KVM
1164 memslot2region(struct kvm_vm *vm, uint32_t memslot) argument
1169 memslot)
1170 if (region->region.slot == memslot)
1174 " requested slot: %u\n", memslot);
2053 * memslot
2066 __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, vm_paddr_t paddr_min, uint32_t memslot, bool protected) argument
2112 vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, uint32_t memslot) argument
[all...]
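memslot2region() (documented at lines 1153-1164 above) maps a KVM memory slot ID back to the selftest's region bookkeeping and treats a miss as a hard test failure. A simplified model of that contract; the struct and assert below are stand-ins for the library's region list and TEST_ASSERT.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct region_model {
        uint32_t slot;
        uint64_t guest_phys;
};

static const struct region_model *slot_to_region(
                const struct region_model *regions, size_t n, uint32_t memslot)
{
        for (size_t i = 0; i < n; i++)
                if (regions[i].slot == memslot)
                        return &regions[i];
        /* cf. "No vm physical memory at requested slot" at line 1174 */
        assert(!"no memory region using requested memslot");
        return NULL;
}

int main(void)
{
        const struct region_model regions[] = {
                { .slot = 0, .guest_phys = 0x0 },
                { .slot = 1, .guest_phys = 0x100000 },
        };

        printf("slot 1 -> gpa 0x%llx\n", (unsigned long long)
               slot_to_region(regions, 2, 1)->guest_phys);
        return 0;
}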
/linux-master/tools/testing/selftests/kvm/include/
kvm_util_base.h
130 * memslot.
164 memslot2region(struct kvm_vm *vm, uint32_t memslot);
860 uint32_t memslot);
862 vm_paddr_t paddr_min, uint32_t memslot,
867 vm_paddr_t paddr_min, uint32_t memslot)
874 return __vm_phy_pages_alloc(vm, num, paddr_min, memslot,
1055 * memslot - Memory region slot for new virtual translation tables
866 vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, vm_paddr_t paddr_min, uint32_t memslot) argument
/linux-master/arch/riscv/include/asm/
kvm_host.h
333 struct kvm_memory_slot *memslot,
