Lines matching references to memslot in the arm64 KVM stage-2 MMU code (each entry begins with its line number in the source file):

158 static bool memslot_is_logging(struct kvm_memory_slot *memslot)
160 return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
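The matcher skips line 159 (presumably just the opening brace); reconstructed from the two matched lines, the complete helper is a one-line predicate: a slot is being dirty-logged when it has a dirty bitmap and is not marked read-only.

static bool memslot_is_logging(struct kvm_memory_slot *memslot)
{
	return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
}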
337 struct kvm_memory_slot *memslot)
339 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
340 phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
355 struct kvm_memory_slot *memslot;
362 kvm_for_each_memslot(memslot, bkt, slots)
363 stage2_flush_memslot(kvm, memslot);
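Lines 339-340 and 362-363 show the two recurring idioms in this file: deriving a memslot's guest-physical range from base_gfn/npages, and walking every memslot of the VM. A minimal sketch of both, with locking omitted, the flush helper name assumed, and the enclosing function around lines 355-363 sketched here as stage2_flush_vm:

static void stage2_flush_memslot(struct kvm *kvm,
				 struct kvm_memory_slot *memslot)
{
	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
	phys_addr_t end = addr + PAGE_SIZE * memslot->npages;

	stage2_flush_range(&kvm->arch.mmu, addr, end);	/* helper name assumed */
}

static void stage2_flush_vm(struct kvm *kvm)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *memslot;
	int bkt;

	kvm_for_each_memslot(memslot, bkt, slots)
		stage2_flush_memslot(kvm, memslot);
}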
944 struct kvm_memory_slot *memslot)
946 hva_t hva = memslot->userspace_addr;
947 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
948 phys_addr_t size = PAGE_SIZE * memslot->npages;
978 gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
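Line 978 is the reverse translation: a userspace address that falls inside the memslot's hva range is converted back to a guest-physical address by adding its offset within the slot to the slot's IPA base. The same arithmetic as a standalone sketch (helper name hypothetical):

/* Translate an hva that lies inside a memslot back to a guest-physical address. */
static gpa_t memslot_hva_to_gpa(struct kvm_memory_slot *memslot, hva_t hva)
{
	phys_addr_t base = memslot->base_gfn << PAGE_SHIFT;

	return base + (hva - memslot->userspace_addr);
}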
995 struct kvm_memory_slot *memslot;
1003 kvm_for_each_memslot(memslot, bkt, slots)
1004 stage2_unmap_memslot(kvm, memslot);
1131 struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
1134 if (WARN_ON_ONCE(!memslot))
1137 start = memslot->base_gfn << PAGE_SHIFT;
1138 end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
1143 kvm_flush_remote_tlbs_memslot(kvm, memslot);
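Lines 1131-1143 are the write-protect pass run when dirty logging is turned on for a slot: look the slot up by id, derive its guest-physical range, write-protect that range at stage 2, then flush TLBs for the memslot. A condensed sketch, with the function and write-protect helper names assumed and locking omitted:

static void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
	phys_addr_t start, end;

	if (WARN_ON_ONCE(!memslot))
		return;

	start = memslot->base_gfn << PAGE_SHIFT;
	end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;

	stage2_wp_range(&kvm->arch.mmu, start, end);	/* helper name assumed */
	kvm_flush_remote_tlbs_memslot(kvm, memslot);
}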
1158 struct kvm_memory_slot *memslot;
1164 memslot = id_to_memslot(slots, slot);
1166 start = memslot->base_gfn << PAGE_SHIFT;
1167 end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
1214 static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
1222 /* The memslot and the VMA are guaranteed to be aligned to PAGE_SIZE */
1226 size = memslot->npages * PAGE_SIZE;
1228 gpa_start = memslot->base_gfn << PAGE_SHIFT;
1230 uaddr_start = memslot->userspace_addr;
1240 * memslot->userspace_addr:
1245 * memslot->base_gfn << PAGE_SHIFT:
1261 * by the memslot. This means we have to prohibit block size mappings
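Lines 1214-1261 belong to the predicate that decides whether a stage-2 block mapping (PMD- or PUD-sized) is allowed for a fault: the slot's IPA base and userspace address must share the same offset within the block size, and the block containing hva must be fully covered by the slot. A condensed reconstruction of that check, sketched under those two assumptions:

static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
					       unsigned long hva,
					       unsigned long map_size)
{
	gpa_t gpa_start;
	hva_t uaddr_start, uaddr_end;
	size_t size;

	/* The memslot and the VMA are guaranteed to be aligned to PAGE_SIZE */
	if (map_size == PAGE_SIZE)
		return true;

	size = memslot->npages * PAGE_SIZE;
	gpa_start = memslot->base_gfn << PAGE_SHIFT;
	uaddr_start = memslot->userspace_addr;
	uaddr_end = uaddr_start + size;

	/* IPA and hva must be equally aligned within a block. */
	if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
		return false;

	/* The whole block around hva must be backed by the memslot. */
	return (hva & ~(map_size - 1)) >= uaddr_start &&
	       (hva & ~(map_size - 1)) + map_size <= uaddr_end;
}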
1284 transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
1293 * block map is contained within the memslot.
1295 if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
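Line 1295 reuses the same predicate when trying to back the fault with a transparent huge page: only when a PMD-sized block is both contained in the memslot and correctly aligned is the mapping upgraded. A simplified sketch of the adjustment (the in-tree helper also validates the size of the userspace mapping before collapsing; *pfnp and *ipap are the function's output parameters):

if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
	*ipap &= PMD_MASK;		/* align the IPA down to the block */
	*pfnp &= ~(PTRS_PER_PMD - 1);	/* align the host pfn the same way */
	return PMD_SIZE;
}
return PAGE_SIZE;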
1350 * - preventing VM_SHARED mappings in a memslot with MTE preventing two VMs
1378 struct kvm_memory_slot *memslot, unsigned long hva,
1392 bool logging_active = memslot_is_logging(memslot);
1447 if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
1455 if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE))
1491 pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL,
1536 vma_pagesize = transparent_hugepage_adjust(kvm, memslot,
1588 mark_page_dirty_in_slot(kvm, memslot, gfn);
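Lines 1378-1588 all fall inside user_mem_abort(), the slow path for a stage-2 fault backed by a memslot. The matches trace its shape: check whether the slot is being dirty-logged (1392), probe for the largest block size the slot supports (1447, 1455), resolve the gfn to a host pfn through the memslot (1491), optionally collapse to a THP-backed block (1536), and mark the page dirty after a write (1588). One consequence of line 1392, sketched here as an assumption about the surrounding logic: while dirty logging is active the mapping is forced down to page granularity so that every write faults and can be recorded.

bool logging_active = memslot_is_logging(memslot);

if (logging_active) {
	force_pte = true;		/* no block mappings while logging */
	vma_shift = PAGE_SHIFT;		/* map at page granularity */
}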
1629 struct kvm_memory_slot *memslot;
1688 memslot = gfn_to_memslot(vcpu->kvm, gfn);
1689 hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
1745 ret = user_mem_abort(vcpu, fault_ipa, memslot, hva,
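Lines 1629-1745 show the caller's side of that path, the guest-abort handler: the faulting IPA is turned into a gfn, the gfn is resolved to a memslot and then to an hva (noting whether the slot is writable), and everything is handed to user_mem_abort(). A condensed sketch of that resolution, with error handling and the remaining arguments elided:

gfn_t gfn = fault_ipa >> PAGE_SHIFT;
struct kvm_memory_slot *memslot;
unsigned long hva;
bool writable;

memslot = gfn_to_memslot(vcpu->kvm, gfn);
hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
if (kvm_is_error_hva(hva)) {
	/* No memslot backs this IPA: treat as MMIO or inject an abort. */
} else {
	ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, /* ... */);
}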
1953 * At this point memslot has been committed and there is an
1981 * creating the memslot (a nop). Doing it for deletes makes