Lines matching refs: vm

181 void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)
183 if (vm_check_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL))
184 vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL, ring_size);
186 vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING, ring_size);
187 vm->dirty_ring_size = ring_size;
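The vm_enable_dirty_ring() hits above (these matches appear to come from the KVM selftests' kvm_util.c) show the helper preferring the acquire/release flavor of the dirty-ring capability and falling back to the plain one. A minimal usage sketch, assuming the selftests' kvm_util.h; the ring size below is illustrative, passed in bytes, and kept a power of two as KVM requires:

#include "kvm_util.h"

/* Hypothetical ring size: 4096 entries of struct kvm_dirty_gfn. */
#define DIRTY_RING_SIZE        (4096 * sizeof(struct kvm_dirty_gfn))

static void enable_dirty_ring(struct kvm_vm *vm)
{
        /*
         * Per KVM's documentation, the dirty ring capability must be
         * enabled before any vCPU is created.
         */
        vm_enable_dirty_ring(vm, DIRTY_RING_SIZE);
}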
190 static void vm_open(struct kvm_vm *vm)
192 vm->kvm_fd = _open_kvm_dev_path_or_exit(O_RDWR);
196 vm->fd = __kvm_ioctl(vm->kvm_fd, KVM_CREATE_VM, (void *)vm->type);
197 TEST_ASSERT(vm->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm->fd));
250 * Initializes vm->vpages_valid to match the canonical VA space of the
258 __weak void vm_vaddr_populate_bitmap(struct kvm_vm *vm)
260 sparsebit_set_num(vm->vpages_valid,
261 0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
262 sparsebit_set_num(vm->vpages_valid,
263 (~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift,
264 (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
269 struct kvm_vm *vm;
271 vm = calloc(1, sizeof(*vm));
272 TEST_ASSERT(vm != NULL, "Insufficient Memory");
274 INIT_LIST_HEAD(&vm->vcpus);
275 vm->regions.gpa_tree = RB_ROOT;
276 vm->regions.hva_tree = RB_ROOT;
277 hash_init(vm->regions.slot_hash);
279 vm->mode = shape.mode;
280 vm->type = shape.type;
282 vm->pa_bits = vm_guest_mode_params[vm->mode].pa_bits;
283 vm->va_bits = vm_guest_mode_params[vm->mode].va_bits;
284 vm->page_size = vm_guest_mode_params[vm->mode].page_size;
285 vm->page_shift = vm_guest_mode_params[vm->mode].page_shift;
288 switch (vm->mode) {
290 vm->pgtable_levels = 4;
293 vm->pgtable_levels = 3;
296 vm->pgtable_levels = 4;
299 vm->pgtable_levels = 3;
303 vm->pgtable_levels = 4;
307 vm->pgtable_levels = 3;
313 vm->pgtable_levels = 4;
316 vm->pgtable_levels = 3;
320 kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits);
321 kvm_init_vm_address_properties(vm);
323 * Ignore KVM support for 5-level paging (vm->va_bits == 57),
327 TEST_ASSERT(vm->va_bits == 48 || vm->va_bits == 57,
329 vm->va_bits);
331 vm->pa_bits);
332 vm->pgtable_levels = 4;
333 vm->va_bits = 48;
339 vm->pgtable_levels = 5;
342 vm->pgtable_levels = 5;
345 TEST_FAIL("Unknown guest mode: 0x%x", vm->mode);
349 TEST_ASSERT(!vm->type, "ARM doesn't support test-provided types");
350 if (vm->pa_bits != 40)
351 vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits);
354 vm_open(vm);
357 vm->vpages_valid = sparsebit_alloc();
358 vm_vaddr_populate_bitmap(vm);
361 vm->max_gfn = vm_compute_max_gfn(vm);
364 vm->vpages_mapped = sparsebit_alloc();
366 return vm;
414 struct kvm_vm *vm;
420 vm = ____vm_create(shape);
422 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0, 0, nr_pages, 0);
424 vm->memslots[i] = 0;
426 kvm_vm_elf_load(vm, program_invocation_name);
434 slot0 = memslot2region(vm, 0);
435 ucall_init(vm, slot0->region.guest_phys_addr + slot0->region.memory_size);
439 sync_global_to_guest(vm, guest_rng);
441 kvm_arch_vm_post_create(vm);
443 return vm;
469 struct kvm_vm *vm;
474 vm = __vm_create(shape, nr_vcpus, extra_mem_pages);
477 vcpus[i] = vm_vcpu_add(vm, i, guest_code);
479 return vm;
488 struct kvm_vm *vm;
490 vm = __vm_create_with_vcpus(shape, 1, extra_mem_pages, guest_code, vcpus);
493 return vm;
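__vm_create_with_one_vcpu() above is the workhorse behind the vm_create_with_one_vcpu() wrapper that most tests use as their entry point. A minimal test sketch built on it, assuming the usual selftests headers; the guest body is a placeholder:

#include "kvm_util.h"
#include "ucall_common.h"

static void guest_code(void)
{
        /* Guest-side payload would go here. */
        GUEST_DONE();
}

int main(void)
{
        struct kvm_vcpu *vcpu;
        struct kvm_vm *vm;
        struct ucall uc;

        vm = vm_create_with_one_vcpu(&vcpu, guest_code);

        vcpu_run(vcpu);
        TEST_ASSERT_EQ(get_ucall(vcpu, &uc), UCALL_DONE);

        kvm_vm_free(vm);
        return 0;
}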
500 * vm - VM that has been released before
531 __weak struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm,
534 return __vm_vcpu_add(vm, vcpu_id);
537 struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm)
539 kvm_vm_restart(vm);
541 return vm_vcpu_recreate(vm, 0);
618 * vm - Virtual Machine
634 userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
638 for (node = vm->regions.gpa_tree.rb_node; node; ) {
673 static void vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
678 ret = munmap(vcpu->dirty_gfns, vm->dirty_ring_size);
710 static void __vm_mem_region_delete(struct kvm_vm *vm,
717 rb_erase(&region->gpa_node, &vm->regions.gpa_tree);
718 rb_erase(&region->hva_node, &vm->regions.hva_tree);
723 vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);
798 * vm - Virtual Machine
816 int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len)
831 uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset);
838 if ((ptr1 >> vm->page_shift) != ((ptr1 + amt) >> vm->page_shift))
839 amt = vm->page_size - (ptr1 % vm->page_size);
840 if ((ptr2 >> vm->page_shift) != ((ptr2 + amt) >> vm->page_shift))
841 amt = vm->page_size - (ptr2 % vm->page_size);
843 assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift));
844 assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift));
915 int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
926 return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region);
929 void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
932 int ret = __vm_set_user_memory_region(vm, slot, flags, gpa, size, hva);
942 int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
958 return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION2, &region);
961 void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
965 int ret = __vm_set_user_memory_region2(vm, slot, flags, gpa, size, hva,
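vm_set_user_memory_region() and its __-prefixed variants issue raw KVM_SET_USER_MEMORY_REGION(2) ioctls without touching the library's region bookkeeping, which makes them handy for exercising KVM's own validation. A sketch of the common register-then-delete pattern, with a hypothetical slot, GPA and size; a memslot is deleted by re-registering it with size 0:

#include <sys/mman.h>
#include "kvm_util.h"

#define TEST_SLOT       10              /* hypothetical; must not collide with slot 0 */
#define TEST_GPA        0x10000000ul    /* hypothetical guest physical base */
#define TEST_SIZE       (64 * 4096ul)

static void register_and_delete_region(struct kvm_vm *vm)
{
        void *hva = mmap(NULL, TEST_SIZE, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        TEST_ASSERT(hva != MAP_FAILED, "mmap() failed");

        /* Register the host buffer as guest memory. */
        vm_set_user_memory_region(vm, TEST_SLOT, 0, TEST_GPA, TEST_SIZE, hva);

        /* A zero-sized registration in the same slot deletes the memslot. */
        vm_set_user_memory_region(vm, TEST_SLOT, 0, 0, 0, NULL);

        munmap(hva, TEST_SIZE);
}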
974 void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
981 size_t mem_size = npages * vm->page_size;
986 TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages,
988 "Try npages=%d", vm_adjust_num_guest_pages(vm->mode, npages));
990 TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical "
992 " guest_paddr: 0x%lx vm->page_size: 0x%x",
993 guest_paddr, vm->page_size);
994 TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1)
995 <= vm->max_gfn, "Physical range beyond maximum "
998 " vm->max_gfn: 0x%lx vm->page_size: 0x%x",
999 guest_paddr, npages, vm->max_gfn, vm->page_size);
1006 vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1);
1013 guest_paddr, npages, vm->page_size,
1018 hash_for_each_possible(vm->regions.slot_hash, region, slot_node,
1097 guest_memfd = vm_create_guest_memfd(vm, mem_size, guest_memfd_flags);
1116 if (vm_arch_has_protected_memory(vm))
1119 guest_paddr >> vm->page_shift, npages);
1123 region->region.memory_size = npages * vm->page_size;
1125 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);
1135 vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region);
1136 vm_userspace_mem_region_hva_insert(&vm->regions.hva_tree, region);
1137 hash_add(vm->regions.slot_hash, &region->slot_node, slot);
1153 void vm_userspace_mem_region_add(struct kvm_vm *vm,
1158 vm_mem_add(vm, src_type, guest_paddr, slot, npages, flags, -1, 0);
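vm_userspace_mem_region_add() is the usual way tests add extra guest memory through the library, so the region is tracked in the GPA/HVA trees and slot hash seen above. A sketch that backs a hypothetical GPA range with anonymous memory and then maps it into the guest page tables with virt_map(); all constants are illustrative, and page counts are in guest pages:

#include "kvm_util.h"

#define TEST_MEM_SLOT   1               /* hypothetical memslot number */
#define TEST_MEM_GPA    0xc0000000ul    /* hypothetical guest physical base */
#define TEST_MEM_GVA    TEST_MEM_GPA    /* identity-map for simplicity */
#define TEST_MEM_PAGES  64ul

static void add_and_map_test_memory(struct kvm_vm *vm)
{
        vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, TEST_MEM_GPA,
                                    TEST_MEM_SLOT, TEST_MEM_PAGES, 0);

        /* Map the new range into the guest's virtual address space. */
        virt_map(vm, TEST_MEM_GVA, TEST_MEM_GPA, TEST_MEM_PAGES);
}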
1165 * vm - Virtual Machine
1177 memslot2region(struct kvm_vm *vm, uint32_t memslot)
1181 hash_for_each_possible(vm->regions.slot_hash, region, slot_node,
1188 fputs("---- vm dump ----\n", stderr);
1189 vm_dump(stderr, vm, 2);
1198 * vm - Virtual Machine
1208 void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)
1213 region = memslot2region(vm, slot);
1217 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);
1228 * vm - Virtual Machine
1238 void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)
1243 region = memslot2region(vm, slot);
1247 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);
1258 * vm - Virtual Machine
1267 void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot)
1269 __vm_mem_region_delete(vm, memslot2region(vm, slot), true);
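The three helpers above manipulate an existing library-tracked memslot in place. A sketch that toggles dirty logging on a region, moves it to a new base, and finally removes it; the slot number and new base are hypothetical:

#include "kvm_util.h"

#define TEST_MEM_SLOT           1               /* hypothetical memslot number */
#define TEST_MEM_NEW_GPA        0xe0000000ul    /* hypothetical new guest physical base */

static void exercise_memslot(struct kvm_vm *vm)
{
        /* Start logging dirty pages for this slot... */
        vm_mem_region_set_flags(vm, TEST_MEM_SLOT, KVM_MEM_LOG_DIRTY_PAGES);

        /* ...then stop. */
        vm_mem_region_set_flags(vm, TEST_MEM_SLOT, 0);

        /* Move the slot to a different guest physical base. */
        vm_mem_region_move(vm, TEST_MEM_SLOT, TEST_MEM_NEW_GPA);

        /* Drop the slot entirely. */
        vm_mem_region_delete(vm, TEST_MEM_SLOT);
}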
1272 void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t base, uint64_t size,
1285 region = userspace_mem_region_find(vm, gpa, gpa);
1316 static bool vcpu_exists(struct kvm_vm *vm, uint32_t vcpu_id)
1320 list_for_each_entry(vcpu, &vm->vcpus, list) {
1329 * Adds a virtual CPU to the VM specified by vm with the ID given by vcpu_id.
1332 struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
1337 TEST_ASSERT(!vcpu_exists(vm, vcpu_id), "vCPU%d already exists", vcpu_id);
1343 vcpu->vm = vm;
1345 vcpu->fd = __vm_ioctl(vm, KVM_CREATE_VCPU, (void *)(unsigned long)vcpu_id);
1346 TEST_ASSERT_VM_VCPU_IOCTL(vcpu->fd >= 0, KVM_CREATE_VCPU, vcpu->fd, vm);
1357 list_add(&vcpu->list, &vm->vcpus);
1366 * vm - Virtual Machine
1377 * Within the VM specified by vm, locates the lowest starting virtual
1382 vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
1385 uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift;
1388 uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift;
1389 if ((pgidx_start * vm->page_size) < vaddr_min)
1393 if (!sparsebit_is_set_num(vm->vpages_valid,
1395 pgidx_start = sparsebit_next_set_num(vm->vpages_valid,
1404 if (sparsebit_is_clear_num(vm->vpages_mapped,
1407 pgidx_start = sparsebit_next_clear_num(vm->vpages_mapped,
1416 if (!sparsebit_is_set_num(vm->vpages_valid,
1419 vm->vpages_valid, pgidx_start, pages);
1432 TEST_ASSERT(sparsebit_is_set_num(vm->vpages_valid,
1438 TEST_ASSERT(sparsebit_is_clear_num(vm->vpages_mapped,
1445 return pgidx_start * vm->page_size;
1448 static vm_vaddr_t ____vm_vaddr_alloc(struct kvm_vm *vm, size_t sz,
1453 uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);
1455 virt_pgd_alloc(vm);
1456 vm_paddr_t paddr = __vm_phy_pages_alloc(vm, pages,
1457 KVM_UTIL_MIN_PFN * vm->page_size,
1458 vm->memslots[type], protected);
1464 vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min);
1468 pages--, vaddr += vm->page_size, paddr += vm->page_size) {
1470 virt_pg_map(vm, vaddr, paddr);
1472 sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift);
1478 vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
1481 return ____vm_vaddr_alloc(vm, sz, vaddr_min, type,
1482 vm_arch_has_protected_memory(vm));
1485 vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz,
1489 return ____vm_vaddr_alloc(vm, sz, vaddr_min, type, false);
1496 * vm - Virtual Machine
1505 * Allocates at least sz bytes within the virtual address space of the vm
1506 * given by vm. The allocated bytes are mapped to a virtual address >=
1511 vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)
1513 return __vm_vaddr_alloc(vm, sz, vaddr_min, MEM_REGION_TEST_DATA);
1520 * vm - Virtual Machine
1528 * space of the vm.
1530 vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages)
1532 return vm_vaddr_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR);
1535 vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type)
1537 return __vm_vaddr_alloc(vm, getpagesize(), KVM_UTIL_MIN_VADDR, type);
1544 * vm - Virtual Machine
1552 * space of the vm.
1554 vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm)
1556 return vm_vaddr_alloc_pages(vm, 1);
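The vm_vaddr_alloc*() family above allocates guest virtual memory backed by freshly allocated, contiguous guest physical pages (placed in the MEM_REGION_TEST_DATA memslot by default). A sketch, assuming kvm_util.h, that allocates a buffer and initializes it from the host through its HVA alias:

#include <string.h>
#include "kvm_util.h"

static vm_vaddr_t alloc_guest_buffer(struct kvm_vm *vm, size_t size)
{
        vm_vaddr_t gva = vm_vaddr_alloc(vm, size, KVM_UTIL_MIN_VADDR);

        /* Initialize the buffer from the host side. */
        memset(addr_gva2hva(vm, gva), 0, size);

        return gva;
}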
1563 * vm - Virtual Machine
1572 * Within the VM given by @vm, creates a virtual translation for
1575 void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
1578 size_t page_size = vm->page_size;
1585 virt_pg_map(vm, vaddr, paddr);
1586 sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift);
1597 * vm - Virtual Machine
1606 * by gpa, within the VM given by vm. When found, the host virtual
1607 * address providing the memory to the vm physical address is returned.
1610 void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
1614 gpa = vm_untag_gpa(vm, gpa);
1616 region = userspace_mem_region_find(vm, gpa, gpa);
1618 TEST_FAIL("No vm physical memory at 0x%lx", gpa);
1630 * vm - Virtual Machine
1639 * by hva, within the VM given by vm. When found, the equivalent
1643 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
1647 for (node = vm->regions.hva_tree.rb_node; node; ) {
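addr_gpa2hva() and addr_hva2gpa() translate through the GPA- and HVA-sorted region trees populated when memslots are added. A quick round-trip sketch; the GPA is a caller-supplied value that must lie inside a registered region:

#include "kvm_util.h"

static void check_round_trip(struct kvm_vm *vm, vm_paddr_t gpa)
{
        void *hva = addr_gpa2hva(vm, gpa);

        TEST_ASSERT_EQ(addr_hva2gpa(vm, hva), gpa);
}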
1671 * vm - Virtual Machine
1686 void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa)
1691 region = userspace_mem_region_find(vm, gpa, gpa);
1703 void vm_create_irqchip(struct kvm_vm *vm)
1705 vm_ioctl(vm, KVM_CREATE_IRQCHIP, NULL);
1707 vm->has_irqchip = true;
1769 uint32_t size = vcpu->vm->dirty_ring_size;
1810 int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type)
1817 return __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev);
1820 int __kvm_create_device(struct kvm_vm *vm, uint64_t type)
1829 err = __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev);
1862 int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
1869 return __vm_ioctl(vm, KVM_IRQ_LINE, &irq_level);
1872 void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
1874 int ret = _kvm_irq_line(vm, irq, level);
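vm_create_irqchip() creates the in-kernel interrupt controller, and kvm_irq_line()/_kvm_irq_line() assert or de-assert a GSI on it, with the non-underscored wrapper asserting success. An x86-flavored sketch that pulses a hypothetical GSI (ARM encodes the irq field differently, and the GSI must be one that KVM's default routing covers, e.g. an IOAPIC pin):

#include "kvm_util.h"

#define TEST_GSI        16      /* hypothetical GSI / IOAPIC pin */

static void pulse_irq(struct kvm_vm *vm)
{
        /* The irqchip must exist before any KVM_IRQ_LINE call. */
        vm_create_irqchip(vm);

        kvm_irq_line(vm, TEST_GSI, 1);  /* assert */
        kvm_irq_line(vm, TEST_GSI, 0);  /* de-assert */
}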
1910 int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
1915 ret = __vm_ioctl(vm, KVM_SET_GSI_ROUTING, routing);
1921 void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
1925 ret = _kvm_gsi_routing_write(vm, routing);
1933 * vm - Virtual Machine
1941 * Dumps the current state of the VM given by vm, to the FILE stream
1944 void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
1950 fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode);
1951 fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd);
1952 fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size);
1954 hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) {
1968 sparsebit_dump(stream, vm->vpages_mapped, indent + 2);
1970 vm->pgd_created);
1971 if (vm->pgd_created) {
1974 virt_dump(stream, vm, indent + 4);
1978 list_for_each_entry(vcpu, &vm->vcpus, list)
2063 * vm - Virtual Machine
2074 * Within the VM specified by vm, locates a range of available physical
2079 vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
2088 TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address "
2091 paddr_min, vm->page_size);
2093 region = memslot2region(vm, memslot);
2097 base = pg = paddr_min >> vm->page_shift;
2110 paddr_min, vm->page_size, memslot);
2111 fputs("---- vm dump ----\n", stderr);
2112 vm_dump(stderr, vm, 2);
2122 return base * vm->page_size;
2125 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
2128 return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
2131 vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm)
2133 return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR,
2134 vm->memslots[MEM_REGION_PT]);
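__vm_phy_pages_alloc() and its wrappers hand out contiguous guest physical pages from a given memslot, recording them in the region's used-page sparsebit; vm_alloc_page_table() is the page-table-specific flavor. A sketch that grabs a few pages from memslot 0 and zeroes them from the host; the minimum paddr shown is just the library's usual floor:

#include <string.h>
#include "kvm_util.h"

static vm_paddr_t alloc_scratch_pages(struct kvm_vm *vm, size_t npages)
{
        vm_paddr_t gpa = vm_phy_pages_alloc(vm, npages,
                                            KVM_UTIL_MIN_PFN * vm->page_size,
                                            0 /* memslot */);

        memset(addr_gpa2hva(vm, gpa), 0, npages * vm->page_size);
        return gpa;
}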
2141 * vm - Virtual Machine
2149 void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva)
2151 return addr_gpa2hva(vm, addr_gva2gpa(vm, gva));
2154 unsigned long __weak vm_compute_max_gfn(struct kvm_vm *vm)
2156 return ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;
2271 * vm - the VM for which the stat should be read
2280 void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data,
2287 if (!vm->stats_fd) {
2288 vm->stats_fd = vm_get_stats_fd(vm);
2289 read_stats_header(vm->stats_fd, &vm->stats_header);
2290 vm->stats_desc = read_stats_descriptors(vm->stats_fd,
2291 &vm->stats_header);
2294 size_desc = get_stats_descriptor_size(&vm->stats_header);
2296 for (i = 0; i < vm->stats_header.num_desc; ++i) {
2297 desc = (void *)vm->stats_desc + (i * size_desc);
2302 read_stat_data(vm->stats_fd, &vm->stats_header, desc,
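__vm_get_stat() lazily opens the VM's binary-stats fd, caches the header and descriptors, and then copies out the named stat's data. A sketch that reads a single-element stat; the helper name is hypothetical and the stat name in the usage comment is only an example (available names vary by architecture and kernel):

#include "kvm_util.h"

static uint64_t get_vm_stat(struct kvm_vm *vm, const char *name)
{
        uint64_t data;

        /* Read one element of a (presumed single-element) named stat. */
        __vm_get_stat(vm, name, &data, 1);
        return data;
}

/* Example use: uint64_t flushes = get_vm_stat(vm, "remote_tlb_flush"); */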
2309 __weak void kvm_arch_vm_post_create(struct kvm_vm *vm)
2327 bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr)
2332 if (!vm_arch_has_protected_memory(vm))
2335 region = userspace_mem_region_find(vm, paddr, paddr);
2336 TEST_ASSERT(region, "No vm physical memory at 0x%lx", paddr);
2338 pg = paddr >> vm->page_shift;