Lines matching refs: vm

The fragments below are the lines that reference "vm" in the RISC-V processor library of the KVM selftests; the number opening each line is its position in the source file, and non-matching lines are omitted, so each run is an excerpt rather than a complete function.

29 static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
31 return (v + vm->page_size) & ~(vm->page_size - 1);
34 static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
40 static uint64_t ptrs_per_pte(struct kvm_vm *vm)
59 static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level)
63 TEST_ASSERT(level < vm->pgtable_levels,
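
These helpers carry the page-table arithmetic for the rest of the file: page_align() rounds a size up to a page boundary, pte_addr() extracts the physical address stored in a PTE, ptrs_per_pte() gives the entries per table, and pte_index() selects the index bits for a given level. A minimal standalone sketch of the index math, assuming the Sv48 layout these tests target (4 KiB pages, 9 index bits per level; the constants are mine, not taken from the excerpt):

#include <stdint.h>
#include <stdio.h>

/* Sv48 assumption: the index for level N sits at virtual-address
 * bits [12 + 9*N, 20 + 9*N], and each table holds 512 entries. */
static uint64_t pte_index_sv48(uint64_t gva, int level)
{
	return (gva >> (12 + 9 * level)) & 0x1ff;
}

int main(void)
{
	uint64_t gva = 0x40201000ULL;
	int level;

	for (level = 3; level >= 0; level--)
		printf("level %d index: %llu\n", level,
		       (unsigned long long)pte_index_sv48(gva, level));
	return 0;
}
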
69 void virt_arch_pgd_alloc(struct kvm_vm *vm)
71 size_t nr_pages = page_align(vm, ptrs_per_pte(vm) * 8) / vm->page_size;
73 if (vm->pgd_created)
76 vm->pgd = vm_phy_pages_alloc(vm, nr_pages,
78 vm->memslots[MEM_REGION_PT]);
79 vm->pgd_created = true;
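
virt_arch_pgd_alloc() reserves the root table once and records it in vm->pgd. Note that page_align() at line 31 adds a full vm->page_size rather than vm->page_size - 1, so an already-aligned size is pushed to the next page boundary. A quick check of what that means for nr_pages, assuming a 4 KiB page mode:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t page_size = 4096;		/* assumed 4 KiB mode */
	uint64_t root_bytes = 512 * 8;		/* ptrs_per_pte() * 8 */
	uint64_t aligned = (root_bytes + page_size) & ~(page_size - 1);

	/* prints "nr_pages = 2": 4096 bytes round up to 8192 */
	printf("nr_pages = %llu\n", (unsigned long long)(aligned / page_size));
	return 0;
}
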
82 void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
85 int level = vm->pgtable_levels - 1;
87 TEST_ASSERT((vaddr % vm->page_size) == 0,
89 " vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
90 TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
91 (vaddr >> vm->page_shift)),
93 TEST_ASSERT((paddr % vm->page_size) == 0,
95 " paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
96 TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
98 " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
99 paddr, vm->max_gfn, vm->page_size);
101 ptep = addr_gpa2hva(vm, vm->pgd) + pte_index(vm, vaddr, level) * 8;
103 next_ppn = vm_alloc_page_table(vm) >> PGTBL_PAGE_SIZE_SHIFT;
110 ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) +
111 pte_index(vm, vaddr, level) * 8;
113 next_ppn = vm_alloc_page_table(vm) >>
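
virt_arch_pg_map() starts at the root level and walks downward, allocating a fresh intermediate table with vm_alloc_page_table() whenever it hits an empty entry, before installing the leaf PTE for paddr. Tests normally reach it through the generic virt_pg_map() wrapper. A hedged usage sketch; the guest addresses and memslot are made up for illustration:

#include "kvm_util.h"

static void map_one_page(struct kvm_vm *vm)
{
	/* Back the mapping with a physical page from memslot 0,
	 * anywhere above an arbitrary minimum GPA. */
	vm_paddr_t paddr = vm_phy_page_alloc(vm, 0x10000, 0);

	virt_pg_map(vm, 0x40201000, paddr);	/* gva must be page aligned */
}
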
126 vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
129 int level = vm->pgtable_levels - 1;
131 if (!vm->pgd_created)
134 ptep = addr_gpa2hva(vm, vm->pgd) + pte_index(vm, gva, level) * 8;
140 ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) +
141 pte_index(vm, gva, level) * 8;
147 return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));
150 TEST_FAIL("No mapping for vm virtual address gva: 0x%lx level: %d",
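
addr_arch_gva2gpa() performs the same walk read-only: it descends level by level and, on success, combines the leaf's physical address with the page offset of gva; otherwise TEST_FAIL() at line 150 aborts the test. Callers go through the generic wrappers, roughly like this:

#include <stdio.h>
#include "kvm_util.h"

static void show_translation(struct kvm_vm *vm, vm_vaddr_t gva)
{
	vm_paddr_t gpa = addr_gva2gpa(vm, gva);	/* the walk shown above */
	void *hva = addr_gva2hva(vm, gva);	/* same walk, then GPA to host pointer */

	printf("gva 0x%lx -> gpa 0x%lx (host %p)\n", gva, gpa, hva);
}
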
155 static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent,
165 for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) {
166 ptep = addr_gpa2hva(vm, pte);
171 pte_dump(stream, vm, indent + 1,
172 pte_addr(vm, *ptep), level - 1);
177 void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
179 int level = vm->pgtable_levels - 1;
182 if (!vm->pgd_created)
185 for (pgd = vm->pgd; pgd < vm->pgd + ptrs_per_pte(vm) * 8; pgd += 8) {
186 ptep = addr_gpa2hva(vm, pgd);
191 pte_dump(stream, vm, indent + 1,
192 pte_addr(vm, *ptep), level - 1);
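
pte_dump() and virt_arch_dump() are the debug printers: virt_arch_dump() scans every slot of the root table and recurses through pte_dump() for each valid entry, one level deeper per call. Tests invoke them via the generic virt_dump() wrapper, for example:

#include <stdio.h>
#include "kvm_util.h"

static void dump_guest_page_tables(struct kvm_vm *vm)
{
	virt_dump(stderr, vm, 0);	/* indent 0; recursion indents one per level */
}
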
198 struct kvm_vm *vm = vcpu->vm;
205 switch (vm->mode) {
211 TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
214 satp = (vm->pgd >> PGTBL_PAGE_SIZE_SHIFT) & SATP_PPN;
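
The vCPU MMU setup only accepts the 4 KiB guest modes and then composes satp from the root table: line 214 shifts vm->pgd down to a physical page number and masks it with SATP_PPN. Per the RISC-V privileged spec, satp packs MODE in bits 63:60 (the value 9 selects Sv48), ASID in bits 59:44, and the root PPN in bits 43:0. A standalone sketch of that packing; the pgd value is arbitrary:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pgd = 0x80400000ULL;	/* arbitrary root-table address */
	uint64_t ppn = pgd >> 12;	/* PGTBL_PAGE_SIZE_SHIFT */
	uint64_t satp = (9ULL << 60) | (ppn & ((1ULL << 44) - 1));

	printf("satp = 0x%016llx\n", (unsigned long long)satp);
	return 0;
}
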
298 struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
307 stack_size = vm->page_size == 4096 ? DEFAULT_STACK_PGS * vm->page_size :
308 vm->page_size;
309 stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
313 vcpu = __vm_vcpu_add(vm, vcpu_id);
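
vm_arch_vcpu_add() sizes the guest stack at DEFAULT_STACK_PGS pages when the guest uses 4 KiB pages (a single page otherwise), allocates it with __vm_vaddr_alloc(), and creates the vCPU with __vm_vcpu_add(). Tests use the generic vm_vcpu_add() wrapper, which also points the new vCPU at its entry function. A hedged sketch; guest_code is a stand-in:

#include "kvm_util.h"

static void guest_code(void)
{
	for (;;)		/* stand-in guest body */
		;
}

static struct kvm_vcpu *add_boot_vcpu(struct kvm_vm *vm)
{
	return vm_vcpu_add(vm, 0, guest_code);	/* vCPU id 0 */
}
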
434 void vm_init_vector_tables(struct kvm_vm *vm)
436 vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers),
437 vm->page_size, MEM_REGION_DATA);
439 *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
442 void vm_install_exception_handler(struct kvm_vm *vm, int vector, exception_handler_fn handler)
444 struct handlers *handlers = addr_gva2hva(vm, vm->handlers);
450 void vm_install_interrupt_handler(struct kvm_vm *vm, exception_handler_fn handler)
452 struct handlers *handlers = addr_gva2hva(vm, vm->handlers);
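
The last three helpers wire up guest exception handling from the host: vm_init_vector_tables() allocates a struct handlers in guest memory and patches its guest address into the guest's exception_handlers global through addr_gva2hva(), and the two installers write the given function pointer into that table. A hedged usage sketch; the struct pt_regs handler signature and vcpu_init_vector_tables() follow my reading of the RISC-V selftest API, and vector 2 is the scause code for an illegal instruction:

#include "kvm_util.h"
#include "processor.h"

static void illegal_insn_handler(struct pt_regs *regs)
{
	regs->epc += 4;		/* skip the trapping instruction,
				 * assuming a 4-byte encoding */
}

static void setup_handlers(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
	vm_init_vector_tables(vm);
	vcpu_init_vector_tables(vcpu);	/* per-vCPU trap vector setup */
	vm_install_exception_handler(vm, 2 /* illegal instruction */,
				     illegal_insn_handler);
}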