Searched refs:hva (Results 1 - 25 of 61) sorted by last modified time

/linux-master/virt/kvm/
kvm_main.c
829 * any given time, and the caches themselves can check for hva overlap,
1563 * Initialize @new's hva range. Do this even when replacing an @old
1572 * hva_node needs to be swapped with remove+insert even though hva can't
2752 * Return the hva of a @gfn and the R/W attribute if possible.
2756 * @writable: used to return the read/write attribute of the @slot if the hva
2762 unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false); local
2764 if (!kvm_is_error_hva(hva) && writable)
2767 return hva;
3030 bool write_fault, bool *writable, hva_t *hva)
3034 if (hva)
3028 __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn, bool atomic, bool interruptible, bool *async, bool write_fault, bool *writable, hva_t *hva) argument
3152 void *hva = NULL; local
[all...]
async_pf.c
191 unsigned long hva, struct kvm_arch_async_pf *arch)
199 if (unlikely(kvm_is_error_hva(hva)))
213 work->addr = hva;
190 kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, unsigned long hva, struct kvm_arch_async_pf *arch) argument
/linux-master/tools/testing/selftests/kvm/
set_memory_region_test.c
115 uint64_t *hva; local
134 hva = addr_gpa2hva(vm, MEM_REGION_GPA);
135 memset(hva, 0, 2 * 4096);
184 uint64_t *hva; local
188 hva = addr_gpa2hva(vm, MEM_REGION_GPA);
192 * hva->gpa translation is misaligned, i.e. the guest is accessing a
196 WRITE_ONCE(*hva, 2);
209 WRITE_ONCE(*hva, 1);
/linux-master/arch/x86/kvm/
x86.c
3699 kvm_is_error_hva(ghc->hva) || !ghc->memslot)) {
3704 kvm_is_error_hva(ghc->hva) || !ghc->memslot)
3708 st = (struct kvm_steal_time __user *)ghc->hva;
5099 kvm_is_error_hva(ghc->hva) || !ghc->memslot))
5102 st = (struct kvm_steal_time __user *)ghc->hva;
8017 unsigned long hva; local
8043 hva = kvm_vcpu_gfn_to_hva(vcpu, gpa_to_gfn(gpa));
8044 if (kvm_is_error_hva(hva))
8047 hva += offset_in_page(gpa);
8051 r = emulator_try_cmpxchg_user(u8, hva, ol
12645 unsigned long hva, old_npages; local
[all...]
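The x86.c hits at 8043-8047 show the two-step translation the emulator does before emulator_try_cmpxchg_user(): look up the hva of the gpa's frame, then add the gpa's offset within the page back in. Below is a minimal user-space sketch of that address arithmetic; PAGE_SHIFT, the slot layout and the helper name are assumptions for illustration, not KVM's internal API.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Hypothetical stand-in for kvm_vcpu_gfn_to_hva(): hva of the frame's first byte. */
static uint64_t gfn_to_hva(uint64_t slot_hva_base, uint64_t slot_base_gfn, uint64_t gfn)
{
	return slot_hva_base + ((gfn - slot_base_gfn) << PAGE_SHIFT);
}

int main(void)
{
	uint64_t gpa = 0x100123456ULL;		/* example guest physical address */
	uint64_t gfn = gpa >> PAGE_SHIFT;	/* gpa_to_gfn()                   */
	uint64_t hva = gfn_to_hva(0x7f0000000000ULL, 0x100000, gfn);

	hva += gpa & (PAGE_SIZE - 1);		/* offset_in_page(gpa)            */
	printf("hva = %#llx\n", (unsigned long long)hva);
	return 0;
}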
lapic.c
2606 void __user *hva; local
2614 hva = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
2616 if (IS_ERR(hva)) {
2617 ret = PTR_ERR(hva);
xen.c
742 void __user * hva = u64_to_user_ptr(data->u.shared_info.hva); local
744 if (!PAGE_ALIGNED(hva) || !access_ok(hva, PAGE_SIZE)) {
746 } else if (!hva) {
751 (unsigned long)hva, PAGE_SIZE);
825 data->u.shared_info.hva = kvm->arch.xen.shinfo_cache.uhva;
827 data->u.shared_info.hva = 0;
884 if (data->u.hva == 0) {
891 data->u.hva, sizeo
[all...]
/linux-master/arch/x86/kvm/vmx/
vmx.c
4118 vapic_page = vmx->nested.virtual_apic_map.hva;
nested.c
630 msr_bitmap_l1 = (unsigned long *)map->hva;
2060 vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva;
3297 (struct pi_desc *)(((void *)map->hva) +
3879 vapic_page = vmx->nested.virtual_apic_map.hva;
/linux-master/arch/x86/kvm/svm/
svm.c
2270 vmcb12 = map.hva;
4642 svm_copy_vmrun_state(map_save.hva + 0x400,
4686 svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400);
4694 vmcb12 = map.hva;
sev.c
2887 svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva;
/linux-master/arch/x86/kvm/mmu/
mmu.c
3070 * event for the hva. This can be done by explicit checking the MMU notifier
3071 * or by ensuring that KVM already has a valid mapping that covers the hva.
3086 unsigned long hva; local
3101 hva = __gfn_to_hva_memslot(slot, gfn);
3117 pgd = READ_ONCE(*pgd_offset(kvm->mm, hva));
3121 p4d = READ_ONCE(*p4d_offset(&pgd, hva));
3125 pud = READ_ONCE(*pud_offset(&p4d, hva));
3134 pmd = READ_ONCE(*pmd_offset(&pud, hva));
3283 unsigned long hva = gfn_to_hva_memslot(slot, gfn); local
3285 send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva, PAGE_SHIF
[all...]
/linux-master/tools/arch/x86/include/uapi/asm/
kvm.h
574 __u64 hva; member in union:kvm_xen_hvm_attr::__anon140::__anon141
625 __u64 hva; member in union:kvm_xen_vcpu_attr::__anon146
/linux-master/drivers/vhost/
vhost.c
2163 static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len) argument
2177 if (u->addr > hva - 1 + len ||
2178 u->addr - 1 + u->size < hva)
2180 start = max(u->addr, hva);
2181 end = min(u->addr - 1 + u->size, hva - 1 + len);
2196 hva += min;
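The log_write_hva() hits above (2177-2196) clip the written range [hva, hva + len - 1] against each userspace memory region [addr, addr + size - 1] using inclusive end points. A self-contained sketch of that overlap arithmetic, with hypothetical names and example values:

#include <stdint.h>
#include <stdio.h>

static int clip_range(uint64_t addr, uint64_t size,	/* region        */
		      uint64_t hva, uint64_t len,	/* write to log  */
		      uint64_t *start, uint64_t *end)
{
	/* No overlap: region begins after the write ends, or ends before it begins. */
	if (addr > hva - 1 + len || addr - 1 + size < hva)
		return 0;

	*start = addr > hva ? addr : hva;		/* max(u->addr, hva) */
	*end = addr - 1 + size < hva - 1 + len ?
	       addr - 1 + size : hva - 1 + len;		/* min() of the ends */
	return 1;
}

int main(void)
{
	uint64_t start, end;

	if (clip_range(0x1000, 0x2000, 0x1800, 0x1000, &start, &end))
		printf("overlap: [%#llx, %#llx]\n",
		       (unsigned long long)start, (unsigned long long)end);
	return 0;
}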
/linux-master/arch/x86/virt/svm/
sev.c
337 void snp_dump_hva_rmpentry(unsigned long hva) argument
345 pgd += pgd_index(hva);
346 pte = lookup_address_in_pgd(pgd, hva, &level);
349 pr_err("Can't dump RMP entry for HVA %lx: no PTE/PFN found\n", hva);
353 paddr = PFN_PHYS(pte_pfn(*pte)) | (hva & ~page_level_mask(level));
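The sev.c hit at 353 rebuilds a physical address for the RMP dump by combining the PTE's frame number with the hva's offset inside whatever page size that PTE maps. A small sketch of the same arithmetic; the 2M mapping level and the concrete values are assumptions for illustration:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12

int main(void)
{
	uint64_t hva        = 0x7f4000156789ULL;
	uint64_t level_size = 2UL << 20;	/* the PTE maps a 2M region      */
	uint64_t pfn        = 0x91000;		/* 2M-aligned frame from the PTE */

	/* paddr = PFN_PHYS(pte_pfn(*pte)) | (hva & ~page_level_mask(level)) */
	uint64_t paddr = (pfn << PAGE_SHIFT) | (hva & (level_size - 1));

	printf("paddr = %#llx\n", (unsigned long long)paddr);
	return 0;
}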
/linux-master/arch/x86/include/uapi/asm/
kvm.h
574 __u64 hva; member in union:kvm_xen_hvm_attr::__anon11::__anon12
625 __u64 hva; member in union:kvm_xen_vcpu_attr::__anon17
/linux-master/arch/arm64/kvm/
mmu.c
946 hva_t hva = memslot->userspace_addr; local
949 hva_t reg_end = hva + size;
967 vma = find_vma_intersection(current->mm, hva, reg_end);
974 vm_start = max(hva, vma->vm_start);
981 hva = vm_end;
982 } while (hva < reg_end);
1215 unsigned long hva,
1271 return (hva & ~(map_size - 1)) >= uaddr_start &&
1272 (hva & ~(map_size - 1)) + map_size <= uaddr_end;
1276 * Check if the given hva i
1214 fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot, unsigned long hva, unsigned long map_size) argument
1284 transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long hva, kvm_pfn_t *pfnp, phys_addr_t *ipap) argument
1315 get_vma_page_shift(struct vm_area_struct *vma, unsigned long hva) argument
1377 user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, struct kvm_memory_slot *memslot, unsigned long hva, bool fault_is_perm) argument
1630 unsigned long hva; local
1994 hva_t hva, reg_end; local
[all...]
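The fault_supports_stage2_huge_mapping() hits at 1271-1272 encode a containment test: the map_size-aligned block around the faulting hva must fall entirely inside the memslot's userspace range, otherwise the larger stage-2 mapping cannot be used. A self-contained sketch of that check, with hypothetical names and example values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool block_fits(uint64_t hva, uint64_t map_size,
		       uint64_t uaddr_start, uint64_t uaddr_end)
{
	uint64_t block_start = hva & ~(map_size - 1);	/* align hva down to map_size */

	return block_start >= uaddr_start &&
	       block_start + map_size <= uaddr_end;
}

int main(void)
{
	/* A 2M block around an hva inside a [0x40000000, 0x80000000) slot. */
	printf("%d\n", block_fits(0x40123456, 2UL << 20,
				  0x40000000, 0x80000000));
	return 0;
}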
/linux-master/tools/testing/selftests/kvm/x86_64/
xen_shinfo_test.c
425 .u.shared_info.hva = (unsigned long)shinfo
430 .u.shared_info.hva = 0
520 ha.u.shared_info.hva = (unsigned long)shinfo;
900 .u.hva = (unsigned long)vinfo
smaller_maxphyaddr_emulation_test.c
54 uint64_t *hva; local
79 hva = addr_gpa2hva(vm, MEM_REGION_GPA);
80 memset(hva, 0, PAGE_SIZE);
private_mem_conversions_test.c
351 uint8_t *hva = addr_gpa2hva(vm, gpa + i); local
354 memcmp_h(hva, gpa + i, uc.args[3], nr_bytes);
358 memset(hva, uc.args[4], nr_bytes);
/linux-master/tools/testing/selftests/kvm/lib/
ucall_common.c
39 uc->hva = uc;
96 ucall_arch_do_ucall((vm_vaddr_t)uc->hva);
113 ucall_arch_do_ucall((vm_vaddr_t)uc->hva);
135 ucall_arch_do_ucall((vm_vaddr_t)uc->hva);
kvm_util.c
792 * hva - Starting host virtual address
802 * Returns 0 if the bytes starting at hva for a length of len
804 * a value < 0, if bytes at hva are less than those at gva.
807 * Compares the bytes starting at the host virtual address hva, for
811 int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len) argument
820 uintptr_t ptr1 = (uintptr_t)hva + offset;
911 uint64_t gpa, uint64_t size, void *hva)
918 .userspace_addr = (uintptr_t)hva,
925 uint64_t gpa, uint64_t size, void *hva)
927 int ret = __vm_set_user_memory_region(vm, slot, flags, gpa, size, hva);
910 __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, uint64_t gpa, uint64_t size, void *hva) argument
924 vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, uint64_t gpa, uint64_t size, void *hva) argument
933 __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags, uint64_t gpa, uint64_t size, void *hva, uint32_t guest_memfd, uint64_t guest_memfd_offset) argument
950 vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags, uint64_t gpa, uint64_t size, void *hva, uint32_t guest_memfd, uint64_t guest_memfd_offset) argument
1630 addr_hva2gpa(struct kvm_vm *vm, void *hva) argument
[all...]
/linux-master/tools/testing/selftests/kvm/include/
kvm_util_base.h
451 int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva,
564 uint64_t gpa, uint64_t size, void *hva);
566 uint64_t gpa, uint64_t size, void *hva);
568 uint64_t gpa, uint64_t size, void *hva,
571 uint64_t gpa, uint64_t size, void *hva,
610 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
/linux-master/tools/testing/selftests/kvm/aarch64/
page_fault_test.c
305 void *hva; member in struct:uffd_args
320 TEST_ASSERT_EQ(addr, (uint64_t)args->hva);
356 args->hva = (void *)region->region.userspace_addr;
361 memcpy(args->copy, args->hva, args->paging_size);
376 pt_args.hva,
383 data_args.hva,
410 void *hva = (void *)region->region.userspace_addr; local
419 ret = madvise(hva, paging_size, MADV_DONTNEED);
429 void *hva; local
432 hva
531 void *hva; local
[all...]
/linux-master/include/linux/
kvm_host.h
256 unsigned long hva, struct kvm_arch_async_pf *arch);
298 void *hva; member in struct:kvm_host_map
309 return !!map->hva;
1223 bool write_fault, bool *writable, hva_t *hva);
1355 * @hva: userspace virtual address to map.
1365 int kvm_gpc_activate_hva(struct gfn_to_pfn_cache *gpc, unsigned long hva, unsigned long len);
1791 hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot) argument
1793 gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;
1815 unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa)); local
1817 return !kvm_is_error_hva(hva);
[all...]
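The kvm_host.h hits at 1791-1793 show the hva-to-gfn direction of the mapping: the byte offset of the hva into the memslot's userspace range, shifted down to frames. The sketch below adds the slot's base_gfn to that offset to produce the final gfn; the minimal slot struct and PAGE_SHIFT value are assumptions for the example, not KVM's definitions.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12

struct memslot {
	uint64_t base_gfn;		/* first guest frame covered by the slot */
	uint64_t userspace_addr;	/* hva where the slot is mapped          */
};

static uint64_t hva_to_gfn(const struct memslot *slot, uint64_t hva)
{
	/* gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT */
	uint64_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

	return slot->base_gfn + gfn_offset;
}

int main(void)
{
	struct memslot slot = {
		.base_gfn       = 0x100,
		.userspace_addr = 0x7f0000000000ULL,
	};

	printf("gfn = %#llx\n",
	       (unsigned long long)hva_to_gfn(&slot, 0x7f0000003000ULL));
	return 0;
}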
kvm_types.h
35 * hva - host virtual address
55 unsigned long hva; member in struct:gfn_to_hva_cache

Completed in 426 milliseconds
