Searched refs:hva (Results 1 - 25 of 61) sorted by path

/linux-master/arch/arm64/kvm/mmu.c
    946   hva_t hva = memslot->userspace_addr;   (local)
    949   hva_t reg_end = hva + size;
    967   vma = find_vma_intersection(current->mm, hva, reg_end);
    974   vm_start = max(hva, vma->vm_start);
    981   hva = vm_end;
    982   } while (hva < reg_end);
   1215   unsigned long hva,
   1271   return (hva & ~(map_size - 1)) >= uaddr_start &&
   1272          (hva & ~(map_size - 1)) + map_size <= uaddr_end;
   1276   * Check if the given hva is ...
   1214   fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot, unsigned long hva, unsigned long map_size)   (argument)
   1284   transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long hva, kvm_pfn_t *pfnp, phys_addr_t *ipap)   (argument)
   1315   get_vma_page_shift(struct vm_area_struct *vma, unsigned long hva)   (argument)
   1377   user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, struct kvm_memory_slot *memslot, unsigned long hva, bool fault_is_perm)   (argument)
   1630   unsigned long hva;   (local)
   1994   hva_t hva, reg_end;   (local)
          [more matches in this file omitted]
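
The mmu.c hits at 946-982 form one idiom: walk a memslot's HVA range one VMA at a time, clipping each VMA to the slot. A minimal sketch of that loop, assuming only the memslot fields visible above (the wrapper name and the per-VMA body are mine):

    #include <linux/mm.h>
    #include <linux/kvm_host.h>

    /* Caller must hold mmap_lock for current->mm. */
    static int walk_slot_vmas(struct kvm_memory_slot *memslot)
    {
            hva_t hva = memslot->userspace_addr;
            hva_t reg_end = hva + (memslot->npages << PAGE_SHIFT);

            do {
                    struct vm_area_struct *vma;
                    hva_t vm_start, vm_end;

                    /* First VMA overlapping [hva, reg_end), if any. */
                    vma = find_vma_intersection(current->mm, hva, reg_end);
                    if (!vma)
                            break;

                    /* Clip the VMA to the memslot's range. */
                    vm_start = max(hva, vma->vm_start);
                    vm_end = min(reg_end, vma->vm_end);

                    /* ... per-VMA work on [vm_start, vm_end) ... */

                    hva = vm_end;   /* advance past this VMA */
            } while (hva < reg_end);

            return 0;
    }

The same do/while shape reappears in the riscv results further down (mmu.c:452-529).
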
/linux-master/arch/loongarch/include/asm/kvm_host.h
    206   void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
    209   int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
/linux-master/arch/loongarch/kvm/mmu.c
    638   unsigned long hva, bool write)
    667   return (hva >= ALIGN(start, PMD_SIZE)) && (hva < ALIGN_DOWN(end, PMD_SIZE));
    683   * event for the hva. This can be done by explicit checking the MMU notifier
    684   * or by ensuring that KVM already has a valid mapping that covers the hva.
    699   unsigned long hva;   (local)
    714   hva = __gfn_to_hva_memslot(slot, gfn);
    730   pgd = READ_ONCE(*pgd_offset(kvm->mm, hva));
    734   p4d = READ_ONCE(*p4d_offset(&pgd, hva));
    738   pud = READ_ONCE(*pud_offset(&p4d, hva));
    637   fault_supports_huge_mapping(struct kvm_memory_slot *memslot, unsigned long hva, bool write)   (argument)
    804   unsigned long hva, mmu_seq, prot_bits;   (local)
          [more matches in this file omitted]
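
The loongarch block at 699-738 (and its x86 twin at mmu/mmu.c:3086-3134 below) does a lockless walk of the host page tables to learn how the hva is mapped. A condensed sketch, assuming the walk is protected by the mmu_notifier sequence check mentioned in the quoted comment; pud_leaf()/pmd_leaf() are the current names for the leaf tests:

    #include <linux/pgtable.h>
    #include <linux/kvm_host.h>

    /* Returns the page shift of the host mapping backing @hva, 0 if none. */
    static int host_mapping_shift(struct kvm *kvm, unsigned long hva)
    {
            pgd_t pgd;
            p4d_t p4d;
            pud_t pud;
            pmd_t pmd;

            /* READ_ONCE: the tables may change underneath us, so each
             * level is sampled exactly once and never re-dereferenced. */
            pgd = READ_ONCE(*pgd_offset(kvm->mm, hva));
            if (pgd_none(pgd))
                    return 0;

            p4d = READ_ONCE(*p4d_offset(&pgd, hva));
            if (p4d_none(p4d) || !p4d_present(p4d))
                    return 0;

            pud = READ_ONCE(*pud_offset(&p4d, hva));
            if (pud_none(pud) || !pud_present(pud))
                    return 0;
            if (pud_leaf(pud))
                    return PUD_SHIFT;

            pmd = READ_ONCE(*pmd_offset(&pud, hva));
            if (pmd_none(pmd) || !pmd_present(pmd))
                    return 0;
            if (pmd_leaf(pmd))
                    return PMD_SHIFT;

            return PAGE_SHIFT;
    }
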
/linux-master/arch/powerpc/kvm/book3s_64_mmu_hv.c
    515   unsigned long gpa, gfn, hva, pfn, hpa;   (local)
    604   hva = gfn_to_hva_memslot(memslot, gfn);
    612   if (get_user_page_fast_only(hva, FOLL_WRITE, &page)) {
    633   ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift);
    656   hpa |= hva & (pte_size - psize);
   1163   unsigned long hva, offset;   (local)
   1170   hva = gfn_to_hva_memslot(memslot, gfn);
   1171   npages = get_user_pages_fast(hva, 1, FOLL_WRITE, pages);
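
Both Book3S fault handlers here follow the same two-step translation: gfn to hva via the memslot, then hva to struct page with a fast, mmap-lock-free GUP. A sketch of that step (the helper name is mine; the real code falls back to a slow path when the fast grab misses):

    #include <linux/mm.h>
    #include <linux/kvm_host.h>

    static struct page *grab_guest_page(struct kvm_memory_slot *memslot, gfn_t gfn)
    {
            unsigned long hva = gfn_to_hva_memslot(memslot, gfn);
            struct page *page;

            if (kvm_is_error_hva(hva))
                    return NULL;

            /* Succeeds only if the page is already faulted in and writable. */
            if (get_user_page_fast_only(hva, FOLL_WRITE, &page))
                    return page;

            return NULL;    /* caller would fall back to a sleeping GUP here */
    }
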
/linux-master/arch/powerpc/kvm/book3s_64_mmu_radix.c
    830   unsigned long hva, gfn = gpa >> PAGE_SHIFT;   (local)
    848   hva = gfn_to_hva_memslot(memslot, gfn);
    849   if (!kvm_ro && get_user_page_fast_only(hva, FOLL_WRITE, &page)) {
    872   ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift);
    893   (hva & (PUD_SIZE - PAGE_SIZE))) {
    897   (hva & (PMD_SIZE - PAGE_SIZE))) {
    908   pte = __pte(pte_val(pte) | (hva & rpnmask));
/linux-master/arch/powerpc/kvm/book3s_hv.c
   5307   unsigned long hva;   (local)
   5344   hva = memslot->userspace_addr;
   5346   vma = vma_lookup(kvm->mm, hva);
/linux-master/arch/powerpc/kvm/book3s_hv_rm_mmu.c
    189   unsigned long slot_fn, hva;   (local)
    246   hva = __gfn_to_hva_memslot(memslot, gfn);
    249   ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &hpage_shift);
    273   pa |= hva & (host_pte_size - 1);
    889   unsigned long gfn, hva, pa, psize = PAGE_SHIFT;   (local)
    900   hva = __gfn_to_hva_memslot(memslot, gfn);
    903   ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift);
    914   pa |= hva & (psize - 1);
/linux-master/arch/powerpc/kvm/e500_mmu_host.c
    329   unsigned long hva;   (local)
    354   hva = gfn_to_hva_memslot(slot, gfn);
    360   vma = find_vma(kvm->mm, hva);
    361   if (vma && hva >= vma->vm_start &&
    379   pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);
    424   } else if (vma && hva >= vma->vm_start &&
    477   ptep = find_linux_pte(pgdir, hva, NULL, NULL);
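
e500_mmu_host.c:379 shows the other way to get a pfn when GUP does not apply: for a VM_PFNMAP VMA (device or reserved memory mapped with remap_pfn_range()), the pfn is arithmetic on the VMA itself. A sketch under that assumption, with vm_pgoff holding the base pfn as remap_pfn_range() leaves it:

    #include <linux/mm.h>
    #include <linux/kvm_host.h>

    /* Caller holds mmap_lock and has verified vma->vm_flags & VM_PFNMAP. */
    static kvm_pfn_t pfnmap_hva_to_pfn(struct vm_area_struct *vma, unsigned long hva)
    {
            if (hva < vma->vm_start || hva >= vma->vm_end)
                    return KVM_PFN_ERR_FAULT;

            /* vm_pgoff is the pfn of vm_start for remap_pfn_range() mappings. */
            return vma->vm_pgoff + ((hva - vma->vm_start) >> PAGE_SHIFT);
    }
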
/linux-master/arch/riscv/include/asm/kvm_host.h
    334   gpa_t gpa, unsigned long hva, bool is_write);
/linux-master/arch/riscv/kvm/mmu.c
    452   hva_t hva, reg_end, size;   (local)
    469   hva = new->userspace_addr;
    471   reg_end = hva + size;
    490   struct vm_area_struct *vma = find_vma(current->mm, hva);
    506   vm_start = max(hva, vma->vm_start);
    510   gpa_t gpa = base_gpa + (vm_start - hva);
    528   hva = vm_end;
    529   } while (hva < reg_end);
    611   gpa_t gpa, unsigned long hva, bool is_write)
    634   vma = vma_lookup(current->mm, hva);
    609   kvm_riscv_gstage_map(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot, gpa_t gpa, unsigned long hva, bool is_write)   (argument)
          [more matches in this file omitted]
/linux-master/arch/riscv/kvm/vcpu_exit.c
     17   unsigned long hva, fault_addr;   (local)
     25   hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
     27   if (kvm_is_error_hva(hva) ||
     43   ret = kvm_riscv_gstage_map(vcpu, memslot, fault_addr, hva,
/linux-master/arch/riscv/kvm/vcpu_sbi_sta.c
     35   unsigned long hva;   (local)
     47   hva = kvm_vcpu_gfn_to_hva(vcpu, gfn);
     49   if (WARN_ON(kvm_is_error_hva(hva))) {
     54   sequence_ptr = (__le32 __user *)(hva + offset_in_page(shmem) +
     56   steal_ptr = (__le64 __user *)(hva + offset_in_page(shmem) +
     88   unsigned long hva;   (local)
    114   hva = kvm_vcpu_gfn_to_hva_prot(vcpu, shmem >> PAGE_SHIFT, &writable);
    115   if (kvm_is_error_hva(hva) || !writable)
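
vcpu_sbi_sta.c:47-56 is a reminder that gfn-to-hva translation is page-granular: the hva corresponds to the start of the page, so the offset of the guest address within its page must be added back before the result is usable as a __user pointer. A sketch for the steal field, assuming the sbi_sta_struct layout from arch/riscv/include/asm/sbi.h:

    #include <linux/kvm_host.h>
    #include <asm/sbi.h>

    static int sta_steal_ptr(struct kvm_vcpu *vcpu, gpa_t shmem,
                             __le64 __user **ptrp)
    {
            unsigned long hva = kvm_vcpu_gfn_to_hva(vcpu, shmem >> PAGE_SHIFT);

            if (kvm_is_error_hva(hva))
                    return -EINVAL;

            /* Page-granular hva + in-page offset + field offset. */
            *ptrp = (__le64 __user *)(hva + offset_in_page(shmem) +
                                      offsetof(struct sbi_sta_struct, steal));
            return 0;
    }
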
/linux-master/arch/s390/include/asm/pgtable.h
   1305   int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
   1306   int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
/linux-master/arch/s390/kvm/gaccess.c
    804   unsigned long hva;   (local)
    810   hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
    811   if (kvm_is_error_hva(hva))
    815   r = get_guest_storage_key(current->mm, hva, &storage_key);
    867   unsigned long hva;   (local)
    877   hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gpa));
    879   r = get_guest_storage_key(current->mm, hva, &storage_key);
   1002   hva_t hva;   (local)
   1007   hva = gfn_to_hva_memslot_prot(slot, gfn, &writable);
   1009   if (kvm_is_error_hva(hva))
   1179   hva_t hva;   (local)
          [more matches in this file omitted]
/linux-master/arch/s390/kvm/kvm-s390.c
   2073   uint64_t hva;   (local)
   2094   hva = gfn_to_hva(kvm, args->start_gfn + i);
   2095   if (kvm_is_error_hva(hva)) {
   2100   r = get_guest_storage_key(current->mm, hva, &keys[i]);
   2121   uint64_t hva;   (local)
   2153   hva = gfn_to_hva(kvm, args->start_gfn + i);
   2154   if (kvm_is_error_hva(hva)) {
   2165   r = set_guest_storage_key(current->mm, hva, keys[i], 0);
   2167   r = fixup_user_fault(current->mm, hva,
   2194   unsigned long pgstev, hva, cur_gfn;   (local)
   2251   unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;   (local)
   2368   unsigned long hva, mask, pgstev, i;   (local)
   4645   hva_t hva;   (local)
          [more matches in this file omitted]
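
The kvm-s390.c hits at 2094-2167 are the storage-key ioctls: every guest frame is translated to an hva, and the s390 mm helpers then operate on the host mapping. A stripped-down sketch of the read side (the real KVM_S390_GET_SKEYS path also takes srcu and bounds-checks the arguments):

    #include <linux/kvm_host.h>
    #include <asm/pgtable.h>

    static int read_skeys(struct kvm *kvm, gfn_t start_gfn, u8 *keys, long n)
    {
            int r = 0;
            long i;

            mmap_read_lock(current->mm);
            for (i = 0; i < n; i++) {
                    unsigned long hva = gfn_to_hva(kvm, start_gfn + i);

                    if (kvm_is_error_hva(hva)) {
                            r = -EFAULT;
                            break;
                    }
                    r = get_guest_storage_key(current->mm, hva, &keys[i]);
                    if (r)
                            break;
            }
            mmap_read_unlock(current->mm);
            return r;
    }
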
/linux-master/arch/s390/kvm/pci.c
    230   unsigned long hva, bit;   (local)
    252   hva = gfn_to_hva(kvm, gpa_to_gfn((gpa_t)fib->fmt0.aibv));
    253   npages = pin_user_pages_fast(hva, 1, FOLL_WRITE | FOLL_LONGTERM, pages);
    267   hva = gfn_to_hva(kvm, gpa_to_gfn((gpa_t)fib->fmt0.aisb));
    268   npages = pin_user_pages_fast(hva, 1, FOLL_WRITE | FOLL_LONGTERM,
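
pci.c:253 and 268 use pin_user_pages_fast() with FOLL_LONGTERM because the pinned page is handed to hardware (the AIBV/AISB interrupt structures) rather than read once. A sketch of the single-page case, paired with unpin_user_page() on teardown:

    #include <linux/err.h>
    #include <linux/mm.h>
    #include <linux/kvm_host.h>

    static struct page *pin_guest_page(struct kvm *kvm, gpa_t gpa)
    {
            unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
            struct page *page;

            if (kvm_is_error_hva(hva))
                    return ERR_PTR(-EFAULT);

            /* Long-term pin: the page stays put for the life of the device use. */
            if (pin_user_pages_fast(hva, 1, FOLL_WRITE | FOLL_LONGTERM, &page) != 1)
                    return ERR_PTR(-EFAULT);

            return page;
    }
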
/linux-master/arch/s390/kvm/priv.c
   1160   unsigned long gfn, hva, res, pgstev, ptev;   (local)
   1170   hva = gfn_to_hva(vcpu->kvm, gfn);
   1173   if (kvm_is_error_hva(hva))
   1176   nappended = pgste_perform_essa(vcpu->kvm->mm, hva, orc, &ptev, &pgstev);
/linux-master/arch/s390/mm/pgtable.c
   1033   * @hva: the host virtual address of the page whose PGSTE is to be processed
   1042   int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,   (argument)
   1056   vma = vma_lookup(mm, hva);
   1059   ptep = get_locked_pte(mm, hva, &ptl);
   1141   * @hva: the host virtual address of the page whose PGSTE is to be processed
   1148   int set_pgste_bits(struct mm_struct *mm, unsigned long hva,   (argument)
   1156   vma = vma_lookup(mm, hva);
   1159   ptep = get_locked_pte(mm, hva, &ptl);
   1176   * @hva: the host virtual address of the page whose PGSTE is to be processed
   1181   int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep)   (argument)
          [more matches in this file omitted]
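
All three PGSTE helpers above share a prologue: validate the hva against a VMA, refuse hugetlb mappings, then take the locked PTE for the address. Sketched as a common helper (the factoring is mine; the kernel open-codes it in each function):

    #include <linux/mm.h>
    #include <linux/hugetlb.h>

    static pte_t *hva_locked_pte(struct mm_struct *mm, unsigned long hva,
                                 spinlock_t **ptl)
    {
            struct vm_area_struct *vma;

            vma = vma_lookup(mm, hva);      /* exact-containment lookup */
            if (!vma || is_vm_hugetlb_page(vma))
                    return NULL;

            /* Maps the page table and takes its lock; pte_unmap_unlock() undoes. */
            return get_locked_pte(mm, hva, ptl);
    }
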
/linux-master/arch/x86/include/uapi/asm/kvm.h
    574   __u64 hva;   (member of an anonymous union in struct kvm_xen_hvm_attr)
    625   __u64 hva;   (member of an anonymous union in struct kvm_xen_vcpu_attr)
/linux-master/arch/x86/kvm/lapic.c
   2606   void __user *hva;   (local)
   2614   hva = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
   2616   if (IS_ERR(hva)) {
   2617   ret = PTR_ERR(hva);
/linux-master/arch/x86/kvm/mmu/mmu.c
   3070   * event for the hva. This can be done by explicit checking the MMU notifier
   3071   * or by ensuring that KVM already has a valid mapping that covers the hva.
   3086   unsigned long hva;   (local)
   3101   hva = __gfn_to_hva_memslot(slot, gfn);
   3117   pgd = READ_ONCE(*pgd_offset(kvm->mm, hva));
   3121   p4d = READ_ONCE(*p4d_offset(&pgd, hva));
   3125   pud = READ_ONCE(*pud_offset(&p4d, hva));
   3134   pmd = READ_ONCE(*pmd_offset(&pud, hva));
   3283   unsigned long hva = gfn_to_hva_memslot(slot, gfn);   (local)
   3285   send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva, PAGE_SHIFT, current);
          [more matches in this file omitted]
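
Nearly every hit in this listing bottoms out in the same arithmetic, visible here at mmu.c:3101: __gfn_to_hva_memslot() adds the gfn's page offset within the slot to the slot's userspace base. Restated (this mirrors the include/linux/kvm_host.h helper):

    /* hva = slot base + (gfn's position within the slot) * page size */
    static inline unsigned long slot_gfn_to_hva(const struct kvm_memory_slot *slot,
                                                gfn_t gfn)
    {
            return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
    }

The hwpoison path at 3283-3285 then runs the translation for the opposite reason: not to access the memory, but to aim the BUS_MCEERR_AR signal at the right user address.
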
/linux-master/arch/x86/kvm/mmu/mmu_internal.h
    241   hva_t hva;   (member of struct kvm_page_fault)
/linux-master/arch/x86/kvm/svm/nested.c
    874   vmcb12 = map.hva;
    983   vmcb12 = map.hva;
/linux-master/arch/x86/kvm/svm/sev.c
   2887   svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva;
/linux-master/arch/x86/kvm/svm/svm.c
   2270   vmcb12 = map.hva;
   4642   svm_copy_vmrun_state(map_save.hva + 0x400,
   4686   svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400);
   4694   vmcb12 = map.hva;
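
The SVM hits are all consumers of struct kvm_host_map: kvm_vcpu_map() resolves a guest frame to something the host kernel can dereference, and map.hva is that pointer. A sketch of the vmcb12 case; note kvm_vcpu_unmap()'s dirty-flag argument has varied across kernel versions, so treat the call shape as approximate:

    #include <linux/kvm_host.h>
    #include <asm/svm.h>

    static int with_vmcb12(struct kvm_vcpu *vcpu, u64 vmcb12_gpa)
    {
            struct kvm_host_map map;
            struct vmcb *vmcb12;

            if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map))
                    return -EINVAL;

            vmcb12 = map.hva;       /* kernel-usable mapping of the guest VMCB */
            /* ... validate and copy guest state here ... */

            kvm_vcpu_unmap(vcpu, &map, true);       /* true: mark the page dirty */
            return 0;
    }
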

Completed in 325 milliseconds
