Lines matching refs:hva: every reference to the host virtual address variable hva, apparently in KVM/arm64's stage-2 MMU code (arch/arm64/kvm/mmu.c). The number leading each match is its line number in that source file.

946 hva_t hva = memslot->userspace_addr;
949 hva_t reg_end = hva + size;
967 vma = find_vma_intersection(current->mm, hva, reg_end);
974 vm_start = max(hva, vma->vm_start);
981 hva = vm_end;
982 } while (hva < reg_end);
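The cluster at source lines 946-982 is a memslot walk: it visits every VMA intersecting the slot's userspace range [hva, reg_end). A minimal sketch of the loop shape, assuming the kernel's hva_t/min/max helpers; the vm_end clipping and the per-VMA body do not appear in the matches and are reconstructed:

    /* Visit each VMA covering [hva, reg_end), clipped to the slot. */
    hva_t hva = memslot->userspace_addr;
    hva_t reg_end = hva + size;

    do {
            struct vm_area_struct *vma;
            hva_t vm_start, vm_end;

            vma = find_vma_intersection(current->mm, hva, reg_end);
            if (!vma)
                    break;

            vm_start = max(hva, vma->vm_start);   /* clip to the overlap */
            vm_end = min(reg_end, vma->vm_end);   /* reconstructed, not matched */

            /* ... operate on [vm_start, vm_end) ... */

            hva = vm_end;                         /* resume after this VMA */
    } while (hva < reg_end);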
1215 unsigned long hva,
1271 return (hva & ~(map_size - 1)) >= uaddr_start &&
1272 (hva & ~(map_size - 1)) + map_size <= uaddr_end;
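Source lines 1271-1272 are the containment half of fault_supports_stage2_huge_mapping(): a stage-2 block of size map_size may only be installed when the map_size-aligned block containing hva fits entirely inside the memslot's userspace window [uaddr_start, uaddr_end). A standalone demo of that test; the helper name block_fits and the addresses are made up for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    static bool block_fits(unsigned long hva, unsigned long map_size,
                           unsigned long uaddr_start, unsigned long uaddr_end)
    {
            /* Round hva down to its map_size block, as on line 1271. */
            unsigned long base = hva & ~(map_size - 1);

            return base >= uaddr_start && base + map_size <= uaddr_end;
    }

    int main(void)
    {
            /* 2MiB block fully inside the slot: allowed (prints 1). */
            printf("%d\n", block_fits(0x40280000UL, 0x200000UL,
                                      0x40000000UL, 0x40400000UL));
            /* Block overhangs the end of the slot: rejected (prints 0). */
            printf("%d\n", block_fits(0x40280000UL, 0x200000UL,
                                      0x40000000UL, 0x40300000UL));
            return 0;
    }

In mainline the same function also rejects slots whose guest-physical and userspace offsets differ within the block; only the containment check shows up in these matches.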
1276 * Check if the given hva is backed by a transparent huge page (THP) and
1285 unsigned long hva, kvm_pfn_t *pfnp,
1295 if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
1296 int sz = get_user_mapping_size(kvm, hva);
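Source lines 1276-1296 belong to the THP probe: before asking how hva is actually mapped in the user page tables, the code checks that a PMD-sized block would even be legal for this slot. A sketch of the control flow; the early-return shape and the error handling are assumptions, and per the prototype fragment at line 1285 the real helper also realigns *pfnp (and the IPA) when it upgrades:

    /*
     * Sketch: use a PMD-sized stage-2 block only when the slot can
     * support one AND the user mapping at hva is itself at least
     * PMD-sized (i.e. backed by a transparent huge page).
     */
    if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
            int sz = get_user_mapping_size(kvm, hva);

            if (sz < 0)
                    return sz;        /* page-table walk failed (assumed) */
            if (sz >= PMD_SIZE)
                    return PMD_SIZE;  /* map as a huge block */
    }

    return PAGE_SIZE;                 /* fall back to single pages */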
1315 static int get_vma_page_shift(struct vm_area_struct *vma, unsigned long hva)
1327 pa = (vma->vm_pgoff << PAGE_SHIFT) + (hva - vma->vm_start);
1330 if ((hva & (PUD_SIZE - 1)) == (pa & (PUD_SIZE - 1)) &&
1331 ALIGN_DOWN(hva, PUD_SIZE) >= vma->vm_start &&
1332 ALIGN(hva, PUD_SIZE) <= vma->vm_end)
1336 if ((hva & (PMD_SIZE - 1)) == (pa & (PMD_SIZE - 1)) &&
1337 ALIGN_DOWN(hva, PMD_SIZE) >= vma->vm_start &&
1338 ALIGN(hva, PMD_SIZE) <= vma->vm_end)
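get_vma_page_shift() (source lines 1315-1338) decides the largest block a pfn-mapped VMA can support: hva and the backing physical address pa must be congruent modulo the block size, and the whole aligned block must sit inside the VMA. A standalone demo of that congruence test; the kernel's ALIGN() rounds up, spelled ALIGN_UP here, and all values are made up for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    #define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))
    #define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))

    static bool can_block_map(unsigned long hva, unsigned long pa,
                              unsigned long bsize,
                              unsigned long vm_start, unsigned long vm_end)
    {
            return (hva & (bsize - 1)) == (pa & (bsize - 1)) &&
                   ALIGN_DOWN(hva, bsize) >= vm_start &&
                   ALIGN_UP(hva, bsize) <= vm_end;
    }

    int main(void)
    {
            /* Same offset within a 2MiB block, block inside the VMA: 1. */
            printf("%d\n", can_block_map(0x40280000UL, 0x80280000UL,
                                         0x200000UL, 0x40000000UL,
                                         0x40800000UL));
            /* Offsets disagree: fall back to smaller mappings: 0. */
            printf("%d\n", can_block_map(0x40280000UL, 0x80290000UL,
                                         0x200000UL, 0x40000000UL,
                                         0x40800000UL));
            return 0;
    }

Line 1327 shows where pa comes from: the VMA's vm_pgoff plus hva's offset into the VMA.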
1378 struct kvm_memory_slot *memslot, unsigned long hva,
1426 vma = vma_lookup(current->mm, hva);
1428 kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
1441 vma_shift = get_vma_page_shift(vma, hva);
1447 if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
1455 if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE))
1494 kvm_send_hwpoison_signal(hva, vma_shift);
1537 hva, &pfn,
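Source lines 1426-1537 sit inside user_mem_abort(): look up the VMA for the faulting hva (1426-1428), derive a candidate mapping size from it (1441), then downgrade until the memslot can back a block of that size (1447-1455); a poisoned page instead raises SIGBUS at the chosen granularity (1494). A simplified sketch of the downgrade cascade; mainline's switch also handles arm64's contiguous-hint sizes (e.g. CONT_PMD_SHIFT), omitted here:

    /* Fall through to ever-smaller sizes until the slot supports one. */
    switch (vma_shift) {
    case PUD_SHIFT:
            if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
                    break;
            fallthrough;
    case PMD_SHIFT:
            if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE))
                    break;
            fallthrough;
    default:
            vma_shift = PAGE_SHIFT;
    }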
1630 unsigned long hva;
1689 hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
1691 if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
1719 if (kvm_is_error_hva(hva) && kvm_vcpu_dabt_is_cm(vcpu)) {
1745 ret = user_mem_abort(vcpu, fault_ipa, memslot, hva,
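Source lines 1630-1745 are the routing step of the guest-abort handler: translate the faulting gfn to an hva (1689), and use the error cases to pick between the MMIO path and simply skipping a cache-maintenance instruction aimed at an unmapped address (1719). A sketch of that routing; ret, out_unlock and the io_mem_abort() call are inferred from context rather than from the matched lines:

    hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);

    if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
            /*
             * No usable userspace mapping.  A cache maintenance op on
             * such an address can safely be skipped; anything else is
             * treated as MMIO.
             */
            if (kvm_is_error_hva(hva) && kvm_vcpu_dabt_is_cm(vcpu)) {
                    kvm_incr_pc(vcpu);
                    ret = 1;
                    goto out_unlock;
            }

            ret = io_mem_abort(vcpu, fault_ipa);
            goto out_unlock;
    }

On the success path the fault is handed to user_mem_abort() (line 1745); its trailing arguments are truncated in the listing and are not reconstructed here.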
1994 hva_t hva, reg_end;
2008 hva = new->userspace_addr;
2009 reg_end = hva + (new->npages << PAGE_SHIFT);
2026 vma = find_vma_intersection(current->mm, hva, reg_end);
2042 hva = min(reg_end, vma->vm_end);
2043 } while (hva < reg_end);
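The final cluster (source lines 1994-2043) is the registration-time check: when a memslot is created or moved, every VMA backing its userspace range is walked before the slot is accepted. A sketch of that walk; note it advances with min() so a VMA extending past the slot still terminates the loop, and the per-VMA validation body is not part of the matched lines:

    hva_t hva, reg_end;

    hva = new->userspace_addr;
    reg_end = hva + (new->npages << PAGE_SHIFT);

    do {
            struct vm_area_struct *vma;

            vma = find_vma_intersection(current->mm, hva, reg_end);
            if (!vma)
                    break;

            /* ... validate this VMA (body not in the matches) ... */

            hva = min(reg_end, vma->vm_end);
    } while (hva < reg_end);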