Lines matching refs:vmf
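
The matches fall into five functions of the hugetlb fault path (the identifiers all belong to mm/hugetlb.c in recent kernels): hugetlb_vm_op_fault() at 5273; a function spanning roughly 5933-6125 that, from the calls at 6392 and 6606, is evidently hugetlb_wp(); hugetlb_handle_userfault() at 6177-6191; a function at 6212-6415 that the call at 6512 identifies as hugetlb_no_page(); and a caller starting at 6456, presumably hugetlb_fault(), which builds the on-stack struct vm_fault the others receive. Short notes and hedged sketches follow each group below.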

5273 static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
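
The single match at 5273 is hugetlb's ->fault hook in its vm_operations_struct, the only vmf reference outside the fault path proper. hugetlb faults are dispatched to hugetlb_fault() by handle_mm_fault() rather than through the generic ->fault machinery, so this handler exists only to trap misuse; a minimal sketch, assuming the stub is unchanged from mainline:

	static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
	{
		/* never reached: handle_mm_fault() routes hugetlb faults to hugetlb_fault() */
		BUG();
		return 0;
	}
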
5933 struct vm_fault *vmf)
5935 struct vm_area_struct *vma = vmf->vma;
5937 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
5938 pte_t pte = huge_ptep_get(vmf->pte);
5966 set_huge_ptep_writable(vma, vmf->address, vmf->pte);
5992 set_huge_ptep_writable(vma, vmf->address, vmf->pte);
6019 spin_unlock(vmf->ptl);
6020 new_folio = alloc_hugetlb_folio(vma, vmf->address, outside_reserve);
6045 idx = vma_hugecache_offset(h, vma, vmf->address);
6051 vmf->address);
6055 spin_lock(vmf->ptl);
6056 vmf->pte = hugetlb_walk(vma, vmf->address,
6058 if (likely(vmf->pte &&
6059 pte_same(huge_ptep_get(vmf->pte), pte)))
6077 ret = vmf_anon_prepare(vmf);
6081 if (copy_user_large_folio(new_folio, old_folio, vmf->real_address, vma)) {
6087 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, vmf->address,
6088 vmf->address + huge_page_size(h));
6095 spin_lock(vmf->ptl);
6096 vmf->pte = hugetlb_walk(vma, vmf->address, huge_page_size(h));
6097 if (likely(vmf->pte && pte_same(huge_ptep_get(vmf->pte), pte))) {
6101 huge_ptep_clear_flush(vma, vmf->address, vmf->pte);
6103 hugetlb_add_new_anon_rmap(new_folio, vma, vmf->address);
6106 set_huge_pte_at(mm, vmf->address, vmf->pte, newpte,
6112 spin_unlock(vmf->ptl);
6120 restore_reserve_on_error(h, vma, vmf->address, new_folio);
6125 spin_lock(vmf->ptl); /* Caller expects lock to be held */
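
Read in order, the matches from 6019 to 6125 trace hugetlb_wp()'s copy-on-write/unshare slow path: vmf->ptl is dropped so the folio allocation and the user-page copy can sleep, and the new folio is committed only if, after retaking the lock and re-walking the page table, pte_same() shows the PTE did not change in the window. Stitched together from the matched lines above (elisions marked with ...; newpte's construction is not among the matches), the revalidate-under-lock idiom looks like this:

	spin_unlock(vmf->ptl);			/* allocation and copy may sleep */
	new_folio = alloc_hugetlb_folio(vma, vmf->address, outside_reserve);
	...
	copy_user_large_folio(new_folio, old_folio, vmf->real_address, vma);
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, vmf->address,
				vmf->address + huge_page_size(h));
	...
	spin_lock(vmf->ptl);			/* retake the lock and revalidate */
	vmf->pte = hugetlb_walk(vma, vmf->address, huge_page_size(h));
	if (likely(vmf->pte && pte_same(huge_ptep_get(vmf->pte), pte))) {
		/* PTE unchanged while unlocked: safe to install the copy */
		huge_ptep_clear_flush(vma, vmf->address, vmf->pte);
		hugetlb_add_new_anon_rmap(new_folio, vma, vmf->address);
		... /* newpte built here */
		set_huge_pte_at(mm, vmf->address, vmf->pte, newpte,
				huge_page_size(h));
	}
	spin_unlock(vmf->ptl);

The comment at 6125 shows the function then retakes vmf->ptl before returning, since the caller expects to still hold it.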
6177 static inline vm_fault_t hugetlb_handle_userfault(struct vm_fault *vmf,
6188 hugetlb_vma_unlock_read(vmf->vma);
6189 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff);
6191 return handle_userfault(vmf, reason);
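
These four lines are the handoff to userfaultfd. handle_userfault() may block until a userspace monitor resolves the fault, so the VMA read lock and the hashed per-(mapping, pgoff) fault mutex must be released first; the mutex_unlock between the matches at 6189 and 6191 is inferred here, not matched:

	hugetlb_vma_unlock_read(vmf->vma);
	hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff);
	/* inferred: drop the hashed fault mutex before possibly sleeping */
	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
	return handle_userfault(vmf, reason);
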
6212 struct vm_fault *vmf)
6214 struct vm_area_struct *vma = vmf->vma;
6223 u32 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff);
6242 folio = filemap_lock_hugetlb_folio(h, mapping, vmf->pgoff);
6245 if (vmf->pgoff >= size)
6266 if (!hugetlb_pte_stable(h, mm, vmf->pte, vmf->orig_pte)) {
6271 return hugetlb_handle_userfault(vmf, mapping,
6276 ret = vmf_anon_prepare(vmf);
6281 folio = alloc_hugetlb_folio(vma, vmf->address, 0);
6295 if (hugetlb_pte_stable(h, mm, vmf->pte, vmf->orig_pte))
6301 clear_huge_page(&folio->page, vmf->real_address,
6308 vmf->pgoff);
6317 restore_reserve_on_error(h, vma, vmf->address,
6345 if (!hugetlb_pte_stable(h, mm, vmf->pte, vmf->orig_pte)) {
6349 return hugetlb_handle_userfault(vmf, mapping,
6360 if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
6361 if (vma_needs_reservation(h, vma, vmf->address) < 0) {
6366 vma_end_reservation(h, vma, vmf->address);
6369 vmf->ptl = huge_pte_lock(h, mm, vmf->pte);
6372 if (!pte_same(huge_ptep_get(vmf->pte), vmf->orig_pte))
6376 hugetlb_add_new_anon_rmap(folio, vma, vmf->address);
6385 if (unlikely(pte_marker_uffd_wp(vmf->orig_pte)))
6387 set_huge_pte_at(mm, vmf->address, vmf->pte, new_pte, huge_page_size(h));
6390 if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
6392 ret = hugetlb_wp(folio, vmf);
6395 spin_unlock(vmf->ptl);
6412 spin_unlock(vmf->ptl);
6415 restore_reserve_on_error(h, vma, vmf->address, folio);
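
The 6212-6415 group is hugetlb_no_page(), the path for faults with no PTE present: the folio is looked up in the page cache by vmf->pgoff (6242); on a miss the fault is either handed to userfaultfd (6271, 6349, each guarded by a hugetlb_pte_stable() recheck against vmf->orig_pte) or a fresh folio is allocated and cleared (6281, 6301). The commit again happens under vmf->ptl with a pte_same() recheck, and a private write fault falls through into hugetlb_wp() at 6392 with the lock still held. A sketch of the commit step, assuming make_huge_pte() with a writable flag as the elided PTE constructor:

	vmf->ptl = huge_pte_lock(h, mm, vmf->pte);
	/* recheck under the lock: a racing fault may have filled the PTE */
	if (!pte_same(huge_ptep_get(vmf->pte), vmf->orig_pte))
		goto backout;

	hugetlb_add_new_anon_rmap(folio, vma, vmf->address);
	new_pte = make_huge_pte(vma, &folio->page, writable);	/* assumed */
	/* a uffd-wp marker left in the empty PTE must survive population */
	if (unlikely(pte_marker_uffd_wp(vmf->orig_pte)))
		new_pte = huge_pte_mkuffd_wp(new_pte);
	set_huge_pte_at(mm, vmf->address, vmf->pte, new_pte, huge_page_size(h));
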
6456 struct vm_fault vmf = {
6477 hash = hugetlb_fault_mutex_hash(mapping, vmf.pgoff);
6482 * until finished with vmf.pte. This prevents huge_pmd_unshare from
6483 * being called elsewhere and making the vmf.pte no longer valid.
6486 vmf.pte = huge_pte_alloc(mm, vma, vmf.address, huge_page_size(h));
6487 if (!vmf.pte) {
6493 vmf.orig_pte = huge_ptep_get(vmf.pte);
6494 if (huge_pte_none_mostly(vmf.orig_pte)) {
6495 if (is_pte_marker(vmf.orig_pte)) {
6497 pte_marker_get(pte_to_swp_entry(vmf.orig_pte));
6512 return hugetlb_no_page(mapping, &vmf);
6518 * vmf.orig_pte could be a migration/hwpoison vmf.orig_pte at this
6524 if (!pte_present(vmf.orig_pte)) {
6525 if (unlikely(is_hugetlb_entry_migration(vmf.orig_pte))) {
6534 migration_entry_wait_huge(vma, vmf.pte);
6536 } else if (unlikely(is_hugetlb_entry_hwpoisoned(vmf.orig_pte)))
6550 !(vma->vm_flags & VM_MAYSHARE) && !huge_pte_write(vmf.orig_pte)) {
6551 if (vma_needs_reservation(h, vma, vmf.address) < 0) {
6556 vma_end_reservation(h, vma, vmf.address);
6559 vmf.pgoff);
6564 vmf.ptl = huge_pte_lock(h, mm, vmf.pte);
6567 if (unlikely(!pte_same(vmf.orig_pte, huge_ptep_get(vmf.pte))))
6571 if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(vmf.pte)) &&
6572 (flags & FAULT_FLAG_WRITE) && !huge_pte_write(vmf.orig_pte)) {
6574 spin_unlock(vmf.ptl);
6581 return handle_userfault(&vmf, VM_UFFD_WP);
6584 vmf.orig_pte = huge_pte_clear_uffd_wp(vmf.orig_pte);
6585 set_huge_pte_at(mm, vmf.address, vmf.pte, vmf.orig_pte,
6591 * hugetlb_wp() requires page locks of pte_page(vmf.orig_pte) and
6595 folio = page_folio(pte_page(vmf.orig_pte));
6605 if (!huge_pte_write(vmf.orig_pte)) {
6606 ret = hugetlb_wp(pagecache_folio, &vmf);
6609 vmf.orig_pte = huge_pte_mkdirty(vmf.orig_pte);
6612 vmf.orig_pte = pte_mkyoung(vmf.orig_pte);
6613 if (huge_ptep_set_access_flags(vma, vmf.address, vmf.pte, vmf.orig_pte,
6615 update_mmu_cache(vma, vmf.address, vmf.pte);
6621 spin_unlock(vmf.ptl);
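
The final group, opening with the on-stack vm_fault at 6456, is presumably hugetlb_fault() itself. It hashes into the fault mutex (6477), allocates or walks the PTE under the VMA lock (6486), and then dispatches: empty or marker PTEs go to hugetlb_no_page() (6512), non-present entries wait on migration or fail for hwpoison (6524-6536), userfaultfd-wp write faults go to handle_userfault() (6571-6581), and remaining write/unshare faults go to hugetlb_wp() (6605-6606) before the access flags are refreshed (6609-6615). Only the initializer's opening brace matched at 6456; a sketch of it, with every field inferred from the accesses in the matches above and huge_page_mask() assumed for the alignment:

	struct vm_fault vmf = {
		.vma		= vma,
		.address	= address & huge_page_mask(h),	/* hugepage-aligned */
		.real_address	= address,	/* unaligned, e.g. for copy_user_large_folio() */
		.flags		= flags,
		.pgoff		= vma_hugecache_offset(h, vma,
					address & huge_page_mask(h)),
	};

vmf.pte, vmf.orig_pte, and vmf.ptl are then filled in as the fault proceeds (6486, 6493, 6564) rather than at initialization.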