Lines Matching refs:vmf

918 static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
921 struct vm_area_struct *vma = vmf->vma;
924 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
945 clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
953 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
954 if (unlikely(!pmd_none(*vmf->pmd))) {
965 spin_unlock(vmf->ptl);
968 ret = handle_userfault(vmf, VM_UFFD_MISSING);
977 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
978 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
979 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
982 spin_unlock(vmf->ptl);
990 spin_unlock(vmf->ptl);
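The matches above (source lines 918-990) trace __do_huge_pmd_anonymous_page(): the new huge page is cleared at the fault address, the PMD lock is taken through vmf->ptl, and an empty PMD either has the fault handed to userfaultfd or receives the new entry via a pagetable deposit, set_pmd_at() and update_mmu_cache_pmd(). A condensed sketch of that flow, reconstructed from the matched lines; surrounding declarations, charge/rmap accounting and error unwinding are elided, and anything not shown by the matches is marked as an assumption:

	struct vm_area_struct *vma = vmf->vma;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;

	clear_huge_page(page, vmf->address, HPAGE_PMD_NR);

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_none(*vmf->pmd))) {
		/* someone else populated the PMD first: back out (cleanup elided) */
		spin_unlock(vmf->ptl);
	} else if (userfaultfd_missing(vma)) {		/* assumption: guard as in the full source */
		spin_unlock(vmf->ptl);
		ret = handle_userfault(vmf, VM_UFFD_MISSING);
	} else {
		pmd_t entry = mk_huge_pmd(page, vma->vm_page_prot);	/* assumption: entry setup elided in the matches */

		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
		update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
		spin_unlock(vmf->ptl);
	}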
1049 vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
1051 struct vm_area_struct *vma = vmf->vma;
1054 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1059 ret = vmf_anon_prepare(vmf);
1064 if (!(vmf->flags & FAULT_FLAG_WRITE) &&
1080 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1082 if (pmd_none(*vmf->pmd)) {
1085 spin_unlock(vmf->ptl);
1088 spin_unlock(vmf->ptl);
1090 ret = handle_userfault(vmf, VM_UFFD_MISSING);
1094 haddr, vmf->pmd, zero_folio);
1095 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1096 spin_unlock(vmf->ptl);
1099 spin_unlock(vmf->ptl);
1111 return __do_huge_pmd_anonymous_page(vmf, &folio->page, gfp);
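Source lines 1049-1111 cover the entry point do_huge_pmd_anonymous_page(): after vmf_anon_prepare(), a read fault that qualifies for the shared huge zero folio installs it under the PMD lock (or bounces a missing-page fault to userfaultfd), while a write fault falls through to __do_huge_pmd_anonymous_page(). Condensed sketch; folio allocation and the zero-folio eligibility checks are elided, and the set_huge_zero_folio() name is inferred from the zero_folio continuation on line 1094 rather than shown by the matches:

	struct vm_area_struct *vma = vmf->vma;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;

	ret = vmf_anon_prepare(vmf);
	if (ret)
		return ret;

	if (!(vmf->flags & FAULT_FLAG_WRITE) /* && zero folio is usable (checks elided) */) {
		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
		if (pmd_none(*vmf->pmd)) {
			if (userfaultfd_missing(vma)) {	/* assumption: guard as in the full source */
				spin_unlock(vmf->ptl);
				ret = handle_userfault(vmf, VM_UFFD_MISSING);
			} else {
				set_huge_zero_folio(pgtable, vma->vm_mm, vma,
						    haddr, vmf->pmd, zero_folio);
				update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
				spin_unlock(vmf->ptl);
			}
		} else {
			spin_unlock(vmf->ptl);
		}
		return ret;
	}
	return __do_huge_pmd_anonymous_page(vmf, &folio->page, gfp);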
1163 * @vmf: Structure describing the fault
1171 vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
1173 unsigned long addr = vmf->address & PMD_MASK;
1174 struct vm_area_struct *vma = vmf->vma;
1200 insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
1252 * @vmf: Structure describing the fault
1260 vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
1262 unsigned long addr = vmf->address & PUD_MASK;
1263 struct vm_area_struct *vma = vmf->vma;
1282 insert_pfn_pud(vma, addr, vmf->pud, pfn, write);
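Source lines 1163-1282 belong to the two PFN-insertion helpers, vmf_insert_pfn_pmd() and vmf_insert_pfn_pud(): each masks vmf->address down to the mapping size and hands vmf->pmd / vmf->pud to the internal insert_pfn_pmd()/insert_pfn_pud() routine. Sketch of the PMD variant only; the pgprot sanity checks and pagetable preallocation that sit between the matched lines are elided:

	vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
	{
		unsigned long addr = vmf->address & PMD_MASK;
		struct vm_area_struct *vma = vmf->vma;
		pgprot_t pgprot = vma->vm_page_prot;
		pgtable_t pgtable = NULL;

		/* ... sanity checks and pagetable preallocation elided ... */

		insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
		return VM_FAULT_NOPAGE;
	}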
1499 void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
1501 bool write = vmf->flags & FAULT_FLAG_WRITE;
1503 vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
1504 if (unlikely(!pud_same(*vmf->pud, orig_pud)))
1507 touch_pud(vmf->vma, vmf->address, vmf->pud, write);
1509 spin_unlock(vmf->ptl);
1513 void huge_pmd_set_accessed(struct vm_fault *vmf)
1515 bool write = vmf->flags & FAULT_FLAG_WRITE;
1517 vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1518 if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd)))
1521 touch_pmd(vmf->vma, vmf->address, vmf->pmd, write);
1524 spin_unlock(vmf->ptl);
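Source lines 1499-1524 are the two access-bit handlers, huge_pud_set_accessed() and huge_pmd_set_accessed(). Both follow the same pattern: take the PUD/PMD lock into vmf->ptl, confirm the entry has not changed underneath us (pud_same()/pmd_same() against the original value), mark it accessed through touch_pud()/touch_pmd(), and unlock. The PMD variant, reconstructed almost verbatim from the matches (the goto label stands in for the unmatched line in between):

	void huge_pmd_set_accessed(struct vm_fault *vmf)
	{
		bool write = vmf->flags & FAULT_FLAG_WRITE;

		vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
		if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd)))
			goto unlock;		/* entry changed under us: nothing to update */

		touch_pmd(vmf->vma, vmf->address, vmf->pmd, write);

	unlock:
		spin_unlock(vmf->ptl);
	}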
1527 vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
1529 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
1530 struct vm_area_struct *vma = vmf->vma;
1533 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1534 pmd_t orig_pmd = vmf->orig_pmd;
1536 vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
1542 spin_lock(vmf->ptl);
1544 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
1545 spin_unlock(vmf->ptl);
1559 spin_unlock(vmf->ptl);
1561 spin_lock(vmf->ptl);
1562 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
1563 spin_unlock(vmf->ptl);
1595 spin_unlock(vmf->ptl);
1600 if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
1601 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1602 spin_unlock(vmf->ptl);
1608 spin_unlock(vmf->ptl);
1610 __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
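Source lines 1527-1610 follow do_huge_pmd_wp_page(), the write-protect (CoW/unshare) path: vmf->ptl is taken via pmd_lockptr() + spin_lock(), the handler bails if vmf->orig_pmd is no longer installed, and the matches around 1559-1563 show the lock being dropped and retaken (with another pmd_same() check) around the folio lock. If the folio can be reused in place, the PMD is made young and writable with pmdp_set_access_flags() + update_mmu_cache_pmd(); otherwise the handler unlocks and calls __split_huge_pmd() so the fault is retried on PTEs. Condensed sketch; the folio reference/mapcount reuse test is stood in for by a placeholder and is not part of the matches:

	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
	struct vm_area_struct *vma = vmf->vma;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	pmd_t orig_pmd = vmf->orig_pmd;

	vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
	spin_lock(vmf->ptl);
	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
		spin_unlock(vmf->ptl);
		return 0;
	}

	if (can_reuse_folio) {			/* assumption: placeholder for the reuse checks */
		if (!unshare) {			/* an unshare fault does not need write access */
			pmd_t entry = pmd_mkyoung(orig_pmd);

			entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
			if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
				update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
		}
		spin_unlock(vmf->ptl);
		return 0;
	}

	/* cannot reuse in place: drop the lock, split the PMD and retry on PTEs */
	spin_unlock(vmf->ptl);
	__split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
	return VM_FAULT_FALLBACK;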
1645 vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
1647 struct vm_area_struct *vma = vmf->vma;
1648 pmd_t oldpmd = vmf->orig_pmd;
1651 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1657 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1658 if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
1659 spin_unlock(vmf->ptl);
1671 can_change_pmd_writable(vma, vmf->address, pmd))
1689 target_nid = numa_migrate_prep(folio, vmf, haddr, nid, &flags);
1695 spin_unlock(vmf->ptl);
1704 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1705 if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
1706 spin_unlock(vmf->ptl);
1724 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
1725 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1726 spin_unlock(vmf->ptl);
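Source lines 1645-1726 trace do_huge_pmd_numa_page(), the NUMA hinting fault handler: it takes the PMD lock and verifies vmf->orig_pmd is still in place, asks numa_migrate_prep() for a target node, drops the lock for the migration attempt, and when the folio is not migrated it retakes the lock, re-checks pmd_same(), and rewrites a present PMD with set_pmd_at() + update_mmu_cache_pmd(). Condensed sketch; the migration call, the writability decision (can_change_pmd_writable() on line 1671) and fault statistics are elided or hedged:

	struct vm_area_struct *vma = vmf->vma;
	pmd_t oldpmd = vmf->orig_pmd;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	pmd_t pmd;

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
		spin_unlock(vmf->ptl);
		return 0;		/* the PMD changed under us; let the fault retry */
	}

	/* choose a destination node; may decide not to migrate at all */
	target_nid = numa_migrate_prep(folio, vmf, haddr, nid, &flags);
	spin_unlock(vmf->ptl);

	/* ... migration attempt elided ... */

	/* not migrated: put back a present (and possibly writable) PMD */
	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
		spin_unlock(vmf->ptl);
		return 0;
	}
	pmd = pmd_modify(oldpmd, vma->vm_page_prot);	/* assumption: mirrors the restore path of the full source */
	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
	update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
	spin_unlock(vmf->ptl);
	return 0;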