Lines matching refs: vmf

830 static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter)
844 vto = kmap_atomic(vmf->cow_page);
845 copy_user_page(vto, kaddr, vmf->address, vmf->cow_page);
869 static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
873 struct address_space *mapping = vmf->vma->vm_file->f_mapping;
876 bool dirty = write && !dax_fault_is_synchronous(iter, vmf->vma);
898 dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address,
1186 static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf,
1190 unsigned long vaddr = vmf->address;
1194 *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE);
1196 ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
1197 trace_dax_load_hole(inode, vmf, ret);
1202 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
1205 struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1206 unsigned long pmd_addr = vmf->address & PMD_MASK;
1207 struct vm_area_struct *vma = vmf->vma;
1215 zero_folio = mm_get_huge_zero_folio(vmf->vma->vm_mm);
1221 *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn,
1230 ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1231 if (!pmd_none(*(vmf->pmd))) {
1237 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
1240 pmd_entry = mk_pmd(&zero_folio->page, vmf->vma->vm_page_prot);
1242 set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
1244 trace_dax_pmd_load_hole(inode, vmf, zero_folio, *entry);
1250 trace_dax_pmd_load_hole_fallback(inode, vmf, zero_folio, *entry);
1254 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
1603 static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf,
1612 clear_user_highpage(vmf->cow_page, vmf->address);
1615 error = copy_cow_page_dax(vmf, iter);
1626 __SetPageUptodate(vmf->cow_page);
1627 ret = finish_fault(vmf);
1635 * @vmf: vm fault instance
1642 static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
1656 if (!pmd && vmf->cow_page)
1657 return dax_fault_cow_page(vmf, iter);
1663 return dax_load_hole(xas, vmf, iter, entry);
1664 return dax_pmd_load_hole(xas, vmf, iter, entry);
1676 *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, entry_flags);
1684 if (dax_fault_is_synchronous(iter, vmf->vma))
1689 return vmf_insert_pfn_pmd(vmf, pfn, write);
1693 return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
1694 return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
1697 static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
1700 struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1701 XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
1704 .pos = (loff_t)vmf->pgoff << PAGE_SHIFT,
1712 trace_dax_pte_fault(iter.inode, vmf, ret);
1723 if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
1738 if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
1749 ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false);
1753 count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
1769 trace_dax_pte_fault_done(iter.inode, vmf, ret);
1774 static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas,
1777 unsigned long pmd_addr = vmf->address & PMD_MASK;
1778 bool write = vmf->flags & FAULT_FLAG_WRITE;
1786 if ((vmf->pgoff & PG_PMD_COLOUR) !=
1787 ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
1791 if (write && !(vmf->vma->vm_flags & VM_SHARED))
1795 if (pmd_addr < vmf->vma->vm_start)
1797 if ((pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
1807 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1810 struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1811 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
1821 if (vmf->flags & FAULT_FLAG_WRITE)
1831 trace_dax_pmd_fault(iter.inode, vmf, max_pgoff, 0);
1838 if (dax_fault_check_fallback(vmf, &xas, max_pgoff))
1859 if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
1860 !pmd_devmap(*vmf->pmd)) {
1870 ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true);
1879 split_huge_pmd(vmf->vma, vmf->pmd, vmf->address);
1883 trace_dax_pmd_fault_done(iter.inode, vmf, max_pgoff, ret);
1887 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1896 * @vmf: The description of the fault
1907 vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order,
1911 return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
1913 return dax_iomap_pmd_fault(vmf, pfnp, ops);
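
dax_iomap_fault() above is the exported entry point that a filesystem's vm_operations_struct fault handlers normally forward to: order 0 takes the dax_iomap_pte_fault() path and PMD_ORDER the dax_iomap_pmd_fault() path. Below is a minimal caller-side sketch of that wiring under assumed names (example_iomap_ops and the example_* functions are hypothetical; the real callers live in the individual filesystems such as ext4 and XFS):

#include <linux/dax.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>

/* Hypothetical wrapper, not part of fs/dax.c: forward a fault of the given
 * order to dax_iomap_fault(), using an assumed filesystem-provided iomap_ops. */
static vm_fault_t example_dax_huge_fault(struct vm_fault *vmf, unsigned int order)
{
	pfn_t pfn;

	/* order 0 selects the PTE path, PMD_ORDER the huge-page path */
	return dax_iomap_fault(vmf, order, &pfn, NULL, &example_iomap_ops);
}

/* ->fault is always a single-page (order 0) fault */
static vm_fault_t example_dax_fault(struct vm_fault *vmf)
{
	return example_dax_huge_fault(vmf, 0);
}
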
1921 * @vmf: The description of the fault
1929 dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
1931 struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1932 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
1943 trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
1951 ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
1954 ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
1959 trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
1965 * @vmf: The description of the fault
1973 vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, unsigned int order,
1977 loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
1980 err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
1983 return dax_insert_pfn_mkwrite(vmf, pfn, order);
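
For a write fault on a MAP_SYNC mapping, dax_iomap_fault() can return VM_FAULT_NEEDDSYNC (see the dax_fault_is_synchronous() check in dax_fault_iter() above), signalling that metadata for the faulted range must be made durable before the PFN may be mapped writable. The filesystem then calls dax_finish_sync_fault(), which, per the lines above, fsyncs the faulted range and re-inserts the PFN via dax_insert_pfn_mkwrite(). A hedged caller-side sketch, again with hypothetical names:

/* Hypothetical pattern, not part of fs/dax.c: complete a synchronous DAX
 * write fault.  example_iomap_ops is an assumed filesystem-provided
 * iomap_ops instance. */
static vm_fault_t example_dax_write_fault(struct vm_fault *vmf, unsigned int order)
{
	pfn_t pfn;
	vm_fault_t ret;

	ret = dax_iomap_fault(vmf, order, &pfn, NULL, &example_iomap_ops);

	/*
	 * VM_FAULT_NEEDDSYNC: the fault was handled, but the mapping is
	 * MAP_SYNC, so on-disk metadata must be flushed before the page is
	 * made writable.  dax_finish_sync_fault() performs the range fsync
	 * and then installs the PFN with write permission.
	 */
	if (ret & VM_FAULT_NEEDDSYNC)
		ret = dax_finish_sync_fault(vmf, order, pfn);

	return ret;
}
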