Lines matching defs:flags (definitions and uses of "flags"; the matches below appear to come from the Linux kernel's mm/gup.c, the GUP implementation). Each match is prefixed with its line number in that file.

104 * @flags: gup flags: these are the FOLL_* flag values.
106 * "grab" names in this file mean, "look at flags to decide whether to use
126 struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags)
130 if (WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == 0))
133 if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)))
136 if (flags & FOLL_GET)
157 if (unlikely((flags & FOLL_LONGTERM) &&
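
The try_grab_folio() matches above (126-157) trace its three flag gates: exactly one of FOLL_GET / FOLL_PIN must be set, PCI P2PDMA pages need an explicit opt-in, and FOLL_LONGTERM refuses folios that are not long-term pinnable. A condensed sketch of that flow, assuming the v6.x mm/gup.c these line numbers appear to match; helper names follow the kernel source, pin-count bookkeeping is elided:

    struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags)
    {
            struct folio *folio;

            /* Caller must request exactly one reference type. */
            if (WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == 0))
                    return NULL;

            /* P2PDMA pages are only followable with FOLL_PCI_P2PDMA. */
            if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)))
                    return NULL;

            if (flags & FOLL_GET)
                    return try_get_folio(page, refs);       /* plain refcounts */

            /* FOLL_PIN path: take the references, then vet the folio. */
            folio = try_get_folio(page, refs);
            if (!folio)
                    return NULL;

            /* Long-term pins must not sit in CMA / ZONE_MOVABLE. */
            if (unlikely((flags & FOLL_LONGTERM) &&
                         !folio_is_longterm_pinnable(folio))) {
                    folio_put_refs(folio, refs);
                    return NULL;
            }
            /* pin accounting (GUP_PIN_COUNTING_BIAS, _pincount) elided */
            return folio;
    }
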
189 static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
191 if (flags & FOLL_PIN) {
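
gup_put_folio() (189-191) is the matching release: FOLL_PIN references unwind the pin accounting before the refcounts drop, while plain FOLL_GET references just put. A sketch; the exact accounting varies by kernel version:

    static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
    {
            if (flags & FOLL_PIN) {
                    node_stat_mod_folio(folio, NR_FOLL_PIN_RELEASED, refs);
                    if (folio_test_large(folio))
                            atomic_sub(refs, &folio->_pincount);
                    else
                            refs *= GUP_PIN_COUNTING_BIAS;  /* small folios bias the refcount */
            }
            folio_put_refs(folio, refs);
    }
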
208 * @flags: gup flags: these are the FOLL_* flag values.
210 * This might not do anything at all, depending on the flags argument.
212 * "grab" names in this file mean, "look at flags to decide whether to use
225 int __must_check try_grab_page(struct page *page, unsigned int flags)
232 if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)))
235 if (flags & FOLL_GET)
237 else if (flags & FOLL_PIN) {
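
try_grab_page() (225-237) repeats the same gates for a single page whose refcount is already known to be stable, returning an errno instead of a folio. Sketched under the same version assumption:

    int __must_check try_grab_page(struct page *page, unsigned int flags)
    {
            struct folio *folio = page_folio(page);

            if (WARN_ON_ONCE(folio_ref_count(folio) <= 0))
                    return -ENOMEM;

            if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)))
                    return -EREMOTEIO;

            if (flags & FOLL_GET)
                    folio_ref_inc(folio);
            else if (flags & FOLL_PIN) {
                    /* One pin: same accounting as try_grab_folio(). */
                    if (folio_test_large(folio)) {
                            folio_ref_inc(folio);
                            atomic_inc(&folio->_pincount);
                    } else {
                            folio_ref_add(folio, GUP_PIN_COUNTING_BIAS);
                    }
                    node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, 1);
            }
            return 0;
    }
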
537 unsigned long addr, unsigned long end, unsigned int flags,
552 if (!pte_access_permitted(pte, flags & FOLL_WRITE))
561 folio = try_grab_folio(page, refs, flags);
566 gup_put_folio(folio, refs, flags);
570 if (!pte_write(pte) && gup_must_unshare(vma, flags, &folio->page)) {
571 gup_put_folio(folio, refs, flags);
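
The gup_hugepte() matches (537-571) show the discipline every lockless GUP walker repeats: check permissions against FOLL_WRITE, speculatively grab, re-read the entry, and undo the grab if the entry changed or if a read-only grab would expose a COW-shared anonymous page. As a fragment (declarations elided, names as in the matches):

    if (!pte_access_permitted(pte, flags & FOLL_WRITE))
            return 0;

    folio = try_grab_folio(page, refs, flags);
    if (!folio)
            return 0;

    if (unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) {
            gup_put_folio(folio, refs, flags);      /* entry changed under us */
            return 0;
    }

    if (!pte_write(pte) && gup_must_unshare(vma, flags, &folio->page)) {
            gup_put_folio(folio, refs, flags);      /* needs an unshare fault first */
            return 0;
    }
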
590 unsigned long end, unsigned int flags,
601 ret = gup_hugepte(vma, ptep, sz, addr, end, flags, pages, nr);
611 unsigned int flags,
628 flags, &page, &nr);
644 unsigned long end, unsigned int flags,
652 unsigned int flags,
661 unsigned int flags, unsigned long address)
663 if (!(flags & FOLL_DUMP))
688 int flags, struct follow_page_context *ctx)
698 if ((flags & FOLL_WRITE) && !pud_write(pud))
715 if (!(flags & (FOLL_GET | FOLL_PIN)))
718 if (flags & FOLL_TOUCH)
719 touch_pud(vma, addr, pudp, flags & FOLL_WRITE);
729 gup_must_unshare(vma, flags, page))
732 ret = try_grab_page(page, flags);
744 unsigned int flags)
751 if (!(flags & FOLL_FORCE))
781 unsigned int flags,
792 if ((flags & FOLL_WRITE) &&
793 !can_follow_write_pmd(pmdval, page, vma, flags))
797 if ((flags & FOLL_DUMP) && is_huge_zero_pmd(pmdval))
800 if (pmd_protnone(*pmd) && !gup_can_follow_protnone(vma, flags))
803 if (!pmd_write(pmdval) && gup_must_unshare(vma, flags, page))
806 VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
809 ret = try_grab_page(page, flags);
814 if (pmd_trans_huge(pmdval) && (flags & FOLL_TOUCH))
815 touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
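
follow_huge_pmd() (781-815) runs a fixed gauntlet before taking the reference: permission and policy checks first, the grab next, access-bit updates (FOLL_TOUCH) only after the grab succeeds. Reconstructed from the matches above, not verbatim:

    if ((flags & FOLL_WRITE) &&
        !can_follow_write_pmd(pmdval, page, vma, flags))
            return NULL;                    /* not writable, no FOLL_FORCE override */

    if ((flags & FOLL_DUMP) && is_huge_zero_pmd(pmdval))
            return ERR_PTR(-EFAULT);        /* never dump the huge zero page */

    if (pmd_protnone(*pmd) && !gup_can_follow_protnone(vma, flags))
            return NULL;                    /* leave NUMA-hinting faults alone */

    if (!pmd_write(pmdval) && gup_must_unshare(vma, flags, page))
            return ERR_PTR(-EMLINK);        /* caller must trigger unsharing */

    ret = try_grab_page(page, flags);
    if (ret)
            return ERR_PTR(ret);

    if (pmd_trans_huge(pmdval) && (flags & FOLL_TOUCH))
            touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
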
827 int flags, struct follow_page_context *ctx)
834 unsigned int flags,
842 pte_t *pte, unsigned int flags)
844 if (flags & FOLL_TOUCH) {
848 if (flags & FOLL_WRITE)
865 unsigned int flags)
872 if (!(flags & FOLL_FORCE))
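
The two can_follow_write_*() matches (744/751, 865/872) carry the FOLL_FORCE rule: a read-only entry may still satisfy a forced write, but only in a private, MAYWRITE mapping whose anonymous page is exclusively owned, i.e. COW has already broken sharing. A simplified sketch of can_follow_write_pte() (the pmd variant is analogous; the real helper has additional soft-dirty and uffd-wp checks):

    static inline bool can_follow_write_pte(pte_t pte, struct page *page,
                                            struct vm_area_struct *vma,
                                            unsigned int flags)
    {
            if (pte_write(pte))
                    return true;            /* plainly writable */

            if (!(flags & FOLL_FORCE))
                    return false;           /* no override requested */

            /* FOLL_FORCE never applies to shared or non-writable mappings. */
            if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
                    return false;
            if (!(vma->vm_flags & VM_MAYWRITE))
                    return false;

            /* Only an exclusively owned anonymous page (COW already done). */
            return page && PageAnon(page) && PageAnonExclusive(page);
    }
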
901 unsigned long address, pmd_t *pmd, unsigned int flags,
911 if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
917 return no_page_table(vma, flags, address);
921 if (pte_protnone(pte) && !gup_can_follow_protnone(vma, flags))
930 if ((flags & FOLL_WRITE) &&
931 !can_follow_write_pte(pte, page, vma, flags)) {
936 if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) {
948 if (flags & FOLL_DUMP) {
957 ret = follow_pfn_pte(vma, address, ptep, flags);
963 if (!pte_write(pte) && gup_must_unshare(vma, flags, page)) {
968 VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
972 ret = try_grab_page(page, flags);
983 if (flags & FOLL_PIN) {
991 if (flags & FOLL_TOUCH) {
992 if ((flags & FOLL_WRITE) &&
1009 return no_page_table(vma, flags, address);
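
Note that the match at 911 is cut mid-expression: in the source the comparison completes as (FOLL_PIN | FOLL_GET), i.e. follow_page_pte() treats a caller requesting both reference types at once as a bug. The rest of the ladder (901-1009), condensed, with locking, devmap and FOLL_PIN/FOLL_TOUCH bookkeeping elided:

    if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
                     (FOLL_PIN | FOLL_GET)))
            return ERR_PTR(-EINVAL);        /* GET and PIN are mutually exclusive */

    if (pte_protnone(pte) && !gup_can_follow_protnone(vma, flags))
            return no_page_table(vma, flags, address);

    if ((flags & FOLL_WRITE) &&
        !can_follow_write_pte(pte, page, vma, flags))
            return NULL;                    /* NULL makes the caller fault */

    if (!page && (flags & FOLL_DUMP))
            return ERR_PTR(-EFAULT);        /* hole: fail hard for core dumps */

    if (!pte_write(pte) && gup_must_unshare(vma, flags, page))
            return ERR_PTR(-EMLINK);        /* needs an unshare fault first */

    ret = try_grab_page(page, flags);
    if (ret)
            return ERR_PTR(ret);
    /* then FOLL_TOUCH: mark accessed, and dirty when FOLL_WRITE (991-992) */
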
1014 unsigned int flags,
1025 return no_page_table(vma, flags, address);
1027 return no_page_table(vma, flags, address);
1030 address, PMD_SHIFT, flags, ctx);
1033 page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
1037 return no_page_table(vma, flags, address);
1040 return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
1042 if (pmd_protnone(pmdval) && !gup_can_follow_protnone(vma, flags))
1043 return no_page_table(vma, flags, address);
1049 return no_page_table(vma, flags, address);
1053 return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
1055 if (pmd_trans_huge(pmdval) && (flags & FOLL_SPLIT_PMD)) {
1060 follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
1062 page = follow_huge_pmd(vma, address, pmd, flags, ctx);
1069 unsigned int flags,
1080 return no_page_table(vma, flags, address);
1083 address, PUD_SHIFT, flags, ctx);
1086 page = follow_huge_pud(vma, address, pudp, flags, ctx);
1090 return no_page_table(vma, flags, address);
1093 return no_page_table(vma, flags, address);
1095 return follow_pmd_mask(vma, address, pudp, flags, ctx);
1100 unsigned int flags,
1111 address, P4D_SHIFT, flags, ctx);
1114 return no_page_table(vma, flags, address);
1116 return follow_pud_mask(vma, address, p4dp, flags, ctx);
1123 * @flags: flags modifying lookup behaviour
1127 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
1144 unsigned long address, unsigned int flags,
1158 address, PGDIR_SHIFT, flags, ctx);
1160 page = no_page_table(vma, flags, address);
1162 page = follow_p4d_mask(vma, address, pgd, flags, ctx);
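
The follow_*_mask() matches (1014-1162) outline a strictly top-down walk: each level either resolves a leaf entry itself or recurses one level down, and every absent or bad entry funnels into no_page_table(). Schematically, with the leaf handling noted where the matches place it:

    /*
     * follow_page_mask()            pgd     (1144-1162)
     *   -> follow_p4d_mask()        p4d     (1100-1116)
     *     -> follow_pud_mask()      pud     (1069-1095, huge PUD leaf here)
     *       -> follow_pmd_mask()    pmd     (1014-1062, huge/devmap PMD here)
     *         -> follow_page_pte()  pte     (901-1009)
     */

    /* Simplified no_page_table(); the real helper also checks whether the
     * mapping could ever contain data at this address before failing hard. */
    static struct page *no_page_table(struct vm_area_struct *vma,
                                      unsigned int flags, unsigned long address)
    {
            if (!(flags & FOLL_DUMP))
                    return NULL;            /* ordinary lookups just miss */
            return ERR_PTR(-EFAULT);        /* FOLL_DUMP: core dumps want an error */
    }
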
1247 * mmap_lock must be held on entry. If @flags has FOLL_UNLOCKABLE but not
1252 unsigned long address, unsigned int *flags, bool unshare,
1258 if (*flags & FOLL_NOFAULT)
1260 if (*flags & FOLL_WRITE)
1262 if (*flags & FOLL_REMOTE)
1264 if (*flags & FOLL_UNLOCKABLE) {
1272 if (*flags & FOLL_INTERRUPTIBLE)
1275 if (*flags & FOLL_NOWAIT)
1277 if (*flags & FOLL_TRIED) {
1311 int err = vm_fault_to_errno(ret, *flags);
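
faultin_page() (1252-1311) translates the GUP request into handle_mm_fault() FAULT_FLAG_* bits; the matches list each translation. A condensed sketch (the unshare and VM_FAULT_RETRY handling around it is elided):

    unsigned int fault_flags = 0;
    vm_fault_t ret;

    if (*flags & FOLL_NOFAULT)
            return -EFAULT;                 /* caller forbids faulting */
    if (*flags & FOLL_WRITE)
            fault_flags |= FAULT_FLAG_WRITE;
    if (*flags & FOLL_REMOTE)
            fault_flags |= FAULT_FLAG_REMOTE;
    if (*flags & FOLL_UNLOCKABLE) {
            fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
            /* Early exit on non-fatal signals is opt-in. */
            if (*flags & FOLL_INTERRUPTIBLE)
                    fault_flags |= FAULT_FLAG_INTERRUPTIBLE;
    }
    if (*flags & FOLL_NOWAIT)
            fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
    if (*flags & FOLL_TRIED)
            fault_flags |= FAULT_FLAG_TRIED;  /* second attempt, don't retry again */

    ret = handle_mm_fault(vma, address, fault_flags, NULL);
    if (ret & VM_FAULT_ERROR) {
            int err = vm_fault_to_errno(ret, *flags);   /* 1311 */
            if (err)
                    return err;
    }
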
1469 * @gup_flags: flags modifying pin behaviour
1704 * @fault_flags: flags to pass down to handle_mm_fault()
1790 static bool gup_signal_pending(unsigned int flags)
1795 if (!(flags & FOLL_INTERRUPTIBLE))
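
gup_signal_pending() (1790-1795) is small enough to show nearly whole; it is the reason FOLL_INTERRUPTIBLE exists as a separate bit (fatal signals always abort, non-fatal ones only by opt-in). Sketch:

    static bool gup_signal_pending(unsigned int flags)
    {
            if (fatal_signal_pending(current))
                    return true;            /* fatal signals always interrupt GUP */

            if (!(flags & FOLL_INTERRUPTIBLE))
                    return false;           /* non-fatal: only if the caller opted in */

            return signal_pending(current);
    }
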
1819 unsigned int flags)
1840 if (flags & FOLL_PIN)
1841 mm_set_has_pinned_flag(&mm->flags);
1852 if (pages && !(flags & FOLL_PIN))
1853 flags |= FOLL_GET;
1857 ret = __get_user_pages(mm, start, nr_pages, flags, pages,
1859 if (!(flags & FOLL_UNLOCKABLE)) {
1906 if (gup_signal_pending(flags)) {
1921 ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED,
1956 if (WARN_ON_ONCE(pages_done == 0 && !(flags & FOLL_NOWAIT)))
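
__get_user_pages_locked() (1819-1956) wraps __get_user_pages() in the retry machinery these matches trace: FOLL_PIN flips the mm's has-pinned bit, callers that want pages but did not pin are upgraded to FOLL_GET, and a page whose fault dropped the lock is retried alone with FOLL_TRIED. An abridged fragment (error-code mapping simplified):

    if (flags & FOLL_PIN)
            mm_set_has_pinned_flag(&mm->flags); /* mm->flags bitmap, not FOLL_* */

    if (pages && !(flags & FOLL_PIN))
            flags |= FOLL_GET;                  /* legacy callers still need a ref */

    ret = __get_user_pages(mm, start, nr_pages, flags, pages, locked);

    /* When a fault dropped mmap_lock, check signals between attempts,
     * then retry exactly one page, marked as already tried once: */
    if (gup_signal_pending(flags))
            return pages_done ? pages_done : -EINTR;    /* simplified errno */

    ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED, pages, locked);
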
2091 * flags. VMAs must be already marked with the desired vm_flags, and
2129 * double checks the vma flags, so that it won't mlock pages
2172 * If FOLL_FORCE is set, we only require the "MAY" flags.
2552 unsigned int flags;
2559 flags = memalloc_pin_save();
2572 memalloc_pin_restore(flags);
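
The flags at 2552-2572 are a different kind again: saved allocation-scope state for long-term pinning. Inside a PF_MEMALLOC_PIN scope, allocations avoid ZONE_MOVABLE/CMA so that migrating pages out of unpinnable zones can succeed. A sketch of the wrapper (the migrate-and-retry helper name follows the kernel source; signatures abridged):

    unsigned int flags = memalloc_pin_save();   /* enter PF_MEMALLOC_PIN scope */
    long rc;

    do {
            rc = __get_user_pages_locked(mm, start, nr_pages, pages,
                                         locked, gup_flags);
            if (rc <= 0)
                    break;
            /* Migrate anything that landed in CMA/ZONE_MOVABLE, then retry. */
            rc = check_and_migrate_movable_pages(rc, pages);
    } while (rc == -EAGAIN);

    memalloc_pin_restore(flags);
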
2577 * Check that the given flags are valid for the exported gup/pup interface, and
2578 * update them with the required flags that the caller must have set.
2586 * These flags not allowed to be specified externally to the gup
2632 * @gup_flags: flags modifying lookup behaviour
2714 * @gup_flags: flags modifying lookup behaviour
2819 static bool gup_fast_folio_allowed(struct folio *folio, unsigned int flags)
2830 if ((flags & (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) ==
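
The 2830 match is truncated mid-comparison; in the source the condition continues on the next line. Reconstructed (hedged, from the v6.x gup.c this listing appears to match):

    /* Long-term pins that lack write intent must not take file-backed
     * folios through the lockless fast path (writeback/truncate races). */
    if ((flags & (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) ==
        (FOLL_PIN | FOLL_LONGTERM))
            reject_file_backed = true;
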
2891 unsigned int flags, struct page **pages)
2897 gup_put_folio(folio, 1, flags);
2922 unsigned long end, unsigned int flags, struct page **pages,
2947 if (!pte_access_permitted(pte, flags & FOLL_WRITE))
2951 if (unlikely(flags & FOLL_LONGTERM))
2956 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
2965 folio = try_grab_folio(page, 1, flags);
2971 gup_put_folio(folio, 1, flags);
2975 if (!gup_fast_folio_allowed(folio, flags)) {
2976 gup_put_folio(folio, 1, flags);
2980 if (!pte_write(pte) && gup_must_unshare(NULL, flags, page)) {
2981 gup_put_folio(folio, 1, flags);
2991 if (flags & FOLL_PIN) {
2994 gup_put_folio(folio, 1, flags);
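
gup_fast_pte_range() (2922-2994) is the per-PTE heart of the lockless fast path: the same grab-then-revalidate discipline as above, plus the fast-path-only gup_fast_folio_allowed() screen, and gup_must_unshare() called with a NULL vma because no mmap_lock is held. Condensed (devmap handling and pinned-page accounting elided):

    pte_t pte = ptep_get_lockless(ptep);

    if (!pte_access_permitted(pte, flags & FOLL_WRITE))
            goto pte_unmap;
    /* devmap + FOLL_LONGTERM punts to the slow path (2951) */

    folio = try_grab_folio(page, 1, flags);
    if (!folio)
            goto pte_unmap;

    /* Re-read the PTE; if it changed, the grab was against a stale entry. */
    if (unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) {
            gup_put_folio(folio, 1, flags);
            goto pte_unmap;
    }

    if (!gup_fast_folio_allowed(folio, flags) ||
        (!pte_write(pte) && gup_must_unshare(NULL, flags, page))) {
            gup_put_folio(folio, 1, flags);
            goto pte_unmap;
    }
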
3023 unsigned long end, unsigned int flags, struct page **pages,
3032 unsigned long end, unsigned int flags, struct page **pages, int *nr)
3043 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
3047 if (!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)) {
3048 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
3052 folio = try_grab_folio(page, 1, flags);
3054 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
3068 unsigned long end, unsigned int flags, struct page **pages,
3075 if (!gup_fast_devmap_leaf(fault_pfn, addr, end, flags, pages, nr))
3079 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
3086 unsigned long end, unsigned int flags, struct page **pages,
3093 if (!gup_fast_devmap_leaf(fault_pfn, addr, end, flags, pages, nr))
3097 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
3104 unsigned long end, unsigned int flags, struct page **pages,
3112 unsigned long end, unsigned int flags, struct page **pages,
3121 unsigned long end, unsigned int flags, struct page **pages,
3128 if (!pmd_access_permitted(orig, flags & FOLL_WRITE))
3132 if (unlikely(flags & FOLL_LONGTERM))
3134 return gup_fast_devmap_pmd_leaf(orig, pmdp, addr, end, flags,
3141 folio = try_grab_folio(page, refs, flags);
3146 gup_put_folio(folio, refs, flags);
3150 if (!gup_fast_folio_allowed(folio, flags)) {
3151 gup_put_folio(folio, refs, flags);
3154 if (!pmd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
3155 gup_put_folio(folio, refs, flags);
3165 unsigned long end, unsigned int flags, struct page **pages,
3172 if (!pud_access_permitted(orig, flags & FOLL_WRITE))
3176 if (unlikely(flags & FOLL_LONGTERM))
3178 return gup_fast_devmap_pud_leaf(orig, pudp, addr, end, flags,
3185 folio = try_grab_folio(page, refs, flags);
3190 gup_put_folio(folio, refs, flags);
3194 if (!gup_fast_folio_allowed(folio, flags)) {
3195 gup_put_folio(folio, refs, flags);
3199 if (!pud_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
3200 gup_put_folio(folio, refs, flags);
3210 unsigned long end, unsigned int flags, struct page **pages,
3217 if (!pgd_access_permitted(orig, flags & FOLL_WRITE))
3225 folio = try_grab_folio(page, refs, flags);
3230 gup_put_folio(folio, refs, flags);
3234 if (!pgd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
3235 gup_put_folio(folio, refs, flags);
3239 if (!gup_fast_folio_allowed(folio, flags)) {
3240 gup_put_folio(folio, refs, flags);
3250 unsigned long end, unsigned int flags, struct page **pages,
3269 if (!gup_fast_pmd_leaf(pmd, pmdp, addr, next, flags,
3279 PMD_SHIFT, next, flags, pages, nr) != 1)
3281 } else if (!gup_fast_pte_range(pmd, pmdp, addr, next, flags,
3290 unsigned long end, unsigned int flags, struct page **pages,
3304 if (!gup_fast_pud_leaf(pud, pudp, addr, next, flags,
3309 PUD_SHIFT, next, flags, pages, nr) != 1)
3311 } else if (!gup_fast_pmd_range(pudp, pud, addr, next, flags,
3320 unsigned long end, unsigned int flags, struct page **pages,
3336 P4D_SHIFT, next, flags, pages, nr) != 1)
3338 } else if (!gup_fast_pud_range(p4dp, p4d, addr, next, flags,
3347 unsigned int flags, struct page **pages, int *nr)
3360 if (!gup_fast_pgd_leaf(pgd, pgdp, addr, next, flags,
3365 PGDIR_SHIFT, next, flags, pages, nr) != 1)
3367 } else if (!gup_fast_p4d_range(pgdp, pgd, addr, next, flags,
3374 unsigned int flags, struct page **pages, int *nr)
3393 unsigned long flags;
3418 local_irq_save(flags);
3420 local_irq_restore(flags);
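
The flags at 3393/3418/3420 are not FOLL_* bits at all but saved IRQ state. Disabling interrupts is what makes the lockless walk safe: a racing unmap cannot finish the TLB-shootdown IPI (or, on RCU-table-freeing configs, the grace period) that must complete before the page tables being walked are freed. The bracketing, roughly:

    unsigned long flags;

    local_irq_save(flags);
    gup_fast_pgd_range(start, end, gup_flags, pages, &nr_pinned);
    local_irq_restore(flags);
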
3452 mm_set_has_pinned_flag(&current->mm->flags);
3492 * @gup_flags: flags modifying pin behaviour
3528 * @gup_flags: flags modifying pin behaviour
3560 * @gup_flags: flags modifying pin behaviour
3589 * @gup_flags: flags modifying lookup behaviour
3627 * @gup_flags: flags modifying lookup behaviour