Lines matching defs:pages in mm/gup.c

33 static inline void sanity_check_pinned_pages(struct page **pages,
40 * We only pin anonymous pages if they are exclusive. Once pinned, we
44 * We'd like to verify that our pinned anonymous pages are still mapped
51 for (; npages; npages--, pages++) {
52 struct page *page = *pages;
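
The comment fragments above (lines 33-52) state the invariant behind FOLL_PIN on anonymous memory: only exclusively-mapped anon pages are pinned, and a debug check walks the array to verify they are still exclusive. A minimal sketch of that kind of check, assuming PageAnonExclusive() is the marker (illustrative only, not the exact kernel routine):

    #include <linux/mm.h>
    #include <linux/mmdebug.h>
    #include <linux/page-flags.h>

    /* Illustrative only: warn if a pinned anonymous page is no longer exclusive. */
    static inline void check_pins_are_exclusive(struct page **pages,
                                                unsigned long npages)
    {
            for (; npages; npages--, pages++) {
                    struct folio *folio = page_folio(*pages);

                    if (!folio_test_anon(folio))
                            continue;
                    /* Assumption: checking the head page is enough for this sketch. */
                    VM_WARN_ON_ONCE(!PageAnonExclusive(&folio->page));
            }
    }
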
269 * that such pages can be separately tracked and uniquely handled. In
337 * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
338 * @pages: array of pages to be maybe marked dirty, and definitely released.
339 * @npages: number of pages in the @pages array.
340 * @make_dirty: whether to mark the pages dirty
345 * For each page in the @pages array, make that page (or its head page, if a
347 * listed as clean. In any case, releases all pages using unpin_user_page(),
358 void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
366 unpin_user_pages(pages, npages);
370 sanity_check_pinned_pages(pages, npages);
372 folio = gup_folio_next(pages, npages, i, &nr);
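
Lines 337-372 document the release side of a FOLL_PIN transfer: if the device (or kernel) wrote into the pages, they are marked dirty under the proper lock and then unpinned. A hedged lifecycle sketch (my_dev_read() is a hypothetical helper; pin_user_pages_fast() is assumed as the pinning side):

    #include <linux/errno.h>
    #include <linux/mm.h>

    /* Hypothetical read-from-device path: the user buffer is the DMA target. */
    static int my_dev_read(unsigned long uaddr, int nr_pages, struct page **pages)
    {
            int pinned = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE, pages);

            if (pinned <= 0)
                    return pinned ? pinned : -EFAULT;

            /* ... program the device to write into @pages here ... */

            /* The pages were written: mark them dirty and drop the pins. */
            unpin_user_pages_dirty_lock(pages, pinned, true);
            return 0;
    }
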
408 * @npages: number of consecutive pages to release.
409 * @make_dirty: whether to mark the pages dirty
411 * "gup-pinned page range" refers to a range of pages that has had one of the
415 * its head pages, if a compound page) dirty, if @make_dirty is true, and if the
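
Lines 408-415 refer to the range variant, unpin_user_page_range_dirty_lock(), which releases physically consecutive pages given only the first page and a count. A minimal hedged sketch (hypothetical helper; assumes the range was pinned via one of the pin_user_pages*() calls):

    #include <linux/mm.h>

    /* Hypothetical: drop pins on @npages consecutive pages starting at @first. */
    static void my_put_pinned_range(struct page *first, unsigned long npages,
                                    bool dirty)
    {
            unpin_user_page_range_dirty_lock(first, npages, dirty);
    }
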
443 static void unpin_user_pages_lockless(struct page **pages, unsigned long npages)
451 * fork() and some anonymous pages might now actually be shared --
455 folio = gup_folio_next(pages, npages, i, &nr);
461 * unpin_user_pages() - release an array of gup-pinned pages.
462 * @pages: array of pages to be marked dirty and released.
463 * @npages: number of pages in the @pages array.
465 * For each page in the @pages array, release the page using unpin_user_page().
469 void unpin_user_pages(struct page **pages, unsigned long npages)
476 * If this WARN_ON() fires, then the system *might* be leaking pages (by
483 sanity_check_pinned_pages(pages, npages);
485 folio = gup_folio_next(pages, npages, i, &nr);
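
Lines 461-485 describe unpin_user_pages(), the non-dirtying release for FOLL_PIN references; a common use is backing out of a partial pin. A hedged sketch (hypothetical all-or-nothing helper):

    #include <linux/errno.h>
    #include <linux/mm.h>

    /* Hypothetical: pin every page of the buffer or fail, backing out partial pins. */
    static int my_pin_all(unsigned long uaddr, int nr_pages, struct page **pages)
    {
            int pinned = pin_user_pages_fast(uaddr, nr_pages, 0, pages);

            if (pinned == nr_pages)
                    return 0;
            if (pinned > 0)
                    unpin_user_pages(pages, pinned);
            return pinned < 0 ? pinned : -EFAULT;
    }
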
508 * has touched so far, we don't want to allocate unnecessary pages or
606 * We only care about anon pages in can_follow_write_pte() and don't
617 * Only return device mapping pages in the FOLL_GET or FOLL_PIN
628 /* Avoid special (like zero) pages in core dumps */
797 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
851 * to fail on PROT_NONE-mapped pages.
871 /* user gate pages are read-only */
1068 * Anon pages in shared mappings are surprising: now
1133 * __get_user_pages() - pin user pages in memory
1136 * @nr_pages: number of pages from start to pin
1138 * @pages: array that receives pointers to the pages pinned.
1140 * only intends to ensure the pages are faulted in.
1143 * Returns either number of pages pinned (which may be less than the
1147 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
1148 * -- If nr_pages is >0, and some pages were pinned, returns the number of
1149 * pages pinned. Again, this may be less than nr_pages.
1152 * The caller is responsible for releasing returned @pages, via put_page().
1188 unsigned int gup_flags, struct page **pages,
1200 VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));
1229 pages ? &page : NULL);
1246 * If we have a pending SIGKILL, don't keep faulting pages and
1275 * struct page. If the caller expects **pages to be
1279 if (pages) {
1292 if (pages) {
1304 * pages.
1329 pages[i + j] = subpage;
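
Lines 1133-1152 above spell out the return convention that the exported get_user_pages*() wrappers inherit: 0 when @nr_pages is 0, -errno when nothing could be pinned, otherwise a count that may be shorter than requested, with FOLL_GET references dropped via put_page(). A hedged sketch of how a caller might treat a short result (hypothetical helper):

    #include <linux/errno.h>
    #include <linux/mm.h>

    /* Hypothetical: require the full count, releasing FOLL_GET references otherwise. */
    static long my_require_all(long ret, long nr_pages, struct page **pages)
    {
            long i;

            if (ret < 0)
                    return ret;
            if (ret == nr_pages)
                    return 0;
            for (i = 0; i < ret; i++)
                    put_page(pages[i]);
            return -EFAULT;
    }
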
1485 struct page **pages,
1513 * is to set FOLL_GET if the caller wants pages[] filled in (but has
1517 * FOLL_PIN always expects pages to be non-null, but no need to assert
1520 if (pages && !(flags & FOLL_PIN))
1525 ret = __get_user_pages(mm, start, nr_pages, flags, pages,
1556 * For the prefault case (!pages) we only update counts.
1558 if (likely(pages))
1559 pages += ret;
1590 pages, locked);
1606 if (likely(pages))
1607 pages++;
1631 * populate_vma_page_range() - populate a range of pages in the vma.
1637 * This takes care of mlocking the pages too if VM_LOCKED is set.
1639 * Return either number of pages pinned in the vma, or a negative error
1672 /* ... similarly, we've never faulted in PROT_NONE pages */
1707 * This takes care of mlocking the pages, too, if VM_LOCKED is set.
1715 * Returns either number of processed pages in the MM, or a negative error
1756 * __mm_populate - populate and/or mlock pages within a range of address space.
1774 * We want to fault in pages for [nstart; end) address range.
1796 * Now fault in a range of pages. populate_vma_page_range()
1797 * double checks the vma flags, so that it won't mlock pages
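
Lines 1631-1797 cover the population path used by mlock() and MAP_POPULATE: __mm_populate() iterates the VMAs in the range and populate_vma_page_range() faults the pages in (mlocking them if VM_LOCKED). In-tree callers normally reach this through the mm_populate() wrapper; a hedged sketch (assuming the mm_populate() inline from <linux/mm.h>):

    #include <linux/mm.h>

    /* Hypothetical: pre-fault a freshly created mapping, MAP_POPULATE-style. */
    static void my_prefault(unsigned long addr, unsigned long len)
    {
            /* Best-effort: mm_populate() deliberately ignores errors. */
            mm_populate(addr, len);
    }
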
1817 unsigned long nr_pages, struct page **pages,
1857 if (pages) {
1858 pages[i] = virt_to_page((void *)start);
1859 if (pages[i])
1860 get_page(pages[i]);
1946 * already know that some or all of the pages in the address range aren't in
1951 * Note that we don't pin or otherwise hold the pages referenced that we fault
2051 * Returns the number of collected pages. Return value is always >= 0.
2056 struct page **pages)
2063 struct folio *folio = page_folio(pages[i]);
2100 * Unpins all pages and migrates device coherent pages and movable_page_list.
2101 * Returns -EAGAIN if all pages were successfully migrated or -errno for failure
2107 struct page **pages)
2113 struct folio *folio = page_folio(pages[i]);
2120 pages[i] = NULL;
2133 * We can't migrate pages with unexpected references, so drop
2135 * Migrating pages have been added to movable_page_list after
2139 unpin_user_page(pages[i]);
2140 pages[i] = NULL;
2163 if (pages[i])
2164 unpin_user_page(pages[i]);
2171 * Check whether all pages are *allowed* to be pinned. Rather confusingly, all
2172 * pages in the range are required to be pinned via FOLL_PIN, before calling
2175 * If any pages in the range are not allowed to be pinned, then this routine
2176 * will migrate those pages away, unpin all the pages in the range and return
2183 * If everything is OK and all pages in the range are allowed to be pinned, then
2184 * this routine leaves all pages pinned and returns zero for success.
2187 struct page **pages)
2193 nr_pages, pages);
2198 pages);
2202 struct page **pages)
2215 struct page **pages,
2223 return __get_user_pages_locked(mm, start, nr_pages, pages,
2229 pages, locked,
2237 rc = check_and_migrate_movable_pages(nr_pinned_pages, pages);
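
Lines 2171-2237 describe what FOLL_LONGTERM adds on top of FOLL_PIN: before the pin may succeed, pages sitting in regions that must stay movable (CMA, ZONE_MOVABLE, device-coherent memory) are migrated away and the GUP attempt is retried; on failure everything is unpinned. From a caller's perspective this all hides behind the flag. A hedged sketch (hypothetical long-lived DMA pin):

    #include <linux/errno.h>
    #include <linux/mm.h>

    /* Hypothetical: long-lived pin for device DMA; FOLL_LONGTERM migrates the
     * pages out of CMA/ZONE_MOVABLE before this can return success. */
    static int my_longterm_pin(unsigned long uaddr, int nr_pages,
                               struct page **pages)
    {
            int pinned = pin_user_pages_fast(uaddr, nr_pages,
                                             FOLL_WRITE | FOLL_LONGTERM, pages);

            if (pinned == nr_pages)
                    return 0;
            if (pinned > 0)
                    unpin_user_pages(pages, pinned);
            return pinned < 0 ? pinned : -EFAULT;
    }
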
2247 static bool is_valid_gup_args(struct page **pages, int *locked,
2281 if (WARN_ON_ONCE((gup_flags & (FOLL_GET | FOLL_PIN)) && !pages))
2295 * get_user_pages_remote() - pin user pages in memory
2298 * @nr_pages: number of pages from start to pin
2300 * @pages: array that receives pointers to the pages pinned.
2302 * only intends to ensure the pages are faulted in.
2307 * Returns either number of pages pinned (which may be less than the
2311 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
2312 * -- If nr_pages is >0, and some pages were pinned, returns the number of
2313 * pages pinned. Again, this may be less than nr_pages.
2315 * The caller is responsible for releasing returned @pages, via put_page().
2339 * via the user virtual addresses. The pages may be submitted for
2352 unsigned int gup_flags, struct page **pages,
2357 if (!is_valid_gup_args(pages, locked, &gup_flags,
2361 return __get_user_pages_locked(mm, start, nr_pages, pages,
2370 unsigned int gup_flags, struct page **pages,
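
Lines 2295-2361 are get_user_pages_remote(), which targets another process's mm; its full kernel-doc (not shown in these matches) requires mmap_lock to be held, and @locked reports whether GUP had to drop it. A hedged sketch (hypothetical caller that already holds a reference on @mm):

    #include <linux/mm.h>

    /* Hypothetical: take a FOLL_GET reference on one page of a remote mm. */
    static struct page *my_get_remote_page(struct mm_struct *mm, unsigned long addr)
    {
            struct page *page;
            int locked = 1;
            long ret;

            mmap_read_lock(mm);
            ret = get_user_pages_remote(mm, addr, 1, FOLL_WRITE, &page, &locked);
            if (locked)
                    mmap_read_unlock(mm);

            return ret == 1 ? page : NULL;  /* release with put_page() */
    }
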
2378 * get_user_pages() - pin user pages in memory
2380 * @nr_pages: number of pages from start to pin
2382 * @pages: array that receives pointers to the pages pinned.
2384 * only intends to ensure the pages are faulted in.
2392 unsigned int gup_flags, struct page **pages)
2396 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_TOUCH))
2399 return __get_user_pages_locked(current->mm, start, nr_pages, pages,
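
Lines 2378-2399 are get_user_pages(), the current-mm variant; by analogy with get_user_pages_remote() it assumes the caller already holds mmap_lock for read (hedged: that requirement comes from the full kernel-doc, not from the matched lines). A sketch:

    #include <linux/mm.h>
    #include <linux/sched.h>

    /* Hypothetical: take FOLL_GET references on the current task's buffer. */
    static long my_get_current_pages(unsigned long uaddr, unsigned long nr_pages,
                                     struct page **pages)
    {
            long ret;

            mmap_read_lock(current->mm);
            ret = get_user_pages(uaddr, nr_pages, FOLL_WRITE, pages);
            mmap_read_unlock(current->mm);

            return ret;     /* drop each page with put_page() when done */
    }
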
2408 * get_user_pages(mm, ..., pages, NULL);
2413 * get_user_pages_unlocked(mm, ..., pages);
2420 struct page **pages, unsigned int gup_flags)
2424 if (!is_valid_gup_args(pages, NULL, &gup_flags,
2428 return __get_user_pages_locked(current->mm, start, nr_pages, pages,
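
Lines 2408-2428 show get_user_pages_unlocked(), which exists precisely so callers can drop the explicit lock/unlock pair from the pattern quoted at lines 2408-2413. A hedged one-call sketch:

    #include <linux/mm.h>

    /* Hypothetical: same result as the locked pattern, but GUP handles mmap_lock. */
    static long my_get_pages_unlocked(unsigned long uaddr, unsigned long nr_pages,
                                      struct page **pages)
    {
            return get_user_pages_unlocked(uaddr, nr_pages, pages, FOLL_WRITE);
    }
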
2436 * get_user_pages_fast attempts to pin user pages by walking the page
2438 * protected from page table pages being freed from under it, and should
2443 * pages are freed. This is unsuitable for architectures that do not need
2446 * Another way to achieve this is to batch up page table containing pages
2448 * pages. Disabling interrupts will allow the fast_gup walker to both block
2456 * free pages containing page tables or TLB flushing requires IPI broadcast.
2546 struct page **pages)
2549 struct page *page = pages[--(*nr)];
2581 struct page **pages, int *nr)
2596 * Always fallback to ordinary GUP on PROT_NONE-mapped pages:
2597 * pte_access_permitted() better should reject these pages
2614 undo_dev_pagemap(nr, nr_start, flags, pages);
2662 pages[*nr] = page;
2682 * get_user_pages_fast_only implementation that can pin pages. Thus it's still
2687 struct page **pages, int *nr)
2696 struct page **pages, int *nr)
2706 undo_dev_pagemap(nr, nr_start, flags, pages);
2711 undo_dev_pagemap(nr, nr_start, flags, pages);
2716 pages[*nr] = page;
2718 undo_dev_pagemap(nr, nr_start, flags, pages);
2731 struct page **pages, int *nr)
2737 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
2741 undo_dev_pagemap(nr, nr_start, flags, pages);
2749 struct page **pages, int *nr)
2755 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
2759 undo_dev_pagemap(nr, nr_start, flags, pages);
2767 struct page **pages, int *nr)
2775 struct page **pages, int *nr)
2783 unsigned long end, struct page **pages)
2788 pages[nr] = nth_page(page, nr);
2803 struct page **pages, int *nr)
2824 refs = record_subpages(page, addr, end, pages + *nr);
2852 struct page **pages, int *nr)
2861 if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr))
2870 struct page **pages, int *nr)
2878 struct page **pages, int *nr)
2891 pages, nr);
2895 refs = record_subpages(page, addr, end, pages + *nr);
2922 struct page **pages, int *nr)
2935 pages, nr);
2939 refs = record_subpages(page, addr, end, pages + *nr);
2967 struct page **pages, int *nr)
2979 refs = record_subpages(page, addr, end, pages + *nr);
3006 unsigned int flags, struct page **pages, int *nr)
3026 pages, nr))
3035 PMD_SHIFT, next, flags, pages, nr))
3037 } else if (!gup_pte_range(pmd, pmdp, addr, next, flags, pages, nr))
3045 unsigned int flags, struct page **pages, int *nr)
3059 pages, nr))
3063 PUD_SHIFT, next, flags, pages, nr))
3065 } else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr))
3073 unsigned int flags, struct page **pages, int *nr)
3088 P4D_SHIFT, next, flags, pages, nr))
3090 } else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr))
3098 unsigned int flags, struct page **pages, int *nr)
3112 pages, nr))
3116 PGDIR_SHIFT, next, flags, pages, nr))
3118 } else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr))
3124 unsigned int flags, struct page **pages, int *nr)
3143 struct page **pages)
3163 * With interrupts disabled, we block page table pages from being freed
3171 gup_pgd_range(start, end, gup_flags, pages, &nr_pinned);
3175 * When pinning pages for DMA there could be a concurrent write protect
3180 unpin_user_pages_lockless(pages, nr_pinned);
3183 sanity_check_pinned_pages(pages, nr_pinned);
3192 struct page **pages)
3220 nr_pinned = lockless_pages_from_mm(start, end, gup_flags, pages);
3224 /* Slow path: try to get the remaining pages with get_user_pages */
3226 pages += nr_pinned;
3228 pages, &locked,
3232 * The caller has to unpin the pages we already pinned so
3243 * get_user_pages_fast_only() - pin user pages in memory
3245 * @nr_pages: number of pages from start to pin
3247 * @pages: array that receives pointers to the pages pinned.
3254 * pages pinned.
3261 unsigned int gup_flags, struct page **pages)
3270 if (!is_valid_gup_args(pages, NULL, &gup_flags,
3274 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
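
Lines 3243-3274 are get_user_pages_fast_only(): the lockless walk with no slow-path fallback, so it never faults, never takes mmap_lock, and simply returns however many pages it managed to pin. A hedged sketch of the usual opportunistic pattern (hypothetical helper):

    #include <linux/mm.h>

    /* Hypothetical: opportunistic single-page pin; a result <= 0 just means
     * "not this way", so retry from a context that can take the slow path. */
    static bool my_try_get_page_fast(unsigned long uaddr, struct page **page)
    {
            return get_user_pages_fast_only(uaddr, 1, FOLL_WRITE, page) == 1;
    }
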
3279 * get_user_pages_fast() - pin user pages in memory
3281 * @nr_pages: number of pages from start to pin
3283 * @pages: array that receives pointers to the pages pinned.
3286 * Attempt to pin user pages in memory without taking mm->mmap_lock.
3290 * Returns number of pages pinned. This may be fewer than the number requested.
3291 * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns
3295 unsigned int gup_flags, struct page **pages)
3303 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_GET))
3305 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
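
Lines 3279-3305 are get_user_pages_fast(): it tries the lockless walk first and transparently falls back to the slow path (taking mmap_lock itself) for whatever is left, returning a count or -errno as described at lines 3290-3291. A hedged sketch with short-count handling (hypothetical helper):

    #include <linux/errno.h>
    #include <linux/mm.h>

    /* Hypothetical: grab a small user buffer with FOLL_GET references. */
    static int my_gup_fast(unsigned long uaddr, int nr_pages, struct page **pages)
    {
            int got = get_user_pages_fast(uaddr, nr_pages, FOLL_WRITE, pages);

            if (got < 0)
                    return got;
            if (got != nr_pages) {          /* short pin: release and report */
                    while (got--)
                            put_page(pages[got]);
                    return -EFAULT;
            }
            return 0;
    }
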
3310 * pin_user_pages_fast() - pin user pages in memory without taking locks
3313 * @nr_pages: number of pages from start to pin
3315 * @pages: array that receives pointers to the pages pinned.
3322 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3325 * Note that if a zero_page is amongst the returned pages, it will not have
3329 unsigned int gup_flags, struct page **pages)
3331 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN))
3333 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
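
Lines 3310-3333 are pin_user_pages_fast(), the FOLL_PIN counterpart of get_user_pages_fast(); releases must go through unpin_user_page()/unpin_user_pages*(), never put_page(). A slightly fuller hedged sketch that also sizes the pages[] array from an arbitrary byte range (all helper names hypothetical):

    #include <linux/errno.h>
    #include <linux/mm.h>
    #include <linux/slab.h>

    /* Hypothetical: pin an arbitrary user byte range for DMA. */
    static long my_pin_range(unsigned long uaddr, size_t len, struct page ***pagesp)
    {
            unsigned long first, last;
            struct page **pages;
            long pinned;
            int nr_pages;

            if (!len)
                    return -EINVAL;
            first = uaddr >> PAGE_SHIFT;
            last = (uaddr + len - 1) >> PAGE_SHIFT;
            nr_pages = last - first + 1;

            pages = kvcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
            if (!pages)
                    return -ENOMEM;

            pinned = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE, pages);
            if (pinned != nr_pages) {
                    if (pinned > 0)
                            unpin_user_pages(pages, pinned);
                    kvfree(pages);
                    return pinned < 0 ? pinned : -EFAULT;
            }

            *pagesp = pages;
            return nr_pages;        /* later: unpin_user_pages() then kvfree() */
    }
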
3338 * pin_user_pages_remote() - pin pages of a remote process
3342 * @nr_pages: number of pages from start to pin
3344 * @pages: array that receives pointers to the pages pinned.
3354 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3357 * Note that if a zero_page is amongst the returned pages, it will not have
3362 unsigned int gup_flags, struct page **pages,
3367 if (!is_valid_gup_args(pages, locked, &gup_flags,
3370 return __gup_longterm_locked(mm, start, nr_pages, pages,
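
Lines 3338-3370 are pin_user_pages_remote(); like get_user_pages_remote() it expects the target mm's mmap_lock to be held and takes a @locked cookie, and on recent kernels FOLL_LONGTERM may be combined with it (hedged: that combination is assumed for this kernel version; it is how vfio/iommufd-style users pin). A sketch:

    #include <linux/errno.h>
    #include <linux/mm.h>
    #include <linux/sched/mm.h>

    /* Hypothetical: long-term pin of another process's buffer. */
    static long my_pin_remote(struct mm_struct *mm, unsigned long uaddr,
                              unsigned long nr_pages, struct page **pages)
    {
            int locked = 1;
            long pinned;

            if (!mmget_not_zero(mm))
                    return -ESRCH;

            mmap_read_lock(mm);
            pinned = pin_user_pages_remote(mm, uaddr, nr_pages,
                                           FOLL_WRITE | FOLL_LONGTERM,
                                           pages, &locked);
            if (locked)
                    mmap_read_unlock(mm);
            mmput(mm);

            return pinned;  /* release with unpin_user_pages(pages, pinned) */
    }
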
3377 * pin_user_pages() - pin user pages in memory for use by other devices
3380 * @nr_pages: number of pages from start to pin
3382 * @pages: array that receives pointers to the pages pinned.
3388 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3391 * Note that if a zero_page is amongst the returned pages, it will not have
3395 unsigned int gup_flags, struct page **pages)
3399 if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN))
3402 pages, &locked, gup_flags);
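
Lines 3377-3402 are pin_user_pages(), the current-mm, array-based pin; the calling convention assumed here (by analogy with get_user_pages()) is to hold mmap_read_lock() around the call, which is how callers such as io_uring's buffer registration use it. A sketch:

    #include <linux/mm.h>
    #include <linux/sched.h>

    /* Hypothetical: pin the current task's buffer under mmap_read_lock(). */
    static long my_pin_current(unsigned long uaddr, unsigned long nr_pages,
                               struct page **pages)
    {
            long pinned;

            mmap_read_lock(current->mm);
            pinned = pin_user_pages(uaddr, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
                                    pages);
            mmap_read_unlock(current->mm);

            return pinned;  /* release with unpin_user_pages() */
    }
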
3411 * Note that if a zero_page is amongst the returned pages, it will not have
3415 struct page **pages, unsigned int gup_flags)
3419 if (!is_valid_gup_args(pages, NULL, &gup_flags,
3423 return __gup_longterm_locked(current->mm, start, nr_pages, pages,
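
Lines 3411-3423 are pin_user_pages_unlocked(), the FOLL_PIN analogue of get_user_pages_unlocked(): GUP takes and drops mmap_lock itself. A hedged sketch (hypothetical helper):

    #include <linux/mm.h>

    /* Hypothetical: pin without any caller-side locking; GUP manages mmap_lock. */
    static long my_pin_unlocked(unsigned long uaddr, unsigned long nr_pages,
                                struct page **pages)
    {
            return pin_user_pages_unlocked(uaddr, nr_pages, pages, FOLL_WRITE);
    }
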