Lines Matching defs:page

166 * Install PTEs, to map dst_addr (within dst_vma) to page.
173 unsigned long dst_addr, struct page *page,
182 struct folio *folio = page_folio(page);
185 _dst_pte = mk_pte(page, dst_vma->vm_page_prot);
207 * registered, we firstly wr-protect a none pte which has no page cache
208 * page backing it, then access the page.
217 folio_add_file_rmap_pte(folio, page, dst_vma);
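
The matches around lines 166-217 appear to come from mfill_atomic_install_pte() in the kernel's mm/userfaultfd.c: it builds the new PTE with mk_pte(), adds the folio to the file rmap, and only then makes the mapping visible, with the quoted comment covering ranges registered for both missing-page and write-protect handling. For context, here is a minimal, hedged userspace sketch of creating a userfaultfd and registering a range so these kernel paths get exercised; error handling is trimmed and the flags are assumptions about what the running kernel supports.

    /* Hedged setup sketch: create a userfaultfd and register a range for
     * missing-page faults. Assumes a kernel with userfaultfd enabled. */
    #include <fcntl.h>
    #include <linux/userfaultfd.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        unsigned long page_size = sysconf(_SC_PAGESIZE);
        size_t len = 16 * page_size;

        /* No universal glibc wrapper, so invoke the syscall directly. */
        int uffd = syscall(__NR_userfaultfd, O_CLOEXEC);
        if (uffd < 0) { perror("userfaultfd"); return 1; }

        /* Handshake: declare the API version, request no extra features. */
        struct uffdio_api api = { .api = UFFD_API, .features = 0 };
        if (ioctl(uffd, UFFDIO_API, &api)) { perror("UFFDIO_API"); return 1; }

        char *area = mmap(NULL, len, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (area == MAP_FAILED) { perror("mmap"); return 1; }

        /* Register the range; later faults in it are resolved with the
         * UFFDIO_COPY / UFFDIO_ZEROPAGE / UFFDIO_CONTINUE ioctls. */
        struct uffdio_register reg = {
            .range = { .start = (unsigned long)area, .len = len },
            .mode  = UFFDIO_REGISTER_MODE_MISSING,
        };
        if (ioctl(uffd, UFFDIO_REGISTER, &reg)) { perror("UFFDIO_REGISTER"); return 1; }

        printf("registered %zu bytes at %p on uffd %d\n", len, (void *)area, uffd);
        return 0;
    }
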
266 * process B thread 1 takes page fault, read lock on own mmap lock
271 * Disable page faults to prevent potential deadlock
284 /* don't free the page */
296 * preceding stores to the page contents become visible before
306 &folio->page, true, flags);
332 * zeroing out the folio become visible before mapping the page
338 &folio->page, true, 0);
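
Lines 266-338 look like mfill_atomic_pte_copy() and its zero-page sibling: the kernel copies (or zero-fills) a fresh folio with page faults disabled to avoid the deadlock scenario sketched in the comment, relies on a write barrier so the new contents are visible before the PTE is set, and then calls mfill_atomic_install_pte(). From userspace these paths are driven by UFFDIO_COPY and UFFDIO_ZEROPAGE; the sketch below is a hedged illustration of a fault-handling thread, with uffd and page_size assumed to come from the registration sketch above and retry handling (e.g. -EAGAIN) left out.

    /* Hedged sketch: resolve one missing-page fault with UFFDIO_COPY, or
     * with UFFDIO_ZEROPAGE when there is no source buffer. */
    #include <linux/userfaultfd.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    static int resolve_one_fault(int uffd, unsigned long page_size,
                                 const void *src_page)
    {
        struct uffd_msg msg;

        /* Block until a faulting thread reports a missing page. */
        if (read(uffd, &msg, sizeof(msg)) != (ssize_t)sizeof(msg))
            return -1;
        if (msg.event != UFFD_EVENT_PAGEFAULT)
            return -1;

        unsigned long fault_addr = msg.arg.pagefault.address & ~(page_size - 1);

        if (src_page) {
            /* UFFDIO_COPY: the kernel allocates a folio, copies src_page
             * into it, then installs the PTE and wakes the faulter. */
            struct uffdio_copy copy = {
                .dst  = fault_addr,
                .src  = (unsigned long)src_page,
                .len  = page_size,
                .mode = 0,
            };
            return ioctl(uffd, UFFDIO_COPY, &copy);
        }

        /* UFFDIO_ZEROPAGE: map a zero-filled page instead. */
        struct uffdio_zeropage zp = {
            .range = { .start = fault_addr, .len = page_size },
            .mode  = 0,
        };
        return ioctl(uffd, UFFDIO_ZEROPAGE, &zp);
    }
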
391 struct page *page;
405 page = folio_file_page(folio, pgoff);
406 if (PageHWPoison(page)) {
412 page, false, flags);
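
Lines 391-412 appear to be mfill_atomic_pte_continue(): for a minor fault the data is already in the page cache, so the kernel looks the page up with folio_file_page(), bails out if it is hardware-poisoned, and otherwise just installs the PTE. From userspace that path is triggered by UFFDIO_CONTINUE; the hedged sketch below assumes the range is shmem- or hugetlbfs-backed and was registered with UFFDIO_REGISTER_MODE_MINOR on a kernel that supports minor faults.

    /* Hedged sketch: resolve a minor fault with UFFDIO_CONTINUE, i.e. map
     * page-cache pages that already hold the right contents. */
    #include <linux/userfaultfd.h>
    #include <sys/ioctl.h>

    static int continue_range(int uffd, unsigned long addr, unsigned long len)
    {
        struct uffdio_continue cont = {
            .range = { .start = addr, .len = len },
            .mode  = 0,          /* or UFFDIO_CONTINUE_MODE_DONTWAKE */
        };

        /* The kernel installs PTEs for the existing page-cache pages in
         * addr..addr+len; it refuses pages that are hardware-poisoned. */
        return ioctl(uffd, UFFDIO_CONTINUE, &cont);
    }
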
511 * There is no default zero huge page for all huge page sizes as
513 * by THP. Since we can not reliably insert a zero page, this
529 * Validate alignment based on huge page size
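
Lines 511-529 are from the hugetlb variant of the fill path: the comment notes there is no default zero huge page for every huge page size, so zero-page insertion is not offered there, and the destination address and length are validated against the huge page size. Below is a small hedged pre-check a caller might mirror before issuing UFFDIO_COPY on a hugetlb registration; the 2 MiB size is only an assumed example, the real value depends on the hugetlbfs mapping.

    /* Hedged example: alignment pre-check for UFFDIO_COPY on a hugetlb
     * range. HUGE_SZ is an assumption (2 MiB); query the real huge page
     * size of the mapping in actual code. */
    #include <stdbool.h>

    #define HUGE_SZ (2UL * 1024 * 1024)

    static bool hugetlb_copy_args_ok(unsigned long dst, unsigned long len)
    {
        /* Both dst and len must be multiples of the huge page size,
         * mirroring the kernel-side validation. */
        return (dst & (HUGE_SZ - 1)) == 0 && (len & (HUGE_SZ - 1)) == 0;
    }
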
674 * The normal page fault path for a shmem will invoke the
883 * smp_wmb() to ensure that any writes to the about-to-be-mapped page by
885 * subsequent loads from the page through the newly mapped address range.
1047 !PageAnonExclusive(&src_folio->page)) {
1063 orig_dst_pte = mk_pte(&src_folio->page, dst_vma->vm_page_prot);
1125 * The mmap_lock for reading is held by the caller. Just move the page
1127 * in moving the page.
1165 * transparent huge page fault can establish new
1223 * Pin the page while holding the lock to be sure the
1224 * page isn't freed under us
1234 if (!folio || !PageAnonExclusive(&folio->page)) {
1284 /* page was unmapped from under us */
1534 * It provides a zero copy mechanism to handle userspace page faults.
1538 * The thread receiving the page during the userland page fault
1539 * will receive the faulting page in the source vma through the network,
1543 * page in the faulting address in the destination vma.
1561 * Only one thread should resolve the userland page fault at any given
1593 * the rmap code to provide this anonymous page remapping functionality.
1700 !PageAnonExclusive(&folio->page))) {
1754 /* Proceed to the next page */
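
The matches from line 1047 onward belong to the UFFDIO_MOVE (move_pages()) implementation and its header comment: instead of copying, the ioctl remaps an existing anonymous page from a source address to the faulting destination address, which is why the code insists on exclusively owned pages (the PageAnonExclusive() checks) and why only one thread should resolve a given fault at a time. Below is a hedged userspace sketch; UFFDIO_MOVE only exists on recent kernels (roughly 6.8 and later), so the struct and flags assume such a kernel, and the source page is assumed to have been filled in by the handler (e.g. received over the network) before the move.

    /* Hedged sketch: zero-copy fault resolution with UFFDIO_MOVE on a
     * recent kernel. `src` holds a private anonymous page already filled
     * with the wanted contents; `dst` is the faulting address. */
    #include <linux/userfaultfd.h>
    #include <sys/ioctl.h>

    static long long move_one_page(int uffd, unsigned long src,
                                   unsigned long dst, unsigned long page_size)
    {
        struct uffdio_move mv = {
            .dst  = dst,
            .src  = src,
            .len  = page_size,
            .mode = 0,       /* or UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES, etc. */
        };

        /* No data copy: the PTE is moved and the rmap updated, so the
         * source page must be exclusively owned (e.g. not shared after a
         * fork), otherwise the ioctl fails, typically with EBUSY. */
        if (ioctl(uffd, UFFDIO_MOVE, &mv))
            return -1;
        return mv.move;      /* bytes actually moved */
    }
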