Lines matching refs:page
52 * any price. Since page is never written to after the initialization we
65 struct page *page;
76 page = virt_to_page((void *)empty_zero_page);
77 split_page(page, order);
78 for (i = 0; i < (1 << order); i++, page++)
79 mark_page_reserved(page);
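
Lines 76-79 are the standard pattern for pinning a multi-page buffer: split a higher-order allocation into order-0 pages, then reserve each one so the buddy allocator never hands them out. A minimal sketch of that pattern (hypothetical helper name; assumes the file's own headers, and that zero_page_va already holds the buffer's kernel virtual address):

	/* Hypothetical helper restating the pattern of lines 76-79. */
	static void reserve_zero_pages(unsigned long zero_page_va, unsigned int order)
	{
		struct page *page = virt_to_page((void *)zero_page_va);
		unsigned int i;

		/* Turn one order-N page into 1 << order independent order-0 pages. */
		split_page(page, order);
		for (i = 0; i < (1 << order); i++, page++)
			mark_page_reserved(page);	/* SetPageReserved + managed-count adjust */
	}
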
84 static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
93 BUG_ON(folio_test_dcache_dirty(page_folio(page)));
100 pte = mk_pte(page, prot);
139 void *kmap_coherent(struct page *page, unsigned long addr)
141 return __kmap_pgprot(page, addr, PAGE_KERNEL);
144 void *kmap_noncoherent(struct page *page, unsigned long addr)
146 return __kmap_pgprot(page, addr, PAGE_KERNEL_NC);
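
kmap_coherent() and kmap_noncoherent() (lines 139-146) are thin wrappers around __kmap_pgprot(); the only difference is the protection of the temporary mapping, cached (PAGE_KERNEL) versus uncached (PAGE_KERNEL_NC). A hypothetical caller, to show the intended pairing with kunmap_coherent():

	/* Hypothetical caller, not from the file: write to a page through a
	 * kernel mapping whose cache colour matches the user's vaddr, so the
	 * stores land in the dcache lines the user mapping will read. */
	static void write_via_coherent_map(struct page *page, unsigned long uaddr,
					   const void *src, size_t len)
	{
		void *kaddr = kmap_coherent(page, uaddr);

		memcpy(kaddr + (uaddr & ~PAGE_MASK), src, len);
		kunmap_coherent();
	}
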
171 void copy_user_highpage(struct page *to, struct page *from,
192 /* Make sure this page is cleared on other CPU's too before using it */
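
Only the signature (line 171) and the closing comment (line 192) of copy_user_highpage() match here. A sketch of the usual MIPS shape of the elided body, assuming it uses the same aliasing test as the copy helpers below (the real routine also flushes the destination dcache on aliasing CPUs, omitted here):

	void copy_user_highpage(struct page *to, struct page *from,
		unsigned long vaddr, struct vm_area_struct *vma)
	{
		struct folio *src = page_folio(from);
		void *vto = kmap_atomic(to);
		void *vfrom;

		if (cpu_has_dc_aliases &&
		    folio_mapped(src) && !folio_test_dcache_dirty(src)) {
			/* Clean, mapped source: read it through a same-colour
			 * mapping to see the user's dcache lines. */
			vfrom = kmap_coherent(from, vaddr);
			copy_page(vto, vfrom);
			kunmap_coherent();
		} else {
			vfrom = kmap_atomic(from);
			copy_page(vto, vfrom);
			kunmap_atomic(vfrom);
		}
		kunmap_atomic(vto);
		/* Make sure this page is cleared on other CPUs too before using it */
		smp_wmb();
	}
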
197 struct page *page, unsigned long vaddr, void *dst, const void *src,
200 struct folio *folio = page_folio(page);
204 void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
213 flush_cache_page(vma, vaddr, page_to_pfn(page));
217 struct page *page, unsigned long vaddr, void *dst, const void *src,
220 struct folio *folio = page_folio(page);
224 void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
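
The fragments at lines 197-224 belong to copy_to_user_page() and copy_from_user_page(). Stitched together, copy_to_user_page() comes out roughly as below; this is a reconstruction, not a verbatim quote, and the folio_set_dcache_dirty() fallback is an assumption about the elided else-branch:

	void copy_to_user_page(struct vm_area_struct *vma,
		struct page *page, unsigned long vaddr, void *dst, const void *src,
		unsigned long len)
	{
		struct folio *folio = page_folio(page);

		if (cpu_has_dc_aliases &&
		    folio_mapped(folio) && !folio_test_dcache_dirty(folio)) {
			/* Write through a coherent (same-colour) kernel mapping. */
			void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
			memcpy(vto, src, len);
			kunmap_coherent();
		} else {
			memcpy(dst, src, len);
			if (cpu_has_dc_aliases)
				folio_set_dcache_dirty(folio);	/* assumed fallback */
		}
		/* Freshly written instructions need the caches synced. */
		if (vma->vm_flags & VM_EXEC)
			flush_cache_page(vma, vaddr, page_to_pfn(page));
	}
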
454 struct page *page = pfn_to_page(tmp);
457 SetPageReserved(page);
459 free_highmem_page(page);
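
Lines 454-459 are the highmem initialization loop: each pfn in the highmem range either stays reserved (a hole) or is released to the page allocator. A sketch with assumed loop bounds and an assumed memblock_is_memory() hole test:

	static void __init init_highmem_pages(unsigned long highstart_pfn,
					      unsigned long highend_pfn)
	{
		unsigned long tmp;

		for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
			struct page *page = pfn_to_page(tmp);

			if (!memblock_is_memory(PFN_PHYS(tmp)))
				SetPageReserved(page);		/* hole: keep it unallocatable */
			else
				free_highmem_page(page);	/* clear Reserved, free to buddy */
		}
	}
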
492 struct page *page = pfn_to_page(pfn);
496 free_reserved_page(page);
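
Lines 492-496 are the initmem-freeing path: pages reserved at boot are handed back one at a time. A sketch over an assumed [begin, end) physical address range:

	static void free_init_range(unsigned long begin, unsigned long end)
	{
		unsigned long pfn;

		for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
			struct page *page = pfn_to_page(pfn);

			/* free_reserved_page() clears PageReserved, resets the
			 * refcount and gives the page to the buddy allocator. */
			free_reserved_page(page);
		}
	}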