Lines matching refs:page in arch/xtensa/mm/cache.c (Linux kernel)

33 #include <asm/page.h>
37 * The kernel provides one architecture bit PG_arch_1 in the page flags that
44 * are coherent. The kernel clears this bit whenever a page is added to the
45 * page cache. At that time, the caches might not be in sync. We, therefore,
52 * page.
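
Taken together, the fragments above describe a lazy-sync protocol: PG_arch_1 is cleared when a page enters the page cache and set once the caches have been brought in sync. A minimal sketch of that protocol (PG_arch_1, folio->flags and the bit helpers are real kernel APIs; the function name and its call site are illustrative, not lines from this file):

#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>

/* Illustrative only: flush on first use, then record coherency in
 * PG_arch_1 so later accesses can skip the flush. */
static void sync_caches_once(struct folio *folio)
{
        /* test_and_set_bit() returns the old bit: 0 means the caches
         * may still be out of sync, so flush exactly once. */
        if (!test_and_set_bit(PG_arch_1, &folio->flags))
                flush_dcache_folio(folio);
}
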
59 static inline void kmap_invalidate_coherent(struct page *page,
62 if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
65 if (!PageHighMem(page)) {
66 kvaddr = (unsigned long)page_to_virt(page);
71 (page_to_phys(page) & DCACHE_ALIAS_MASK);
75 page_to_phys(page));
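
Joining matched lines 59 through 75 gives the helper's skeleton; the body below is a hedged reconstruction (the __invalidate_dcache_page* helpers and the preempt_disable() bracketing exist in the xtensa tree, but exact bodies vary across kernel versions; headers as in the first sketch):

static inline void kmap_invalidate_coherent(struct page *page,
                                            unsigned long vaddr)
{
        /* Nothing to do when the kernel address falls in the same
         * cache colour as the user mapping. */
        if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
                unsigned long kvaddr;

                if (!PageHighMem(page)) {
                        /* Lowmem: invalidate through the linear map. */
                        kvaddr = (unsigned long)page_to_virt(page);
                        __invalidate_dcache_page(kvaddr);
                } else {
                        /* Highmem: use a temporary alias window with
                         * the page's own cache colour. */
                        kvaddr = TLBTEMP_BASE_1 +
                                (page_to_phys(page) & DCACHE_ALIAS_MASK);
                        preempt_disable();
                        __invalidate_dcache_page_alias(kvaddr,
                                                       page_to_phys(page));
                        preempt_enable();
                }
        }
}
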
81 static inline void *coherent_kvaddr(struct page *page, unsigned long base,
84 *paddr = page_to_phys(page);
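
The two matched lines plausibly complete to a one-expression helper; a sketch (the return expression is inferred from how clear_user_highpage() below uses the result):

static inline void *coherent_kvaddr(struct page *page, unsigned long base,
                                    unsigned long vaddr, unsigned long *paddr)
{
        *paddr = page_to_phys(page);
        /* Pick a kernel alias in the same cache colour as vaddr. */
        return (void *)(base + (vaddr & DCACHE_ALIAS_MASK));
}
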
88 void clear_user_highpage(struct page *page, unsigned long vaddr)
90 struct folio *folio = page_folio(page);
92 void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);
95 kmap_invalidate_coherent(page, vaddr);
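
A hedged sketch of the whole routine, filling in the unmatched lines (set_bit()/folio_flags() and clear_page_alias() appear in the xtensa tree; the ordering follows the fragments above):

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        struct folio *folio = page_folio(page);
        unsigned long paddr;
        void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);

        preempt_disable();
        /* Drop stale alias lines, mark the page for later flushing,
         * then clear it through the colour-matched alias. */
        kmap_invalidate_coherent(page, vaddr);
        set_bit(PG_arch_1, folio_flags(folio, 0));
        clear_page_alias(kvaddr, paddr);
        preempt_enable();
}
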
102 void copy_user_highpage(struct page *dst, struct page *src,
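
The copy-side twin follows the same pattern; a sketch assuming a second temporary window TLBTEMP_BASE_2 and a copy_page_alias() helper, both of which exist in the xtensa tree:

void copy_user_highpage(struct page *dst, struct page *src,
                        unsigned long vaddr, struct vm_area_struct *vma)
{
        struct folio *folio = page_folio(dst);
        unsigned long dst_paddr, src_paddr;
        void *dst_vaddr = coherent_kvaddr(dst, TLBTEMP_BASE_1, vaddr,
                                          &dst_paddr);
        void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr,
                                          &src_paddr);

        preempt_disable();
        kmap_invalidate_coherent(dst, vaddr);
        set_bit(PG_arch_1, folio_flags(folio, 0));
        copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
        preempt_enable();
}
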
121 * Any time the kernel writes to a user page cache page, or it is about to
122 * read from a page cache page, this routine is called.
131 * If we have a mapping but the page is not mapped to user-space
132 * yet, we simply mark this page dirty and defer flushing the
149 * Flush the page in kernel space and user space.
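
Lines 121 through 149 describe the deferral strategy of flush_dcache_page(); a condensed, hedged sketch of the folio-based form in recent trees (folio_flush_mapping() and mapping_mapped() are real APIs; the flush sequence is abridged and may differ by version):

void flush_dcache_folio(struct folio *folio)
{
        struct address_space *mapping = folio_flush_mapping(folio);

        /* In the page cache but not mapped to user space yet: mark
         * the folio dirty and defer the flush to update_mmu time. */
        if (mapping && !mapping_mapped(mapping)) {
                if (!test_bit(PG_arch_1, &folio->flags))
                        set_bit(PG_arch_1, &folio->flags);
                return;
        } else {
                unsigned long phys = folio_pfn(folio) * PAGE_SIZE;
                unsigned long temp = folio_pos(folio);
                unsigned long alias = !(DCACHE_ALIAS_EQ(temp, phys));
                unsigned long virt;

                /* Flush the page in kernel space and user space. */
                preempt_disable();
                if (alias) {
                        virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
                        __flush_invalidate_dcache_page_alias(virt, phys);
                }
                virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);
                if (alias)
                        __flush_invalidate_dcache_page_alias(virt, phys);
                if (mapping)
                        __invalidate_icache_page_alias(virt, phys);
                preempt_enable();
        }
}
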
176 /* There shouldn't be an entry in the cache for this page anymore. */
193 * Remove any entry in the cache for this page.
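
In current trees the comment at line 193 introduces local_flush_cache_page(); a hedged sketch using the same alias-window trick as the helpers above (the real source notes the alias address avoids a cache multi-hit):

void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
                            unsigned long pfn)
{
        /* Flush through an alias that matches the user colour. */
        unsigned long phys = page_to_phys(pfn_to_page(pfn));
        unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);

        preempt_disable();
        __flush_invalidate_dcache_page_alias(virt, phys);
        __invalidate_icache_page_alias(virt, phys);
        preempt_enable();
}
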
268 * flush_dcache_page() on the page.
273 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
277 unsigned long phys = page_to_phys(page);
280 /* Flush and invalidate user page if aliased. */
294 * Flush and invalidate kernel page if aliased and synchronize
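
Matched lines 273 through 294 outline copy_to_user_page(), which the kernel calls after writing into a user page on the user's behalf (for example, ptrace inserting a breakpoint). A hedged sketch of the full routine (the range-flush helper names follow the xtensa tree; details vary by version):

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long vaddr, void *dst, const void *src,
                       unsigned long len)
{
        unsigned long phys = page_to_phys(page);
        unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

        /* Flush and invalidate user page if aliased. */
        if (alias) {
                unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

                preempt_disable();
                __flush_invalidate_dcache_page_alias(t, phys);
                preempt_enable();
        }

        memcpy(dst, src, len);

        /* Flush and invalidate kernel page if aliased and synchronize
         * data and instruction caches for executable pages. */
        if (alias) {
                unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

                preempt_disable();
                __flush_invalidate_dcache_range((unsigned long)dst, len);
                if ((vma->vm_flags & VM_EXEC) != 0)
                        __invalidate_icache_page_alias(t, phys);
                preempt_enable();
        } else if ((vma->vm_flags & VM_EXEC) != 0) {
                __flush_dcache_range((unsigned long)dst, len);
                __invalidate_icache_range((unsigned long)dst, len);
        }
}
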
313 extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
317 unsigned long phys = page_to_phys(page);
321 * Flush user page if aliased.
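
The read-side counterpart only needs the pre-copy flush, since the kernel reads rather than writes; a hedged sketch (the tree's definition carries a redundant extern qualifier on the definition at line 313, omitted here):

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                         unsigned long vaddr, void *dst, const void *src,
                         unsigned long len)
{
        unsigned long phys = page_to_phys(page);
        unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

        /* Flush the user page if aliased, so the memcpy() below sees
         * the data the user mapping last wrote. */
        if (alias) {
                unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

                preempt_disable();
                __flush_invalidate_dcache_page_alias(t, phys);
                preempt_enable();
        }

        memcpy(dst, src, len);
}
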