Searched refs:page (Results 51 - 75 of 3144) sorted by relevance

/linux-master/arch/mips/include/asm/
bcache.h
20 void (*bc_wback_inv)(unsigned long page, unsigned long size);
21 void (*bc_inv)(unsigned long page, unsigned long size);
43 static inline void bc_wback_inv(unsigned long page, unsigned long size)
45 bcops->bc_wback_inv(page, size);
48 static inline void bc_inv(unsigned long page, unsigned long size)
50 bcops->bc_inv(page, size);
79 #define bc_wback_inv(page, size) do { } while (0)
80 #define bc_inv(page, size) do { } while (0)
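
The bcache.h hits above show the full shape of the hook: an ops table of function pointers, inline wrappers that dispatch through it, and no-op macros for configurations without a board cache. A minimal userspace sketch of the same pattern; all names (demo_ops, the CONFIG_BOARD_SCACHE stand-in) are illustrative, not the kernel's:

#include <stdio.h>

#define CONFIG_BOARD_SCACHE 1   /* undefine to get the no-op macros */

struct bcache_ops {
    void (*bc_wback_inv)(unsigned long page, unsigned long size);
    void (*bc_inv)(unsigned long page, unsigned long size);
};

static void demo_wback_inv(unsigned long page, unsigned long size)
{
    printf("wback+inv  [%#lx, %#lx)\n", page, page + size);
}

static void demo_inv(unsigned long page, unsigned long size)
{
    printf("invalidate [%#lx, %#lx)\n", page, page + size);
}

static struct bcache_ops demo_ops = {
    .bc_wback_inv = demo_wback_inv,
    .bc_inv       = demo_inv,
};
static struct bcache_ops *bcops = &demo_ops;

#ifdef CONFIG_BOARD_SCACHE
/* thin wrappers dispatch through the ops table */
static inline void bc_wback_inv(unsigned long page, unsigned long size)
{
    bcops->bc_wback_inv(page, size);
}

static inline void bc_inv(unsigned long page, unsigned long size)
{
    bcops->bc_inv(page, size);
}
#else
/* with no board cache, callers compile to nothing */
#define bc_wback_inv(page, size) do { } while (0)
#define bc_inv(page, size) do { } while (0)
#endif

int main(void)
{
    bc_wback_inv(0x10000, 4096);
    bc_inv(0x10000, 4096);
    return 0;
}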
cacheflush.h
21 * - flush_cache_page(mm, vmaddr, pfn) flushes a single page
24 * - flush_dcache_page(pg) flushes(wback&invalidates) a page for dcache
29 * - flush_data_cache_page() flushes a page from the data cache
34 * This flag is used to indicate that the page pointed to by a pte
52 extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
53 extern void __flush_dcache_pages(struct page *page, unsigned int nr);
59 __flush_dcache_pages(&folio->page, folio_nr_pages(folio));
65 static inline void flush_dcache_page(struct page *page)
78 flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
[all...]
/linux-master/include/xen/
mem-reservation.h
18 #include <xen/page.h>
22 static inline void xenmem_reservation_scrub_page(struct page *page)
25 clear_highpage(page);
30 struct page **pages,
34 struct page **pages);
38 struct page **pages,
48 struct page **pages)
page.h
5 #include <asm/page.h>
7 /* The hypercall interface supports only 4KB page */
20 #define page_to_xen_pfn(page) \
21 ((page_to_pfn(page)) << (PAGE_SHIFT - XEN_PAGE_SHIFT))
28 #include <asm/xen/page.h>
30 /* Return the GFN associated to the first 4KB of the page */
31 static inline unsigned long xen_page_to_gfn(struct page *page)
33 return pfn_to_gfn(page_to_xen_pfn(page));
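
The page_to_xen_pfn() macro above is pure shift arithmetic: the Xen hypercall ABI is fixed at 4KB frames, so one larger kernel page spans several Xen pfns. A standalone C example of the arithmetic, assuming an illustrative 64KB kernel page size (PAGE_SHIFT = 16):

#include <stdio.h>

#define PAGE_SHIFT     16   /* illustrative 64KB kernel pages */
#define XEN_PAGE_SHIFT 12   /* the Xen ABI is fixed at 4KB */

int main(void)
{
    unsigned long pfn = 7;  /* kernel page frame number */
    unsigned long xen_pfn = pfn << (PAGE_SHIFT - XEN_PAGE_SHIFT);
    unsigned long per_page = 1UL << (PAGE_SHIFT - XEN_PAGE_SHIFT);

    /* one 64KB kernel page covers 16 Xen 4KB frames */
    printf("kernel pfn %lu -> xen pfns %lu..%lu\n",
           pfn, xen_pfn, xen_pfn + per_page - 1);
    return 0;
}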
/linux-master/arch/xtensa/include/asm/
pgalloc.h
26 #define pmd_populate(mm, pmdp, page) \
27 (pmd_val(*(pmdp)) = ((unsigned long)page_to_virt(page)))
56 struct page *page;
58 page = __pte_alloc_one(mm, GFP_PGTABLE_USER);
59 if (!page)
61 ptes_clear(page_address(page));
62 return page;
cachetype.h
6 #include <asm/page.h>
/linux-master/drivers/xen/
biomerge.c
5 #include <xen/page.h>
7 /* check if @page can be merged with 'vec1' */
9 const struct page *page)
13 unsigned long bfn2 = pfn_to_bfn(page_to_pfn(page));
18 * XXX: Add support for merging bio_vec when using different page
8 xen_biovec_phys_mergeable(const struct bio_vec *vec1, const struct page *page)
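
The merge test in biomerge.c reduces to frame arithmetic: two segments can be merged only when the backend frame just past vec1 equals the frame backing the candidate page. A userspace sketch under the assumption that pfn_to_bfn() is the identity (no PV remapping); struct bio_vec is stood in by a simplified type:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT  12
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

/* identity stand-in; on a PV guest this would consult the P2M */
static unsigned long pfn_to_bfn(unsigned long pfn) { return pfn; }

struct bio_vec_ish {
    unsigned long pfn;      /* stand-in for bv_page */
    unsigned int offset;    /* bv_offset */
    unsigned int len;       /* bv_len */
};

static bool phys_mergeable(const struct bio_vec_ish *vec1,
                           unsigned long next_pfn)
{
    unsigned long bfn1 = pfn_to_bfn(vec1->pfn);
    unsigned long bfn2 = pfn_to_bfn(next_pfn);

    /* the frame right after vec1 must be the next page's frame */
    return bfn1 + PFN_DOWN(vec1->offset + vec1->len) == bfn2;
}

int main(void)
{
    struct bio_vec_ish v = { .pfn = 100, .offset = 0, .len = 4096 };

    printf("mergeable with pfn 101: %d\n", phys_mergeable(&v, 101));
    printf("mergeable with pfn 102: %d\n", phys_mergeable(&v, 102));
    return 0;
}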
/linux-master/arch/x86/include/asm/
hugetlb.h
5 #include <asm/page.h>
/linux-master/drivers/infiniband/hw/mlx4/
doorbell.c
48 struct mlx4_ib_user_db_page *page;
55 list_for_each_entry(page, &context->db_page_list, list)
56 if (page->user_virt == (virt & PAGE_MASK))
59 page = kmalloc(sizeof *page, GFP_KERNEL);
60 if (!page) {
65 page->user_virt = (virt & PAGE_MASK);
66 page->refcnt = 0;
67 page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK,
69 if (IS_ERR(page
[all...]
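
doorbell.c follows a common lookup-or-allocate pattern: scan the per-context list for an entry covering the same page, reuse it with a bumped refcount, otherwise allocate and link a fresh one. A simplified userspace sketch of that flow; the list handling and names are illustrative, and the umem mapping step is omitted:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_MASK (~4095UL)

struct user_db_page {
    struct user_db_page *next;
    unsigned long user_virt;
    int refcnt;
};

static struct user_db_page *db_get(struct user_db_page **list,
                                   unsigned long virt)
{
    struct user_db_page *page;

    /* reuse an existing entry for the same page, if any */
    for (page = *list; page; page = page->next)
        if (page->user_virt == (virt & PAGE_MASK))
            goto found;

    /* none found: allocate and link a new one */
    page = malloc(sizeof(*page));
    if (!page)
        return NULL;
    page->user_virt = virt & PAGE_MASK;
    page->refcnt = 0;
    page->next = *list;
    *list = page;
found:
    page->refcnt++;
    return page;
}

int main(void)
{
    struct user_db_page *list = NULL;
    struct user_db_page *a = db_get(&list, 0x7f0000001080UL);
    struct user_db_page *b = db_get(&list, 0x7f0000001880UL); /* same 4KB page */

    printf("same entry: %d, refcnt: %d\n", a == b, a->refcnt); /* 1, 2 */
    return 0;
}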
/linux-master/arch/sh/include/asm/
hugetlb.h
6 #include <asm/page.h>
30 static inline void arch_clear_hugepage_flags(struct page *page)
32 clear_bit(PG_dcache_clean, &page->flags);
/linux-master/arch/nios2/include/asm/
cachetype.h
5 #include <asm/page.h>
/linux-master/fs/nfs/
symlink.c
9 * Jun 7 1999, cache symlink lookups in the page cache. -DaveM
25 /* Symlink caching in the page cache is even more simplistic
34 error = NFS_PROTO(inode)->readlink(inode, &folio->page, 0, PAGE_SIZE);
51 struct page *page;
58 page = find_get_page(inode->i_mapping, 0);
59 if (!page)
61 if (!PageUptodate(page)) {
62 put_page(page);
69 page
[all...]
/linux-master/tools/testing/memblock/
internal.h
16 struct page {};
18 void memblock_free_pages(struct page *page, unsigned long pfn,
/linux-master/kernel/dma/
ops_helpers.c
8 static struct page *dma_common_vaddr_to_page(void *cpu_addr)
22 struct page *page = dma_common_vaddr_to_page(cpu_addr);
27 sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
42 struct page *page = dma_common_vaddr_to_page(cpu_addr);
54 page_to_pfn(page) + vma->vm_pgoff,
61 struct page *dma_common_alloc_pages(struct device *dev, size_t size,
65 struct page *page;
84 dma_common_free_pages(struct device *dev, size_t size, struct page *page, dma_addr_t dma_handle, enum dma_data_direction dir)
[all...]
/linux-master/arch/arm64/include/asm/
mte.h
15 #include <linux/page-flags.h>
26 int mte_save_tags(struct page *page);
28 void mte_restore_tags(swp_entry_t entry, struct page *page);
39 /* simple lock to avoid multiple threads tagging the same page */
42 static inline void set_page_mte_tagged(struct page *page)
46 * before the page flags update.
49 set_bit(PG_mte_tagged, &page
52 page_mte_tagged(struct page *page)
77 try_page_mte_tagging(struct page *page)
111 set_page_mte_tagged(struct page *page)
114 page_mte_tagged(struct page *page)
118 try_page_mte_tagging(struct page *page)
[all...]
/linux-master/fs/ecryptfs/
mmap.c
16 #include <linux/page-flags.h>
28 * Get one page from cache or lower f/s, return error otherwise.
30 * Returns locked and up-to-date page (if ok), with increased
33 struct page *ecryptfs_get_locked_page(struct inode *inode, loff_t index)
35 struct page *page = read_mapping_page(inode->i_mapping, index, NULL);
36 if (!IS_ERR(page))
37 lock_page(page);
38 return page;
43 * @page
52 ecryptfs_writepage(struct page *page, struct writeback_control *wbc)
109 ecryptfs_copy_up_encrypted_with_header(struct page *page, struct ecryptfs_crypt_stat *crypt_stat)
183 struct page *page = &folio->page;
237 fill_zeros_to_end_of_page(struct page *page, unsigned int to)
271 struct page *page;
463 ecryptfs_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata)
[all...]
/linux-master/arch/x86/kernel/cpu/sgx/
sgx.h
17 "EREMOVE returned %d (0x%x) and an EPC page was leaked. SGX may become unusable. " \
26 /* Pages, which are being tracked by the page reclaimer. */
42 * the free page list local to the node is stored here.
66 static inline unsigned long sgx_get_epc_phys_addr(struct sgx_epc_page *page)
68 struct sgx_epc_section *section = &sgx_epc_sections[page->section];
71 index = ((unsigned long)page - (unsigned long)section->pages) / sizeof(*page);
76 static inline void *sgx_get_epc_virt_addr(struct sgx_epc_page *page)
78 struct sgx_epc_section *section = &sgx_epc_sections[page->section];
81 index = ((unsigned long)page
[all...]
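
sgx_get_epc_phys_addr() recovers the page's index by pointer arithmetic against the section's metadata array, then scales it back into a physical address. A standalone sketch of that arithmetic with illustrative sizes (4KB EPC pages assumed):

#include <stdio.h>

struct epc_page { unsigned int flags; };

struct epc_section {
    unsigned long phys_addr;
    struct epc_page pages[64];  /* per-page metadata array */
};

int main(void)
{
    struct epc_section section = { .phys_addr = 0x100000000UL };
    struct epc_page *page = &section.pages[5];

    /* byte offset into the metadata array, divided by element size */
    unsigned long index = ((unsigned long)page -
                           (unsigned long)section.pages) / sizeof(*page);

    printf("index=%lu phys=%#lx\n",
           index, section.phys_addr + (index << 12)); /* 4KB per page */
    return 0;
}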
/linux-master/arch/arm/include/asm/
highmem.h
24 * page usage count does not decrease to zero while we're using its
49 extern void *kmap_high_get(struct page *page);
51 static inline void *arch_kmap_local_high_get(struct page *page)
55 return kmap_high_get(page);
60 static inline void *kmap_high_get(struct page *page)
/linux-master/fs/squashfs/
page_actor.h
12 struct page **page;
19 struct page *last_page;
32 struct page **page, int pages, int length);
33 static inline struct page *squashfs_page_actor_free(struct squashfs_page_actor *actor)
35 struct page *last_page = actor->last_page;
/linux-master/arch/alpha/include/asm/
cacheflush.h
38 flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
52 struct page *page, unsigned long addr, int len);
61 struct page *page, unsigned int nr)
63 flush_icache_user_page(vma, page, 0, 0);
60 flush_icache_pages(struct vm_area_struct *vma, struct page *page, unsigned int nr)
/linux-master/include/linux/
migrate.h
17 * - negative errno on page migration failure;
18 * - zero on page migration success;
24 * struct movable_operations - Driver page migration
26 * The VM calls this function to prepare the page to be moved. The page
28 * return ``true`` if the page is movable and ``false`` if it is not
30 * page->lru field, so the driver must preserve any information which
35 * @src page. The driver should copy the contents of the
36 * @src page to the @dst page an
95 isolate_movable_page(struct page *page, isolate_mode_t mode)
111 PageMovable(struct page *page)
112 __SetPageMovable(struct page *page, const struct movable_operations *ops)
116 __ClearPageMovable(struct page *page)
136 page_movable_ops(struct page *page)
[all...]
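
The comment block in migrate.h describes a three-step driver protocol: isolate the page, migrate @src to @dst, and put the page back if migration fails. A userspace sketch of that flow; the struct layout and names are simplified stand-ins, not the kernel's exact API:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct page { char data[64]; bool isolated; };

struct movable_operations {
    bool (*isolate_page)(struct page *page);
    int (*migrate_page)(struct page *dst, struct page *src);
    void (*putback_page)(struct page *page);
};

static bool demo_isolate(struct page *page)
{
    page->isolated = true;  /* driver pins its private state */
    return true;            /* false would mean "not movable right now" */
}

static int demo_migrate(struct page *dst, struct page *src)
{
    memcpy(dst->data, src->data, sizeof(dst->data));
    return 0;               /* negative errno on failure */
}

static void demo_putback(struct page *page)
{
    page->isolated = false; /* migration failed; restore the page */
}

static int migrate_one(const struct movable_operations *ops,
                       struct page *dst, struct page *src)
{
    int rc;

    if (!ops->isolate_page(src))
        return -1;
    rc = ops->migrate_page(dst, src);
    if (rc)
        ops->putback_page(src);
    return rc;
}

int main(void)
{
    struct movable_operations ops = {
        .isolate_page = demo_isolate,
        .migrate_page = demo_migrate,
        .putback_page = demo_putback,
    };
    struct page src = { .data = "movable payload" }, dst = { 0 };

    printf("rc=%d dst=\"%s\"\n", migrate_one(&ops, &dst, &src), dst.data);
    return 0;
}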
highmem.h
17 * kmap - Map a page for long term usage
18 * @page: Pointer to the page to be mapped
37 static inline void *kmap(struct page *page);
41 * @page: Pointer to the page which was mapped by kmap()
46 static inline void kunmap(struct page *page);
49 * kmap_to_page - Get the page fo
186 flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
202 clear_user_highpage(struct page *page, unsigned long vaddr)
237 clear_highpage(struct page *page)
244 clear_highpage_kasan_tagged(struct page *page)
254 tag_clear_highpage(struct page *page)
268 zero_user_segments(struct page *page, unsigned start1, unsigned end1, unsigned start2, unsigned end2)
289 zero_user_segment(struct page *page, unsigned start, unsigned end)
295 zero_user(struct page *page, unsigned start, unsigned size)
401 memset_page(struct page *page, size_t offset, int val, size_t len)
411 memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
421 memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
432 memzero_page(struct page *page, size_t offset, size_t len)
659 unmap_and_put_page(struct page *page, void *addr)
[all...]
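
The memcpy_from_page()-style helpers listed above all follow one pattern: take a short-lived local mapping, do the copy through it, drop the mapping. A userspace sketch of that pattern with kmap_local_page()/kunmap_local() stubbed out, since in the kernel those calls exist to make highmem pages temporarily addressable:

#include <stdio.h>
#include <string.h>

struct page { char data[4096]; };  /* stand-in for struct page */

/* stubs: in the kernel these create/drop a temporary mapping */
static void *kmap_local_page(struct page *page) { return page->data; }
static void kunmap_local(void *addr) { (void)addr; }

static void memcpy_from_page(char *to, struct page *page,
                             size_t offset, size_t len)
{
    char *from = kmap_local_page(page);

    memcpy(to, from + offset, len);
    kunmap_local(from);
}

int main(void)
{
    struct page p;
    char buf[6] = { 0 };

    memcpy(p.data + 100, "hello", 5);
    memcpy_from_page(buf, &p, 100, 5);
    printf("%s\n", buf);
    return 0;
}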
memremap.h
39 * important to remember that there are certain points at which the struct page
40 * must be treated as an opaque object, rather than a "normal" struct page.
49 * type. Any page of a process can be migrated to such memory. However no one
54 * coherent and supports page pinning. In support of coordinating page
56 * wakeup event whenever a page is unpinned and becomes idle. This
62 * coherent and supports page pinning. This is for example used by DAX devices
80 * Called once the page refcount reaches 0. The reference count will be
82 * for handing out the page again.
84 void (*page_free)(struct page *pag
160 is_device_private_page(const struct page *page)
172 is_pci_p2pdma_page(const struct page *page)
179 is_device_coherent_page(const struct page *page)
[all...]
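
The page_free() contract described above is a refcount-to-zero callback: when a device page's last reference is dropped, the owner is notified so it can recycle the page, and the count is reset to one before the page is handed out again. A minimal sketch of that contract with hypothetical names:

#include <stdio.h>

struct dev_page {
    int refcount;
    void (*page_free)(struct dev_page *page);
};

static void demo_page_free(struct dev_page *page)
{
    /* owner reclaims the page; the count is reset to 1 before reuse */
    page->refcount = 1;
    printf("page recycled\n");
}

static void put_dev_page(struct dev_page *page)
{
    if (--page->refcount == 0)
        page->page_free(page);  /* fires only at zero */
}

int main(void)
{
    struct dev_page p = { .refcount = 2, .page_free = demo_page_free };

    put_dev_page(&p);   /* 2 -> 1, nothing happens */
    put_dev_page(&p);   /* 1 -> 0, callback fires */
    return 0;
}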
/linux-master/mm/
page_alloc.c
14 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
69 * Skip free page reporting notification for the (possibly merged) page.
70 * This does not hinder free page reporting from grabbing the page,
72 * the free page reporting infrastructure about a newly freed page. For
73 * example, used when temporarily pulling a page from a freelist and
79 * Place the (possibly merged) page to the tail of the freelist. Will ignore
80 * page shufflin
217 get_pcppage_migratetype(struct page *page)
222 set_pcppage_migratetype(struct page *page, int migratetype)
345 get_pageblock_bitmap(const struct page *page, unsigned long pfn)
355 pfn_to_bitidx(const struct page *page, unsigned long pfn)
373 get_pfnblock_flags_mask(const struct page *page, unsigned long pfn, unsigned long mask)
393 get_pfnblock_migratetype(const struct page *page, unsigned long pfn)
406 set_pfnblock_flags_mask(struct page *page, unsigned long flags, unsigned long pfn, unsigned long mask)
432 set_pageblock_migratetype(struct page *page, int migratetype)
443 page_outside_zone_boundaries(struct zone *zone, struct page *page)
468 bad_range(struct zone *zone, struct page *page)
478 bad_range(struct zone *zone, struct page *page)
484 bad_page(struct page *page, const char *reason)
561 free_the_page(struct page *page, unsigned int order)
581 prep_compound_page(struct page *page, unsigned int order)
607 set_buddy_order(struct page *page, unsigned int order)
625 compaction_capture(struct capture_control *capc, struct page *page, int order, int migratetype)
656 compaction_capture(struct capture_control *capc, struct page *page, int order, int migratetype)
664 add_to_free_list(struct page *page, struct zone *zone, unsigned int order, int migratetype)
674 add_to_free_list_tail(struct page *page, struct zone *zone, unsigned int order, int migratetype)
688 move_to_free_list(struct page *page, struct zone *zone, unsigned int order, int migratetype)
696 del_page_from_free_list(struct page *page, struct zone *zone, unsigned int order)
725 buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn, struct page *page, unsigned int order)
765 __free_one_page(struct page *page, unsigned long pfn, struct zone *zone, unsigned int order, int migratetype, fpi_t fpi_flags)
784 VM_BUG_ON_PAGE(bad_range(zone, page), page);
909 page_expected_state(struct page *page, unsigned long check_flags)
929 page_bad_reason(struct page *page, unsigned long flags)
956 free_page_is_bad_report(struct page *page)
962 free_page_is_bad(struct page *page)
977 free_tail_page_prepare(struct page *head_page, struct page *page)
1065 should_skip_kasan_poison(struct page *page)
1073 kernel_init_pages(struct page *page, int numpages)
1084 free_pages_prepare(struct page *page, unsigned int order)
1092 VM_BUG_ON_PAGE(PageTail(page), page);
1107 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
1195 struct page *page;
1234 VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
1247 free_one_page(struct zone *zone, struct page *page, unsigned long pfn, unsigned int order, int migratetype, fpi_t fpi_flags)
1263 __free_pages_ok(struct page *page, unsigned int order, fpi_t fpi_flags)
1285 __free_pages_core(struct page *page, unsigned int order)
1387 expand(struct zone *zone, struct page *page, int low, int high, int migratetype)
1395 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
1411 check_new_page_bad(struct page *page)
1426 check_new_page(struct page *page)
1436 check_new_pages(struct page *page, unsigned int order)
1478 post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags)
1538 prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, unsigned int alloc_flags)
1568 struct page *page;
1621 struct page *page;
1642 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
1643 VM_BUG_ON_PAGE(page_zone(page) != zone, page);
1654 move_freepages_block(struct zone *zone, struct page *page, int migratetype, int *num_movable)
1765 steal_suitable_fallback(struct zone *zone, struct page *page, unsigned int alloc_flags, int start_type, bool whole_block)
1878 reserve_highatomic_pageblock(struct page *page, struct zone *zone)
1930 struct page *page;
2012 struct page *page;
2090 struct page *page;
2133 struct page *page = __rmqueue(zone, order, migratetype,
2342 free_unref_page_prepare(struct page *page, unsigned long pfn, unsigned int order)
2428 free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp, struct page *page, int migratetype, unsigned int order)
2479 free_unref_page(struct page *page, unsigned int order)
2500 free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE);
2616 split_page(struct page *page, unsigned int order)
2620 VM_BUG_ON_PAGE(PageCompound(page), page);
2630 __isolate_free_page(struct page *page, unsigned int order)
2682 __putback_isolated_page(struct page *page, unsigned int order, int mt)
2725 struct page *page;
2819 struct page *page;
2850 struct page *page;
2896 struct page *page;
2922 VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
3194 struct page *page;
3404 struct page *page;
3430 struct page *page;
3519 struct page *page = NULL;
3791 struct page *page = NULL;
4052 struct page *page = NULL;
4392 struct page *page;
4542 struct page *page;
4607 struct page *page = __alloc_pages(gfp | __GFP_COMP, order,
4620 struct page *page;
4655 __free_pages(struct page *page, unsigned int order)
4692 struct page *page = NULL;
4720 __page_frag_cache_drain(struct page *page, unsigned int count)
4722 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
4734 struct page *page;
4807 struct page *page = virt_to_head_page(addr);
4819 struct page *page = virt_to_page((void *)addr);
5673 adjust_managed_page_count(struct page *page, long count)
5692 struct page *page = virt_to_page(pos);
6229 struct page *page;
6255 struct page *page;
6478 struct page *page;
6569 struct page *page = pfn_to_page(pfn);
6627 struct page *page;
6669 is_free_buddy_page(struct page *page)
6691 break_down_buddy_pages(struct zone *zone, struct page *page, struct page *target, int low, int high, int migratetype)
6720 take_page_off_buddy(struct page *page)
6757 put_page_back_buddy(struct page *page)
6815 page_contains_unaccepted(struct page *page, unsigned int order)
6823 accept_page(struct page *page, unsigned int order)
6833 struct page *page;
6890 __free_unaccepted(struct page *page)
6914 page_contains_unaccepted(struct page *page, unsigned int order)
6919 accept_page(struct page *page, unsigned int order)
6933 __free_unaccepted(struct page *page)
[all...]
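
Several of the page_alloc.c hits (__free_one_page(), buddy_merge_likely(), break_down_buddy_pages()) rest on the same buddy arithmetic: the buddy of an order-N block is found by flipping bit N of its pfn, and a merged pair starts at the lower pfn of the two. A standalone sketch of that arithmetic; the helper mirrors __find_buddy_pfn() in mm/internal.h:

#include <stdio.h>

static unsigned long find_buddy_pfn(unsigned long pfn, unsigned int order)
{
    return pfn ^ (1UL << order);    /* flip bit N of the pfn */
}

int main(void)
{
    unsigned long pfn = 12;     /* order-2 block: pfns 12..15 */
    unsigned int order = 2;
    unsigned long buddy = find_buddy_pfn(pfn, order);   /* 8 */
    unsigned long combined = pfn & buddy;   /* merged order-3 block at 8 */

    printf("pfn %lu order %u: buddy %lu, merged block at %lu\n",
           pfn, order, buddy, combined);
    return 0;
}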
/linux-master/include/trace/events/
page_pool.h
45 const struct page *page, u32 release),
47 TP_ARGS(pool, page, release),
51 __field(const struct page *, page)
58 __entry->page = page;
60 __entry->pfn = page_to_pfn(page);
63 TP_printk("page_pool=%p page=%p pfn=0x%lx release=%u",
64 __entry->pool, __entry->page, __entr
[all...]
