/linux-master/kernel/

profile.c
    267   struct page *page;  (local)
    275   page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[i]);
    277   __free_page(page);
    286   struct page *page;  (local)
    294   page = __alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
    295   if (!page) {
    299   per_cpu(cpu_profile_hits, cpu)[i] = page_address(page);
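The profile.c hits show the round trip every kernel buffer built from raw pages makes: allocate a zeroed page on a given NUMA node, keep only its kernel virtual address, and recover the struct page with virt_to_page() when freeing. A minimal sketch of that pattern (function names are illustrative, not taken from profile.c):

    #include <linux/gfp.h>
    #include <linux/mm.h>

    /* Allocate one zeroed page on @node; return its kernel virtual address. */
    static void *alloc_node_buffer(int node)
    {
            struct page *page;

            page = __alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
            if (!page)
                    return NULL;
            return page_address(page);  /* lowmem: a direct mapping exists */
    }

    /* Free a buffer obtained from alloc_node_buffer(). */
    static void free_node_buffer(void *addr)
    {
            if (addr)
                    __free_page(virt_to_page(addr));  /* inverse of page_address() */
    }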
bounds.c
    10    #include <linux/page-flags.h>
vmcore_info.c
    18    #include <asm/page.h>
    166   VMCOREINFO_STRUCT_SIZE(page);
    172   VMCOREINFO_OFFSET(page, flags);
    173   VMCOREINFO_OFFSET(page, _refcount);
    174   VMCOREINFO_OFFSET(page, mapping);
    175   VMCOREINFO_OFFSET(page, lru);
    176   VMCOREINFO_OFFSET(page, _mapcount);
    177   VMCOREINFO_OFFSET(page, private);
    178   VMCOREINFO_OFFSET(page, compound_head);
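These macros export struct page's size and field offsets into the vmcoreinfo note so crash-dump tools (makedumpfile, crash) can parse page structs in a dump without matching kernel headers. The real macros boil down to string appends of offsetof() results; a simplified model, with vmcoreinfo_append_str assumed to behave like a bounded printf into the note buffer:

    #include <linux/stddef.h>  /* offsetof */

    /* Simplified: VMCOREINFO_OFFSET records "OFFSET(page.flags)=N" style
     * lines that dump parsers read back at crash time. */
    #define SKETCH_VMCOREINFO_OFFSET(name, field)                           \
            vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #field,     \
                                  (unsigned long)offsetof(struct name, field))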
/linux-master/fs/erofs/

internal.h
    220   struct page *page;  (member in struct erofs_buf)
    224   #define __EROFS_BUF_INITIALIZER ((struct erofs_buf){ .page = NULL })
    323   struct page *erofs_grab_cache_page_nowait(struct address_space *mapping,
    421   static inline void *erofs_vm_map_ram(struct page **pages, unsigned int count)
    441   struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp);
    442   static inline void erofs_pagepool_add(struct page **pagepool, struct page *page)  (argument)
    [all...]
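erofs keeps a pool of spare pages threaded through each page's private field, so the free list needs no extra allocations. The listing truncates the helper bodies; a plausible sketch of the push/pop pair (the pop helper's name is illustrative):

    #include <linux/mm.h>

    /* Push @page onto the pool; the old head is stashed in page->private. */
    static inline void pagepool_add(struct page **pagepool, struct page *page)
    {
            set_page_private(page, (unsigned long)*pagepool);
            *pagepool = page;
    }

    /* Pop one page from the pool, or return NULL if it is empty. */
    static inline struct page *pagepool_pop(struct page **pagepool)
    {
            struct page *page = *pagepool;

            if (page) {
                    *pagepool = (struct page *)page_private(page);
                    set_page_private(page, 0);
            }
            return page;
    }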
/linux-master/drivers/soundwire/

amd_manager.c
    262   if (msg->page) {
/linux-master/drivers/irqchip/

irq-gic-v3-its.c
    76    * value of BASER register configuration and ITS page size.
    141   /* Convert page order to size in bytes */
    2211  static struct page *its_allocate_prop_table(gfp_t gfp_flags)
    2213  struct page *prop_page;
    2224  static void its_free_prop_table(struct page *prop_page)
    2279  struct page *page;  (local)
    2284  page = its_allocate_prop_table(GFP_NOWAIT);
    2285  if (!page) {
    2290  gic_rdists->prop_table_pa = page_to_phys(page);
    2336  struct page *page;  (local)
    2780  struct page *page;  (local)
    2851  struct page *page;  (local)
    3319  struct page *page;  (local)
    5095  struct page *page;  (local)
    [all...]
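The prop-table hits show the standard way to hand a CPU-allocated table to an interrupt controller: allocate physically contiguous pages, then program the device with page_to_phys() (line 2290 stores exactly that into gic_rdists->prop_table_pa). A hedged sketch of the allocate/program/free trio; the order constant is illustrative, the real ITS derives it from the BASER configuration:

    #include <linux/gfp.h>
    #include <linux/mm.h>

    #define TABLE_ORDER 2   /* illustrative: 2^2 = 4 contiguous pages */

    static struct page *alloc_hw_table(gfp_t gfp)
    {
            /* __GFP_ZERO so the device never observes stale data */
            return alloc_pages(gfp | __GFP_ZERO, TABLE_ORDER);
    }

    static void point_hw_at_table(struct page *page, phys_addr_t *reg)
    {
            *reg = page_to_phys(page);  /* devices use physical addresses */
    }

    static void free_hw_table(struct page *page)
    {
            __free_pages(page, TABLE_ORDER);
    }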
/linux-master/arch/x86/include/asm/

pgtable_types.h
    13    #define _PAGE_BIT_PWT 3 /* page write through */
    14    #define _PAGE_BIT_PCD 4 /* page cache disabled */
    17    #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
    95    * Tracking soft dirty bit when a page goes to a swap is tricky.
    101   * Please note that this bit must be treated as swap dirty page
    284   /* Extracts the PFN from a (pte|pmd|pud|pgd)val_t of a 4KB page */
    516   typedef struct page *pgtable_t;
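The _PAGE_BIT_* constants are bit positions, not masks; the header turns them into single-bit masks (via the _AT() macro, so they also work in assembly) that get OR-ed into PTE values. A simplified illustration of the pattern, with the type reduced to a plain u64:

    #include <linux/types.h>

    typedef u64 pteval_t;

    #define _PAGE_BIT_PWT 3                 /* page write-through */
    #define _PAGE_BIT_PCD 4                 /* page cache disabled */

    #define _PAGE_PWT ((pteval_t)1 << _PAGE_BIT_PWT)
    #define _PAGE_PCD ((pteval_t)1 << _PAGE_BIT_PCD)

    /* Example: an uncached mapping sets both PWT and PCD in the PTE. */
    static inline pteval_t pte_mkuncached(pteval_t pte)
    {
            return pte | _PAGE_PWT | _PAGE_PCD;
    }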
/linux-master/arch/riscv/include/asm/

pgtable.h
    28    /* Number of entries in the page global directory */
    30    /* Number of entries in the page table */
    34    * Half of the kernel address space (1/4 of the entries of the page global
    87    #define vmemmap ((struct page *)VMEMMAP_START - (phys_ram_base >> PAGE_SHIFT))
    116   #include <asm/page.h>
    267   static inline struct page *pmd_page(pmd_t pmd)
    320   /* Yields the page frame number (PFN) of a page table entry */
    333   /* Constructs a page table entry */
    343   #define mk_pte(page, pro [all...]
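The vmemmap definition at line 87 biases the struct page array downward by the first RAM PFN, so the array can be indexed with absolute PFNs even though memory does not start at physical address 0. With that bias in place, the PFN conversions reduce to pointer arithmetic, mirroring asm-generic/memory_model.h:

    /* &vmemmap[pfn] is valid for any pfn >= (phys_ram_base >> PAGE_SHIFT). */
    #define sketch_pfn_to_page(pfn)   (vmemmap + (pfn))
    #define sketch_page_to_pfn(page)  ((unsigned long)((page) - vmemmap))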
page.h
    25    * PAGE_OFFSET -- the first address of the first page of memory.
    26    * When not using MMU this corresponds to the first free page in
    27    * physical memory (aligned on a page boundary).
    48    void clear_page(void *page);
    54    #define clear_user_page(pgaddr, vaddr, page) clear_page(pgaddr)
    76    typedef struct page *pgtable_t;
    182   #define page_to_virt(page) (pfn_to_virt(page_to_pfn(page)))
    184   #define page_to_phys(page) (pfn_to_phys(page_to_pfn(page)))
    [all...]
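page_to_virt() and page_to_phys() at lines 182/184 both pivot through the PFN: first page_to_pfn(), then a shift by PAGE_SHIFT plus, for virtual addresses, the linear-map offset. A simplified model of those two final steps (the PAGE_OFFSET value below is purely illustrative; the real RISC-V code also handles the kernel/linear map split):

    #define SKETCH_PAGE_SHIFT  12
    #define SKETCH_PAGE_OFFSET 0xffffffe000000000UL  /* illustrative */

    static inline unsigned long sketch_pfn_to_phys(unsigned long pfn)
    {
            return pfn << SKETCH_PAGE_SHIFT;
    }

    static inline void *sketch_pfn_to_virt(unsigned long pfn)
    {
            /* linear mapping: virt = phys + PAGE_OFFSET */
            return (void *)(sketch_pfn_to_phys(pfn) + SKETCH_PAGE_OFFSET);
    }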
/linux-master/tools/testing/selftests/mm/

run_vmtests.sh
    61    test soft dirty page bit semantics
    266   # For this test, we need one and just one huge page
/linux-master/net/tls/

tls_strp.c
    67    /* Create a new skb with the contents of input copied to its page frags */
    222   /* Assume one page is more than enough for headers */
    387   struct page *page;  (local)
    400   /* If we don't know the length go max plus page for cipher overhead */
    404   page = alloc_page(strp->sk->sk_allocation);
    405   if (!page) {
    411   page, 0, 0);
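The tls_strp.c hits outline how the strparser grows an skb: allocate a page with the socket's sk_allocation flags and attach it as a frag with offset 0 and length 0, to be filled by a later copy. The attach call itself is cut off at line 411; skb_fill_page_desc() below is one plausible choice for it:

    #include <linux/skbuff.h>
    #include <net/sock.h>

    static int attach_fresh_frag(struct sock *sk, struct sk_buff *skb)
    {
            struct page *page;

            /* honor the socket's allocation context (GFP_KERNEL vs GFP_ATOMIC) */
            page = alloc_page(sk->sk_allocation);
            if (!page)
                    return -ENOMEM;

            /* frag starts empty: offset 0, length 0; data is copied in later */
            skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, 0, 0);
            return 0;
    }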
/linux-master/net/bluetooth/

hci_sync.c
    3232  /* 160 msec page scan interval */
    3766  /* When SSP is available, then the host features page
    3819  static int hci_read_local_ext_features_sync(struct hci_dev *hdev, u8 page)  (argument)
    3827  cp.page = page;
    4111  /* Read features beyond page 1 if available */
    4114  u8 page;  (local)
    4120  for (page = 2; page < HCI_MAX_PAGES && page < [all...]
hci_event.c
    905   bt_dev_warn(hdev, "broken local ext features page 2");
    910   if (rp->page < HCI_MAX_PAGES)
    911   memcpy(hdev->features[rp->page], rp->features, 8);
    3759  cp.page = 0x01;
    3864  /* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 4, Part E page 2554
    3876  /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2553
    4988  if (ev->page < HCI_MAX_PAGES)
    4989  memcpy(conn->features[ev->page], ev->features, 8);
    4991  if (!ev->status && ev->page == 0x01) {
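Between the two files, feature pages flow like this: hci_event.c requests page 0x01 when the basic features arrive (line 3759), the completion handlers copy each 8-byte page into hdev->features[] or conn->features[] indexed by page number, and hci_sync.c walks the remaining pages from 2 upward. The loop condition at line 4120 is truncated; a hedged reconstruction, assuming it is bounded by the controller-reported maximum as the rp->page checks suggest:

    static int read_remaining_feature_pages(struct hci_dev *hdev)
    {
            u8 page;

            /* pages 0 and 1 were fetched earlier; walk the rest */
            for (page = 2; page < HCI_MAX_PAGES && page <= hdev->max_page; page++) {
                    int err = hci_read_local_ext_features_sync(hdev, page);

                    if (err)
                            return err;
            }
            return 0;
    }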
/linux-master/mm/

zswap.c
    33    #include <linux/page-flags.h>
    68    /* Compressed page was too big for the allocator to (optimally) store */
    131   * If disabled every page is considered non-same-value filled.
    199   * page within zswap.
    203   * length - the length in bytes of the compressed page data. Needed during
    204   * decompression. For a same value filled page length is 0, and both
    207   * handle - zpool allocation handle that stores the compressed page data
    495   "%s: no page storage pool!\n", __func__);
    1018  sg_set_page(&input, &folio->page, PAGE_SIZE, 0);
    1034  * case, zswap actually does store and load page b
    1072  zswap_decompress(struct zswap_entry *entry, struct page *page)  (argument)
    1470  unsigned long *page;  (local)
    1492  unsigned long *page;  (local)
    1645  struct page *page = &folio->page;  (local)
    [all...]
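The two "unsigned long *page" locals at lines 1470/1492 belong to zswap's same-value-filled optimization described in the "length" comment: before compressing, zswap scans the page word by word, and if every word is identical it stores just that one value with length 0. A plausible sketch of the check (the function name is illustrative; comparing the last word first is a cheap early reject for nearly-uniform pages):

    #include <linux/mm.h>

    /* True iff the page at @ptr is one repeated word; the word is returned
     * in @value so it can be replayed at load time instead of decompressed. */
    static bool page_same_filled(void *ptr, unsigned long *value)
    {
            unsigned long *page = ptr;
            unsigned long val = page[0];
            unsigned int pos, last = PAGE_SIZE / sizeof(*page) - 1;

            if (val != page[last])
                    return false;
            for (pos = 1; pos < last; pos++)
                    if (val != page[pos])
                            return false;
            *value = val;
            return true;
    }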
hugetlb.c
    40    #include <asm/page.h>
    61    return cma_pages_valid(hugetlb_cma[folio_nid(folio)], &folio->page,
    88    * Serializes faults on the same logical page. This is used to
    214   * Return the number of global page reservations that must be dropped.
    418   * However, the lock is also used to synchronize page
    690   * Add the huge page range represented by [f, t) to the reserve
    704   * 1 page will only require at most 1 entry.
    928   * the reserve map region for a page. The huge page itself was free'ed
    929   and removed from the page cach
    1568  struct page *page;  (local)
    2187  struct page *page;  (local)
    2401  dissolve_free_huge_page(struct page *page)  (argument)
    2487  struct page *page;  (local)
    3096  isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)  (argument)
    3346  struct page *page = pfn_to_page(pfn);  (local)
    3411  struct page *page = virt_to_page(m);  (local)
    5300  make_huge_pte(struct vm_area_struct *vma, struct page *page, int writable)  (argument)
    5659  struct page *page;  (local)
    5870  unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, struct page *page, unsigned long address)  (argument)
    6883  struct page *page = NULL;  (local)
    7016  struct page *page = pfn_swap_entry_to_page(entry);  (local)
    [all...]
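make_huge_pte() at line 5300 is where a huge page becomes hardware-visible: a PTE is built from the page and the VMA's protection, with write/dirty set only for writable mappings. Its body is not shown in the listing; a simplified, hedged sketch using the generic huge_pte_* helpers (the per-arch fixup step is elided):

    static pte_t make_huge_pte_sketch(struct vm_area_struct *vma,
                                      struct page *page, int writable)
    {
            pte_t entry = mk_huge_pte(page, vma->vm_page_prot);

            if (writable)
                    entry = huge_pte_mkwrite(huge_pte_mkdirty(entry));
            else
                    entry = huge_pte_wrprotect(entry);

            return pte_mkyoung(entry);  /* freshly faulted: mark accessed */
    }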
/linux-master/lib/

stackdepot.c
    7     * of separate page allocations.
    592   struct page *page = NULL;  (local)
    635   page = alloc_pages(alloc_flags, DEPOT_POOL_ORDER);
    636   if (page)
    637   prealloc = page_address(page);
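Stack depot avoids per-record allocations (the "separate page allocations" the line-7 comment refers to) by grabbing multi-page pools up front and carving records out of them. A hedged sketch of the pool alloc/free pair; DEPOT_POOL_ORDER's value is not shown in the listing, so the constant below is illustrative:

    #include <linux/gfp.h>
    #include <linux/mm.h>

    #define POOL_ORDER 2   /* illustrative: 2^2 = 4 contiguous pages per pool */

    static void *prealloc_pool(gfp_t alloc_flags)
    {
            struct page *page = alloc_pages(alloc_flags, POOL_ORDER);

            return page ? page_address(page) : NULL;
    }

    static void free_pool(void *pool)
    {
            if (pool)
                    free_pages((unsigned long)pool, POOL_ORDER);
    }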
/linux-master/include/net/

sock.h
    315   * @sk_frag: cached page frag
    1409  /* 1 MB per cpu, in page units */
    2259  skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from, struct sk_buff *skb, struct page *page, int off, int copy)  (argument)
    2261  struct page *page,
    2266  err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) + off,
    2549  * Both direct reclaim and page faults can nest inside other
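skb_copy_to_page_nocache() pairs the copy with the bookkeeping every page-frag write must do: grow the skb's length fields and charge the socket's send-buffer accounting. The body is cut off after line 2266; a plausible reconstruction using the accounting helpers sock.h provides:

    static inline int copy_to_page_nocache(struct sock *sk, struct iov_iter *from,
                                           struct sk_buff *skb, struct page *page,
                                           int off, int copy)
    {
            int err = skb_do_copy_data_nocache(sk, skb, from,
                                               page_address(page) + off,
                                               copy, skb->len);
            if (err)
                    return err;

            skb->len += copy;              /* payload grew by @copy bytes... */
            skb->data_len += copy;         /* ...all of it in paged data */
            skb->truesize += copy;
            sk_wmem_queued_add(sk, copy);  /* charge the socket's send buffer */
            sk_mem_charge(sk, copy);
            return 0;
    }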
/linux-master/include/linux/

page-flags.h
    3     * Macros for manipulating and testing page->flags
    18    * Various page->flags bits:
    20    * PG_reserved is set for special pages. The "struct page" of such a page
    25    * - Pages reserved or allocated early during boot (before the page allocator
    27    * initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
    29    * be given to the page allocator.
    32    * - The zero page(s)
    33    * - Pages not added to the page allocator when onlining a section because
    49    * Consequently, PG_reserved for a page mappe
    207   page_fixed_fake_head(const struct page *page)  (argument)
    233   page_fixed_fake_head(const struct page *page)  (argument)
    239   page_is_fake_head(const struct page *page)  (argument)
    244   _compound_head(const struct page *page)  (argument)
    283   PageTail(const struct page *page)  (argument)
    288   PageCompound(const struct page *page)  (argument)
    295   PagePoisoned(const struct page *page)  (argument)
    303   page_init_poison(struct page *page, size_t size)  (argument)
    311   const struct page *page = &folio->page;  (local)
    313   VM_BUG_ON_PGFLAGS(PageTail(page), page);  (local)
    320   struct page *page = &folio->page;  (local)
    322   VM_BUG_ON_PGFLAGS(PageTail(page), page);  (local)
    587   PageSwapCache(const struct page *page)  (argument)
    691   PageMappingFlags(const struct page *page)  (argument)
    701   PageAnon(const struct page *page)  (argument)
    712   __PageMovable(const struct page *page)  (argument)
    731   PageKsm(const struct page *page)  (argument)
    787   PageUptodate(const struct page *page)  (argument)
    809   __SetPageUptodate(struct page *page)  (argument)
    814   SetPageUptodate(struct page *page)  (argument)
    834   PageHead(const struct page *page)  (argument)
    855   set_compound_head(struct page *page, struct page *head)  (argument)
    860   clear_compound_head(struct page *page)  (argument)
    866   ClearPageCompound(struct page *page)  (argument)
    887   PageTransHuge(const struct page *page)  (argument)
    889   VM_BUG_ON_PAGE(PageTail(page), page);  (local)
    898   PageTransCompound(const struct page *page)  (argument)
    908   PageTransTail(const struct page *page)  (argument)
    961   page_has_type(const struct page *page)  (argument)
    1058  PageHuge(const struct page *page)  (argument)
    1068  is_page_hwpoison(struct page *page)  (argument)
    1079  PageAnonExclusive(const struct page *page)  (argument)
    1082  VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);  (local)
    1086  SetPageAnonExclusive(struct page *page)  (argument)
    1089  VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);  (local)
    1093  ClearPageAnonExclusive(struct page *page)  (argument)
    1096  VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);  (local)
    1100  __ClearPageAnonExclusive(struct page *page)  (argument)
    1103  VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);  (local)
    1152  page_has_private(const struct page *page)  (argument)
    [all...]
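_compound_head() at line 244 is the pivot of compound-page handling: a tail page stores the address of its head in page->compound_head with bit 0 set, so decoding is one load and one test; set_compound_head()/clear_compound_head() (lines 855/860) maintain that encoding. A simplified sketch of the decode, ignoring the page_fixed_fake_head() special case used by the hugetlb vmemmap optimization:

    #include <linux/mm_types.h>

    /* Bit 0 of page->compound_head means "this is a tail page"; the
     * remaining bits are the head page pointer. */
    static inline struct page *compound_head_sketch(const struct page *page)
    {
            unsigned long head = READ_ONCE(page->compound_head);

            if (head & 1)
                    return (struct page *)(head - 1);
            return (struct page *)page;
    }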
mm.h
    24    #include <linux/page-flags.h>
    99    #include <asm/page.h>
    116   * a zero page mapping on a read fault.
    119   * related to the physical page in case of virtualization.
    132   /* This function must be updated when the size of struct page grows above 96
    139   static inline void __mm_zero_struct_page(struct page *page)  (argument)
    141   unsigned long *_pp = (void *)page;
    143   /* Check that struct page is either 56, 64, 72, 80, 88 or 96 bytes */
    144   BUILD_BUG_ON(sizeof(struct page)
    543   struct page *page; /* ->fault handlers should return a  (member in struct vm_fault)
    1089  compound_order(struct page *page)  (argument)
    1132  put_page_testzero(struct page *page)  (argument)
    1134  VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);  (local)
    1149  get_page_unless_zero(struct page *page)  (argument)
    1154  folio_get_nontail_page(struct page *page)  (argument)
    1213  page_mapcount_reset(struct page *page)  (argument)
    1229  page_mapcount(struct page *page)  (argument)
    1290  page_mapped(struct page *page)  (argument)
    1299  struct page *page = virt_to_page(x);  (local)
    1306  struct page *page = virt_to_page(x);  (local)
    1323  page_size(struct page *page)  (argument)
    1329  page_shift(struct page *page)  (argument)
    1338  thp_order(struct page *page)  (argument)
    1340  VM_BUG_ON_PGFLAGS(PageTail(page), page);  (local)
    1350  thp_size(struct page *page)  (argument)
    1440  put_devmap_managed_page_refs(struct page *page, int refs)  (argument)
    1449  put_devmap_managed_page_refs(struct page *page, int refs)  (argument)
    1455  put_devmap_managed_page(struct page *page)  (argument)
    1478  get_page(struct page *page)  (argument)
    1483  try_get_page(struct page *page)  (argument)
    1570  put_page(struct page *page)  (argument)
    1654  page_zone_id(struct page *page)  (argument)
    1662  page_to_nid(const struct page *page)  (argument)
    1732  page_cpupid_reset_last(struct page *page)  (argument)
    1744  page_cpupid_reset_last(struct page *page)  (argument)
    1809  page_cpupid_reset_last(struct page *page)  (argument)
    1831  page_kasan_tag(const struct page *page)  (argument)
    1843  page_kasan_tag_set(struct page *page, u8 tag)  (argument)
    1859  page_kasan_tag_reset(struct page *page)  (argument)
    1867  page_kasan_tag(const struct page *page)  (argument)
    1872  page_kasan_tag_set(struct page *page, u8 tag)  (argument)
    1873  page_kasan_tag_reset(struct page *page)  (argument)
    1877  page_zone(const struct page *page)  (argument)
    1882  page_pgdat(const struct page *page)  (argument)
    1898  set_page_section(struct page *page, unsigned long section)  (argument)
    1904  page_to_section(const struct page *page)  (argument)
    1971  page_maybe_dma_pinned(struct page *page)  (argument)
    1999  is_zero_page(const struct page *page)  (argument)
    2044  set_page_zone(struct page *page, enum zone_type zone)  (argument)
    2050  set_page_node(struct page *page, unsigned long node)  (argument)
    2056  set_page_links(struct page *page, enum zone_type zone, unsigned long node, unsigned long pfn)  (argument)
    2095  compound_nr(struct page *page)  (argument)
    2112  thp_nr_pages(struct page *page)  (argument)
    2185  arch_make_page_accessible(struct page *page)  (argument)
    2217  page_address(const struct page *page)  (argument)
    2221  set_page_address(struct page *page, void *address)  (argument)
    2234  lowmem_page_address(const struct page *page)  (argument)
    2256  page_index(struct page *page)  (argument)
    2268  page_is_pfmemalloc(const struct page *page)  (argument)
    2297  set_page_pfmemalloc(struct page *page)  (argument)
    2302  clear_page_pfmemalloc(struct page *page)  (argument)
    2479  struct page *page;  (local)
    2864  struct page *page = alloc_pages(gfp | __GFP_COMP, order);  (local)
    2878  struct page *page = ptdesc_page(pt);  (local)
    3135  free_reserved_page(struct page *page)  (argument)
    3144  mark_page_reserved(struct page *page)  (argument)
    3585  vmf_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page)  (argument)
    3696  kernel_poison_pages(struct page *page, int numpages)  (argument)
    3701  kernel_unpoison_pages(struct page *page, int numpages)  (argument)
    3709  __kernel_poison_pages(struct page *page, int nunmpages)  (argument)
    3710  kernel_poison_pages(struct page *page, int numpages)  (argument)
    3711  kernel_unpoison_pages(struct page *page, int numpages)  (argument)
    3757  debug_pagealloc_map_pages(struct page *page, int numpages)  (argument)
    3763  debug_pagealloc_unmap_pages(struct page *page, int numpages)  (argument)
    3782  page_is_guard(struct page *page)  (argument)
    3792  set_page_guard(struct zone *zone, struct page *page, unsigned int order, int migratetype)  (argument)
    3802  clear_page_guard(struct zone *zone, struct page *page, unsigned int order, int migratetype)  (argument)
    3811  debug_pagealloc_map_pages(struct page *page, int numpages)  (argument)
    3812  debug_pagealloc_unmap_pages(struct page *page, int numpages)  (argument)
    3815  page_is_guard(struct page *page)  (argument)
    3816  set_page_guard(struct zone *zone, struct page *page, unsigned int order, int migratetype)  (argument)
    3818  clear_page_guard(struct zone *zone, struct page *page, unsigned int order, int migratetype)  (argument)
    [all...]
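put_page_testzero() at line 1132 captures the refcounting contract behind get_page()/put_page(): dropping a reference must atomically detect the transition to zero so that exactly one CPU frees the page, and dropping a reference you never held (refcount already zero) is a bug, hence the VM_BUG_ON at line 1134. A sketch of the drop path on the page_ref API; the free step is a simplification of what put_page() really routes through folios:

    #include <linux/page_ref.h>
    #include <linux/mm.h>

    static inline void put_page_sketch(struct page *page)
    {
            VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);

            if (page_ref_dec_and_test(page))
                    __free_pages(page, 0);  /* last ref went away (simplified) */
    }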
/linux-master/fs/proc/

page.c
    17    #include <linux/kernel-page-flags.h>
    40    /* /proc/kpagecount - an array exposing page counts
    43    * physical page count.
    50    struct page *ppage;
    99    /* /proc/kpageflags - an array exposing page flags
    102   * physical page flags.
    110   u64 stable_page_flags(struct page *page)  (argument)
    117   * it differentiates a memory hole from a page with no flags
    119   if (!page)
    [all...]
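Both interfaces are flat arrays of one little-endian u64 per PFN, so userspace indexes them by seeking to pfn * 8 (see Documentation/admin-guide/mm/pagemap.rst). A small userspace reader for /proc/kpagecount, which reports how many times a frame is mapped (root is required):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>

    int main(int argc, char **argv)
    {
            uint64_t pfn, count;
            FILE *f;

            if (argc < 2)
                    return 1;
            pfn = strtoull(argv[1], NULL, 0);

            f = fopen("/proc/kpagecount", "rb");
            if (!f || fseek(f, pfn * sizeof(count), SEEK_SET) ||
                fread(&count, sizeof(count), 1, f) != 1) {
                    perror("kpagecount");
                    return 1;
            }
            printf("pfn %#llx mapped %llu time(s)\n",
                   (unsigned long long)pfn, (unsigned long long)count);
            fclose(f);
            return 0;
    }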
/linux-master/fs/ntfs3/

super.c
    18    * index - unit inside directory - 2K, 4K, <=page size, does not depend on cluster size.
    1440  struct page *page = ntfs_map_page(inode->i_mapping, idx);  (local)
    1442  if (IS_ERR(page)) {
    1443  err = PTR_ERR(page);
    1447  memcpy(Add2Ptr(t, done), page_address(page),
    1449  ntfs_unmap_page(page);
    1501  struct page *page = ntfs_map_page(inode->i_mapping, idx);  (local)
    1503  if (IS_ERR(page)) {
    [all...]
ntfs_fs.h
    19    #include <linux/page-flags.h>
    29    #include <asm/page.h>
    37    struct page;
    386   struct page *offs_page;
    430   struct ATTRIB **ins_attr, struct page *page);
    437   int attr_data_read_resident(struct ntfs_inode *ni, struct page *page);
    438   int attr_data_write_resident(struct ntfs_inode *ni, struct page *page);
    913   ntfs_unmap_page(struct page *page)  (argument)
    922   struct page *page = read_mapping_page(mapping, index, NULL);  (local)
    [all...]
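Lines 913 and 922 are the two halves of ntfs3's page-cache helper pair: the map side reads a page through the page cache and kmaps it, the unmap side undoes both. A reconstruction from the fragments shown (hedged: the real helpers may differ in detail):

    #include <linux/pagemap.h>
    #include <linux/highmem.h>

    static inline void ntfs_unmap_page_sketch(struct page *page)
    {
            kunmap(page);
            put_page(page);
    }

    static inline struct page *ntfs_map_page_sketch(struct address_space *mapping,
                                                    unsigned long index)
    {
            struct page *page = read_mapping_page(mapping, index, NULL);

            if (!IS_ERR(page))
                    kmap(page);  /* caller then reads via page_address(page) */
            return page;
    }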
inode.c
    580   err = attr_data_read_resident(ni, &folio->page);
    707   struct page *page = &folio->page;  (local)
    709   struct address_space *mapping = page->mapping;
    715   err = attr_data_read_resident(ni, page);
    718   unlock_page(page);
    725   err = ni_readpage_cmpr(ni, page);
    757   /* Range cross 'valid'. Read it page by page
    911   struct page *page =  (local)
    944   ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos, u32 len, u32 copied, struct page *page, void *fsdata)  (argument)
    1015  struct page *page;  (local)
    1101  struct page *page = ntfs_map_page(inode->i_mapping, idx);  (local)
    [all...]
file.c
    118   struct page *page;  (local)
    144   err = ntfs_write_begin(file, mapping, pos, len, &page, NULL);
    148   zero_user_segment(page, zerofrom, PAGE_SIZE);
    150   /* This function in any case puts page. */
    151   err = ntfs_write_end(file, mapping, pos, len, len, page, NULL);
    539   * Write tail of the last page before removed range since
    540   * it will get removed from the page cache below.
    548   * when discarding page cache below.
    809   struct page **page
    818   struct page *page;  (local)
    855   struct page *page, **pages = NULL;  (local)
    [all...]
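Lines 118-151 sketch ntfs3's extend-past-EOF zeroing: get the page via ntfs_write_begin(), zero from the old in-page end of data to the page boundary, then commit with ntfs_write_end(), which (per the line-150 comment) releases the page on every path. A condensed sketch of one iteration under those assumptions:

    /* Zero the tail of one page when extending a file (sketch).
     * zerofrom is the old end-of-data offset within the page. */
    static int zero_page_tail(struct file *file, struct address_space *mapping,
                              loff_t pos, u32 len, u32 zerofrom)
    {
            struct page *page;
            int err = ntfs_write_begin(file, mapping, pos, len, &page, NULL);

            if (err)
                    return err;

            zero_user_segment(page, zerofrom, PAGE_SIZE);

            /* ntfs_write_end() unlocks and puts the page in every case */
            return ntfs_write_end(file, mapping, pos, len, len, page, NULL);
    }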
/linux-master/fs/netfs/

buffered_write.c
    24    NETFS_STREAMING_WRITE, /* Store incomplete data in non-uptodate page. */
    209   * we hit a nasty deadlock on copying from the same page as
    216   * We rely on the page being held onto long enough by the LRU
    259   zero_user_segment(&folio->page, 0, offset);
    298   zero_user_segment(&folio->page, offset + copied, flen);
    503   * Notification that a previously read-only page is about to become writable.
    504   * Note that the caller indicates a single page of a multipage folio.
    508   struct folio *folio = page_folio(vmf->page);
    655   "bad %zx @%llx page %lx %lx\n",
    668   /* Need to detach the group pointer if the page did
    [all...]
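Line 508 shows the standard opening move of a ->page_mkwrite handler: the fault supplies a single page, and page_folio() recovers the folio that owns it before locking and dirtying. A minimal hedged skeleton of such a handler, reduced to the essential checks (netfs's real handler also deals with write groups and fscache):

    #include <linux/mm.h>
    #include <linux/pagemap.h>

    static vm_fault_t page_mkwrite_sketch(struct vm_fault *vmf)
    {
            struct folio *folio = page_folio(vmf->page);

            folio_lock(folio);
            if (folio->mapping != vmf->vma->vm_file->f_mapping) {
                    folio_unlock(folio);  /* truncated/migrated under us */
                    return VM_FAULT_NOPAGE;
            }
            folio_mark_dirty(folio);
            folio_wait_stable(folio);  /* don't race pending writeback */
            return VM_FAULT_LOCKED;    /* caller expects the folio locked */
    }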