Lines Matching defs:page

100 struct page *page;
110 page = pfn_to_page(pfn);
111 dump_page(page, "remapping already mapped page");
436 * successfully (and before the addresses are expected to cause a page fault
487 unsigned long end, pgprot_t prot, struct page **pages, int *nr,
501 struct page *page = pages[*nr];
505 if (WARN_ON(!page))
507 if (WARN_ON(!pfn_valid(page_to_pfn(page))))
510 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
518 unsigned long end, pgprot_t prot, struct page **pages, int *nr,
536 unsigned long end, pgprot_t prot, struct page **pages, int *nr,
554 unsigned long end, pgprot_t prot, struct page **pages, int *nr,
572 pgprot_t prot, struct page **pages)
608 pgprot_t prot, struct page **pages, unsigned int page_shift)
634 pgprot_t prot, struct page **pages, unsigned int page_shift)
648 * @prot: page protection flags to use
657 pgprot_t prot, struct page **pages, unsigned int page_shift)
692 unsigned long end, struct page **pages)
735 * Walk a vmap address to the struct page it maps. Huge vmap mappings will
736 * return the tail page that corresponds to the base page address, which
739 struct page *vmalloc_to_page(const void *vmalloc_addr)
742 struct page *page = NULL;
789 page = pte_page(pte);
791 return page;
796 * Map a vmalloc()-space virtual address to the physical page frame number.
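The vmalloc_to_page() and vmalloc_to_pfn() matches above are the helpers that walk the kernel page tables from a vmalloc-space address back to its backing struct page or page frame number. A minimal sketch of how a caller might use them; the buffer size and function name are illustrative, not taken from the source:

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/printk.h>

static void inspect_vmalloc_backing(void)
{
	void *buf = vmalloc(4 * PAGE_SIZE);	/* illustrative buffer */
	struct page *page;
	unsigned long pfn;

	if (!buf)
		return;

	/* Walk the page tables to the struct page backing the first byte. */
	page = vmalloc_to_page(buf);

	/* Same walk, but returning the physical page frame number. */
	pfn = vmalloc_to_pfn(buf);

	pr_info("buf %p is backed by pfn %#lx (page %p)\n", buf, pfn, page);

	vfree(buf);
}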
2081 * There is a tradeoff here: a larger number will cover more kernel page tables
2577 * @gfp_mask: flags for the page level allocator
2876 * to amortize TLB flushing overheads. What this means is that any page you
2879 * still referencing that page (additional to the regular 1:1 kernel mapping).
2943 void *vm_map_ram(struct page **pages, unsigned int count, int node)
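These fragments belong to the vm_map_ram()/vm_unmap_ram() fast-path API and the comment explaining that kernel virtual mappings are flushed lazily to amortize TLB overheads. A minimal usage sketch, assuming the caller allocates and frees its own pages (the page count is illustrative):

#include <linux/vmalloc.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/numa.h>
#include <linux/string.h>

#define DEMO_NR_PAGES 8	/* illustrative count, not from the source */

static void vm_map_ram_demo(void)
{
	struct page *pages[DEMO_NR_PAGES];
	void *va;
	int i;

	for (i = 0; i < DEMO_NR_PAGES; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto free_pages;
	}

	/* Map the pages into one contiguous kernel virtual range. */
	va = vm_map_ram(pages, DEMO_NR_PAGES, NUMA_NO_NODE);
	if (!va)
		goto free_pages;

	memset(va, 0, DEMO_NR_PAGES * PAGE_SIZE);

	/*
	 * The unmap is lazy: TLB flushing may be deferred, which is why the
	 * comment matched above warns that CPUs can still hold stale aliases
	 * to these pages until a purge or vm_unmap_aliases() runs.
	 */
	vm_unmap_ram(va, DEMO_NR_PAGES);

free_pages:
	while (--i >= 0)
		__free_page(pages[i]);
}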
3214 int (*set_direct_map)(struct page *page))
3338 struct page *page = vm->pages[i];
3340 BUG_ON(!page);
3341 mod_memcg_page_state(page, MEMCG_VMALLOC, -1);
3346 __free_page(page);
3360 * which was created from the page array passed to vmap().
3385 * @pages: array of page pointers
3388 * @prot: page protection for the mapping
3398 void *vmap(struct page **pages, unsigned int count,
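vmap(), matched above, takes an array of struct page pointers plus a count, mapping flags, and a protection, and returns one virtually contiguous kernel range over physically scattered pages. A minimal sketch, assuming caller-owned pages that are freed separately after vunmap() (the function names are illustrative):

#include <linux/vmalloc.h>
#include <linux/mm.h>

/*
 * With plain VM_MAP the pages remain owned by the caller: vunmap() only
 * tears down the virtual mapping, it does not free the pages themselves.
 */
static void *map_scattered_pages(struct page **pages, unsigned int count)
{
	return vmap(pages, count, VM_MAP, PAGE_KERNEL);
}

static void unmap_scattered_pages(void *va)
{
	vunmap(va);
}

In recent kernels, passing VM_MAP_PUT_PAGES instead transfers ownership of the pages to the vmalloc layer, so a later vfree() releases them via the vm->pages loop matched above.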
3467 * @prot: page protection for the mapping
3497 unsigned int order, unsigned int nr_pages, struct page **pages)
3502 struct page *page;
3507 * the page array is partly or not at all populated due
3508 * to failures, fall back to a single page allocator that is
3546 * fall back to a single page allocator.
3567 page = alloc_pages_noprof(alloc_gfp, order);
3569 page = alloc_pages_node_noprof(nid, alloc_gfp, order);
3570 if (unlikely(!page)) {
3583 * small-page vmallocs). Some drivers do their own refcounting
3584 * on vmalloc_to_page() pages, some use page->mapping,
3585 * page->lru, etc.
3588 split_page(page, order);
3591 * Careful, we allocate and map page-order pages, but
3592 * tracking is done per PAGE_SIZE page so as to keep the
3596 pages[nr_allocated + i] = page + i;
3619 array_size = (unsigned long)nr_small_pages * sizeof(struct page *);
3634 "vmalloc error: size %lu, failed to allocated page array size %lu",
3664 * - insufficient huge page-order pages
3666 * Since we always retry allocations at order-0 in the huge page
3677 * page tables allocations ignore external gfp mask, enforce it
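The block of matches above is the internal page-array allocation path (vm_area_alloc_pages()/__vmalloc_area_node()): order-0 requests go through the bulk allocator with a single-page fallback, while higher-order blocks are split with split_page() so that every PAGE_SIZE page keeps its own refcount for vmalloc_to_page() users. The pattern that split_page() comment describes looks roughly like this sketch (not the vmalloc internals themselves; names are illustrative):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/errno.h>

/*
 * Allocate one order-N block and split it so that each constituent
 * PAGE_SIZE page gets its own refcount. Each small page can then be
 * tracked and freed independently, e.g. by drivers that refcount
 * vmalloc_to_page() results. @pages must hold 1 << order entries.
 */
static int alloc_split_pages(struct page **pages, unsigned int order)
{
	struct page *page = alloc_pages(GFP_KERNEL, order);
	unsigned int i;

	if (!page)
		return -ENOMEM;

	split_page(page, order);	/* one reference per small page now */

	for (i = 0; i < (1U << order); i++)
		pages[i] = page + i;	/* per PAGE_SIZE page tracking */

	return 0;
}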
3717 * @gfp_mask: flags for the page level allocator
3723 * Allocate enough pages to cover @size from the page level
3871 * @gfp_mask: flags for the page level allocator
3875 * Allocate enough pages to cover @size from the page level allocator with
3912 * Allocate enough pages to cover @size from the page level
3915 * For tight control over page level allocator and protection flags
3930 * @gfp_mask: flags for the page level allocator
3932 * Allocate enough pages to cover @size from the page level
3951 * Allocate enough pages to cover @size from the page level
3955 * For tight control over page level allocator and protection flags
3990 * Allocate enough pages to cover @size from the page level
3993 * For tight control over page level allocator and protection flags
4010 * Allocate enough pages to cover @size from the page level
4040 * page level allocator and map them into contiguous kernel virtual space.
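The repeated "@gfp_mask: flags for the page level allocator" and "Allocate enough pages to cover @size" fragments are the kernel-doc headers of the vmalloc() family (__vmalloc(), vmalloc(), vzalloc(), vmalloc_user(), vmalloc_node(), vmalloc_32(), ...), all of which allocate whole pages and map them into contiguous kernel virtual space. The common calling pattern, as a trivial sketch:

#include <linux/vmalloc.h>
#include <linux/errno.h>

static int vmalloc_demo(unsigned long size)
{
	/* Virtually (not physically) contiguous, zero-filled allocation. */
	void *buf = vzalloc(size);

	if (!buf)
		return -ENOMEM;

	/* ... use buf like any other kernel buffer ... */

	vfree(buf);
	return 0;
}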
4094 * If the page is not present, fill zero.
4102 struct page *page;
4112 page = vmalloc_to_page(addr);
4121 if (page)
4122 copied = copy_page_to_iter_nofault(page, offset,
4393 struct page *page = vmalloc_to_page(kaddr);
4396 ret = vm_insert_page(vma, uaddr, page);
4414 * @pgoff: number of pages into addr before first page to map
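The vmalloc_to_page()/vm_insert_page() pair matched above is the per-page loop behind remap_vmalloc_range(), which maps a vmalloc area into userspace one page at a time; @pgoff selects how many pages into the buffer the mapping starts. A hedged sketch of the usual caller, a driver mmap handler exposing a vmalloc_user() buffer (the demo_* names are assumptions, not from the source):

#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/mm.h>

static void *demo_buf;	/* assumed to be allocated with vmalloc_user() */

static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Insert the buffer's pages into the user VMA, page by page. */
	return remap_vmalloc_range(vma, demo_buf, vma->vm_pgoff);
}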
4818 pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",