Lines matching refs: page

14  * "current->executable", and page faults do the actual loading. Clean.
199 static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
202 struct page *page;
229 &page, NULL);
237 return page;
240 static void put_arg_page(struct page *page)
242 put_page(page);
250 struct page *page)
252 flush_cache_page(bprm->vma, pos, page_to_pfn(page));
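The three helpers above (lines 199-252) are the CONFIG_MMU flavour: the argument pages live in the new mm's stack VMA (bprm->vma), so they are faulted in and pinned through get_user_pages_remote() rather than held in a private array. A minimal sketch of that shape, assuming a recent kernel; locking details, on-demand stack expansion and the exact GUP signature vary by version:

#include <linux/binfmts.h>
#include <linux/mm.h>
#include <linux/highmem.h>

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
                int write)
{
        struct page *page;
        int ret;

        mmap_read_lock(bprm->mm);
        ret = get_user_pages_remote(bprm->mm, pos, 1,
                                    write ? FOLL_WRITE : 0,
                                    &page, NULL);       /* "&page, NULL" as at line 229 */
        mmap_read_unlock(bprm->mm);
        if (ret <= 0)
                return NULL;
        return page;
}

static void put_arg_page(struct page *page)
{
        put_page(page);         /* drop the reference GUP took (line 242) */
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
                struct page *page)
{
        flush_cache_page(bprm->vma, pos, page_to_pfn(page));   /* line 252 */
}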
320 static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
323 struct page *page;
325 page = bprm->page[pos / PAGE_SIZE];
326 if (!page && write) {
327 page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
328 if (!page)
330 bprm->page[pos / PAGE_SIZE] = page;
333 return page;
336 static void put_arg_page(struct page *page)
342 if (bprm->page[i]) {
343 __free_page(bprm->page[i]);
344 bprm->page[i] = NULL;
357 struct page *page)
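Lines 320-357 are the !CONFIG_MMU counterparts: without paging hardware there is no stack VMA to fault pages into, so the strings are staged in a fixed bprm->page[] array and each page is allocated lazily on the first write. A reconstruction pieced together from the fragments above (assumed close to the real code, but simplified):

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
                int write)
{
        struct page *page;

        page = bprm->page[pos / PAGE_SIZE];     /* one slot per argument page */
        if (!page && write) {
                page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
                if (!page)
                        return NULL;
                bprm->page[pos / PAGE_SIZE] = page;
        }

        return page;
}

static void put_arg_page(struct page *page)
{
        /*
         * Nothing to release here: the array keeps its reference, and the
         * pages are freed later with __free_page(bprm->page[i]) as shown
         * at lines 342-344.
         */
}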
533 * ensures the destination page is created and not swapped out.
538 struct page *kmapped_page = NULL;
593 struct page *page;
595 page = get_arg_page(bprm, pos, 1);
596 if (!page) {
606 kmapped_page = page;
649 struct page *page;
655 page = get_arg_page(bprm, pos, 1);
656 if (!page)
658 flush_arg_page(bprm, pos & PAGE_MASK, page);
659 memcpy_to_page(page, offset_in_page(pos), arg, bytes_to_copy);
660 put_arg_page(page);
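Lines 533-660 are the consumers that fill those pages while execve copies argv/envp: copy_strings() handles user-space strings (it keeps the last destination page mapped in kmapped_page, line 538, to avoid remapping on every chunk), and lines 649-660 are the kernel-string variant. A simplified reconstruction of that loop, assuming it corresponds to copy_string_kernel() and using the helpers sketched above (length and argument-size limit checks omitted):

int copy_string_kernel(const char *arg, struct linux_binprm *bprm)
{
        int len = strlen(arg) + 1;      /* include the terminating NUL */
        unsigned long pos = bprm->p;

        /* Strings are laid out top-down, so walk backwards from bprm->p. */
        arg += len;
        bprm->p -= len;

        while (len > 0) {
                /* Copy at most up to the previous page boundary per iteration. */
                unsigned int bytes_to_copy = min_t(unsigned int, len,
                                min_not_zero(offset_in_page(pos), PAGE_SIZE));
                struct page *page;

                pos -= bytes_to_copy;
                arg -= bytes_to_copy;
                len -= bytes_to_copy;

                page = get_arg_page(bprm, pos, 1);      /* write = 1: create/pin */
                if (!page)
                        return -E2BIG;
                flush_arg_page(bprm, pos & PAGE_MASK, page);
                memcpy_to_page(page, offset_in_page(pos), arg, bytes_to_copy);
                put_arg_page(page);
        }

        return 0;
}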
691 * 3) Move vma's page tables to the new range.
724 * move the page tables downwards, on failure we rely on
860 * Align this down to a page boundary as expand_stack
900 char *src = kmap_local_page(bprm->page[index]) + offset;
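Lines 691-860 belong to the MMU-side relocation (setup_arg_pages()/shift_arg_pages() slide the temporary argument VMA, including its page tables, down to its final place under the stack top), while line 900 is the no-MMU hand-off: once the real stack exists, the staged bprm->page[] contents are copied onto it. A hedged sketch of that copy loop, presumably the body of transfer_args_to_stack(), with bounds and offset handling simplified (sp starts at the new stack pointer):

        unsigned long index, stop = bprm->p >> PAGE_SHIFT;
        int ret = 0;

        for (index = MAX_ARG_PAGES - 1; index >= stop; index--) {
                /* Only the lowest page is partially used; start at bprm->p there. */
                unsigned int offset = (index == stop) ? bprm->p & ~PAGE_MASK : 0;
                char *src = kmap_local_page(bprm->page[index]) + offset;

                sp -= PAGE_SIZE - offset;
                if (copy_to_user((void __user *)sp, src, PAGE_SIZE - offset))
                        ret = -EFAULT;
                kunmap_local(src);
                if (ret)
                        break;
        }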
1744 struct page *page;
1751 page = get_arg_page(bprm, bprm->p, 0);
1752 if (!page)
1754 kaddr = kmap_local_page(page);
1761 put_arg_page(page);
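The last group reads an argument page back rather than writing it: get_arg_page() is called with write = 0 and the page is mapped with kmap_local_page() for a short scan. This matches remove_arg_zero(), which binfmt handlers such as binfmt_script use to drop the original argv[0]. A hedged sketch of that read-back pattern:

        unsigned long offset = bprm->p & ~PAGE_MASK;
        char *kaddr;
        struct page *page;

        page = get_arg_page(bprm, bprm->p, 0);  /* write = 0: page must already exist */
        if (!page)
                return -EFAULT;
        kaddr = kmap_local_page(page);

        /* Skip over the old argv[0] string, advancing bprm->p as we go. */
        for (; offset < PAGE_SIZE && kaddr[offset]; offset++, bprm->p++)
                ;

        kunmap_local(kaddr);
        put_arg_page(page);

In the real function this runs in a loop in case the string spans a page boundary; afterwards bprm->p is bumped past the terminating NUL and bprm->argc is decremented.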