Lines matching refs:pages in /netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/mm/

91 			unsigned long end, pgprot_t prot, struct page ***pages)
99 struct page *page = **pages;
104 (*pages)++;
110 unsigned long end, pgprot_t prot, struct page ***pages)
120 if (vmap_pte_range(pmd, addr, next, prot, pages))
127 unsigned long end, pgprot_t prot, struct page ***pages)
137 if (vmap_pmd_range(pud, addr, next, prot, pages))
143 int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
155 err = vmap_pud_range(pgd, addr, next, prot, pages);
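
The vmap_*_range helpers above form the kernel's standard nested page-table walk: map_vm_area (line 143) iterates the top-level entries, and each level splits its [addr, end) range on that level's boundary and recurses until vmap_pte_range installs individual pages. A minimal illustrative analogue of that split-and-recurse shape (user-space C, not the kernel code; the single LEVEL1 shift and all names are hypothetical stand-ins for the pgd/pud/pmd/pte helpers):

#include <stdio.h>

#define LEVEL1_SHIFT	12			/* hypothetical: 4 KiB per level-1 block */
#define LEVEL1_SIZE	(1UL << LEVEL1_SHIFT)

/* End of the level-1 block containing addr, clamped to end (cf. pmd_addr_end). */
static unsigned long level1_addr_end(unsigned long addr, unsigned long end)
{
	unsigned long boundary = (addr + LEVEL1_SIZE) & ~(LEVEL1_SIZE - 1);

	return boundary < end ? boundary : end;
}

static void map_leaf_range(unsigned long addr, unsigned long end)
{
	printf("leaf: map [%#lx, %#lx)\n", addr, end);	/* like vmap_pte_range() */
}

static void map_range(unsigned long addr, unsigned long end)
{
	unsigned long next;

	do {
		next = level1_addr_end(addr, end);
		map_leaf_range(addr, next);
	} while (addr = next, addr != end);
}

int main(void)
{
	map_range(0x1800, 0x3200);	/* spans three level-1 blocks */
	return 0;
}

The do { next = ...; } while (addr = next, addr != end) loop matches the bodies around lines 120 and 137 above.
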
220 area->pages = NULL;
341 BUG_ON(!area->pages[i]);
342 __free_page(area->pages[i]);
346 vfree(area->pages);
348 kfree(area->pages);
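
The free path at lines 341-348 mirrors the allocation at lines 430-433 below: when the page-pointer array itself is bigger than one page it was obtained from vmalloc and must go back through vfree, otherwise it came from kmalloc and is released with kfree. A minimal sketch of that size-dependent pairing (assumed kernel-module context; alloc_page_array/free_page_array are hypothetical names, not from this file):

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static struct page **alloc_page_array(unsigned int nr_pages)
{
	unsigned long array_size = nr_pages * sizeof(struct page *);

	if (array_size > PAGE_SIZE)		/* too big for a slab object */
		return vmalloc(array_size);
	return kmalloc(array_size, GFP_KERNEL);
}

static void free_page_array(struct page **pages, unsigned int nr_pages)
{
	/* Free with whichever allocator produced the array, as __vunmap() does. */
	if (nr_pages * sizeof(struct page *) > PAGE_SIZE)
		vfree(pages);
	else
		kfree(pages);
}
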
389 * vmap - map an array of pages into virtually contiguous space
390 * @pages: array of page pointers
391 * @count: number of pages to map
395 * Maps @count pages from @pages into contiguous kernel virtual
398 void *vmap(struct page **pages, unsigned int count,
409 if (map_vm_area(area, prot, &pages)) {
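
vmap() as documented at lines 389-398 takes pages that were allocated individually and gives them one contiguous kernel virtual view; line 409 shows it delegating to map_vm_area. A minimal kernel-module-style usage sketch (error handling trimmed; NR is an arbitrary count):

#include <linux/mm.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#define NR 4

static void vmap_demo(void)
{
	struct page *pages[NR];
	void *virt;
	int i;

	for (i = 0; i < NR; i++)
		pages[i] = alloc_page(GFP_KERNEL);	/* physically scattered */

	virt = vmap(pages, NR, VM_MAP, PAGE_KERNEL);	/* one contiguous view */
	if (virt) {
		memset(virt, 0, NR * PAGE_SIZE);
		vunmap(virt);			/* drops the mapping, not the pages */
	}

	for (i = 0; i < NR; i++)
		__free_page(pages[i]);
}
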
421 struct page **pages;
430 pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
433 pages = kmalloc_node(array_size,
437 area->pages = pages;
438 if (!area->pages) {
443 memset(area->pages, 0, array_size);
447 area->pages[i] = alloc_page(gfp_mask);
449 area->pages[i] = alloc_pages_node(node, gfp_mask, 0);
450 if (unlikely(!area->pages[i])) {
451 /* Successfully allocated i pages, free them in __vunmap() */
457 if (map_vm_area(area, prot, &pages))
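
The loop at lines 447-451 allocates the backing pages one at a time, and the comment at line 451 marks the partial-failure strategy: record that i pages succeeded and let the common teardown in __vunmap() (lines 341-348) free exactly those. A generic user-space sketch of that record-progress, unwind-in-one-place pattern (names hypothetical):

#include <stdlib.h>

/* Allocate n buffers of 'each' bytes; on failure free exactly what succeeded. */
static void **alloc_all(size_t n, size_t each)
{
	void **v = calloc(n, sizeof(*v));
	size_t i;

	if (!v)
		return NULL;
	for (i = 0; i < n; i++) {
		v[i] = malloc(each);
		if (!v[i]) {		/* i buffers succeeded; unwind them */
			while (i--)
				free(v[i]);
			free(v);
			return NULL;
		}
	}
	return v;
}
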
475 * @prot: protection mask for the allocated pages
478 * Allocate enough pages to cover @size from the page level
507 * Allocate enough pages to cover @size from the page level
547 * Allocate enough pages to cover @size from the page level
567 * Kernel-internal function to allocate enough pages to cover @size
592 * Allocate enough 32bit PA addressable pages to cover @size from the
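
The doc comments at lines 475-592 all describe the same contract: allocate enough pages from the page-level allocator to cover @size and map them into a virtually (not physically) contiguous range, with the variants differing only in GFP flags, protection, and, for the 32-bit variant, the physical address range. A minimal usage sketch, assuming a kernel-module context:

#include <linux/string.h>
#include <linux/vmalloc.h>

static void *vmalloc_demo(unsigned long size)
{
	void *buf = vmalloc(size);	/* may sleep; NULL on failure */

	if (buf)
		memset(buf, 0, size);	/* contiguous virtually, not physically */
	return buf;			/* caller releases with vfree(buf) */
}
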
700 * remap_vmalloc_range - map vmalloc pages to userspace
703 * @pgoff: number of pages into addr before first page to map
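
remap_vmalloc_range() at line 700 is the userspace-facing end of this file: a driver's mmap handler can expose a vmalloc'd buffer to a process, with @pgoff selecting the starting page as documented at line 703. A hypothetical driver sketch, under the assumption that vbuf was allocated with vmalloc_user() so the area carries the user-mappable flag this call checks for:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *vbuf;	/* assumed elsewhere: vbuf = vmalloc_user(VBUF_SIZE); */

static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* vma->vm_pgoff picks the first page of vbuf to map, per @pgoff above. */
	return remap_vmalloc_range(vma, vbuf, vma->vm_pgoff);
}

static const struct file_operations demo_fops = {
	.mmap	= demo_mmap,
};
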