Lines Matching refs:pages (restricted to /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/mm/). The matches below come from the per-cpu allocator's page-backed chunk handling (the pcpu_* helpers).

23  * pcpu_get_pages_and_bitmap - get temp pages array and bitmap
39 * Pointer to temp pages array on success, NULL on failure.
45 static struct page **pages;
47 size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);
51 if (!pages || !bitmap) {
52 if (may_alloc && !pages)
53 pages = pcpu_mem_alloc(pages_size);
56 if (!pages || !bitmap)
60 memset(pages, 0, pages_size);
64 return pages;
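Lines 23-64 are fragments of pcpu_get_pages_and_bitmap(), which lazily allocates a single shared scratch page array and populated-bitmap and hands them out to callers. A rough sketch of that shape is below; pcpu_mem_alloc(), pcpu_nr_units and pcpu_unit_pages appear in the fragments above, while the bitmap side (BITS_TO_LONGS(), bitmap_copy(), chunk->populated) is assumed from the surrounding allocator code, so treat this as a reconstruction rather than a verbatim copy.

        static struct page **pcpu_get_pages_and_bitmap(struct pcpu_chunk *chunk,
                                                       unsigned long **bitmapp,
                                                       bool may_alloc)
        {
                /* one shared scratch array and bitmap, reused across calls */
                static struct page **pages;
                static unsigned long *bitmap;
                size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);
                size_t bitmap_size = BITS_TO_LONGS(pcpu_unit_pages) *
                                     sizeof(unsigned long);

                if (!pages || !bitmap) {
                        /* allocate lazily, only when the caller permits it */
                        if (may_alloc && !pages)
                                pages = pcpu_mem_alloc(pages_size);
                        if (may_alloc && !bitmap)
                                bitmap = pcpu_mem_alloc(bitmap_size);
                        if (!pages || !bitmap)
                                return NULL;
                }

                /* clean page array, bitmap seeded from the chunk's current state */
                memset(pages, 0, pages_size);
                bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages);

                *bitmapp = bitmap;
                return pages;
        }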
68 * pcpu_free_pages - free pages which were allocated for @chunk
69 * @chunk: chunk pages were allocated for
70 * @pages: array of pages to be freed, indexed by pcpu_page_idx()
75 * Free pages [@page_start,@page_end) in @pages for all units.
76 * The pages were allocated for @chunk.
79 struct page **pages, unsigned long *populated,
87 struct page *page = pages[pcpu_page_idx(cpu, i)];
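pcpu_free_pages() (lines 68-87) walks every possible CPU's unit and frees whatever pages the caller left in @pages. A minimal sketch consistent with these fragments; pcpu_page_idx() is shown above, while for_each_possible_cpu() and __free_page() are assumed standard kernel helpers:

        static void pcpu_free_pages(struct pcpu_chunk *chunk,
                                    struct page **pages, unsigned long *populated,
                                    int page_start, int page_end)
        {
                unsigned int cpu;
                int i;

                for_each_possible_cpu(cpu) {
                        for (i = page_start; i < page_end; i++) {
                                struct page *page = pages[pcpu_page_idx(cpu, i)];

                                /* slots may be NULL if allocation failed part-way */
                                if (page)
                                        __free_page(page);
                        }
                }
        }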
96 * pcpu_alloc_pages - allocates pages for @chunk
98 * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
103 * Allocate pages [@page_start,@page_end) into @pages for all units.
105 * content of @pages and will pass it verbatim to pcpu_map_pages().
108 struct page **pages, unsigned long *populated,
117 struct page **pagep = &pages[pcpu_page_idx(cpu, i)];
121 pcpu_free_pages(chunk, pages, populated,
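The allocation side (lines 96-121) mirrors that loop and rolls back through pcpu_free_pages() on failure, which matches the call at line 121. A sketch; the GFP flags and alloc_pages_node()/cpu_to_node() usage are assumptions about this kernel version, not shown in the fragments:

        static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
                                    struct page **pages, unsigned long *populated,
                                    int page_start, int page_end)
        {
                const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
                unsigned int cpu;
                int i;

                for_each_possible_cpu(cpu) {
                        for (i = page_start; i < page_end; i++) {
                                struct page **pagep = &pages[pcpu_page_idx(cpu, i)];

                                /* allocate the backing page on the cpu's home node */
                                *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
                                if (!*pagep) {
                                        /* undo everything allocated so far */
                                        pcpu_free_pages(chunk, pages, populated,
                                                        page_start, page_end);
                                        return -ENOMEM;
                                }
                        }
                }
                return 0;
        }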
156 * pcpu_unmap_pages - unmap pages out of a pcpu_chunk
158 * @pages: pages array which can be used to pass information to free
163 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
164 * Corresponding elements in @pages were cleared by the caller and can
170 struct page **pages, unsigned long *populated,
182 pages[pcpu_page_idx(cpu, i)] = page;
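pcpu_unmap_pages() (lines 156-182) records each still-mapped page back into @pages, as line 182 shows, before tearing down the kernel mapping, so the caller can free the pages afterwards. A sketch under the assumption that pcpu_chunk_page(), pcpu_chunk_addr() and __pcpu_unmap_pages() are the helpers defined nearby in this file:

        static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
                                     struct page **pages, unsigned long *populated,
                                     int page_start, int page_end)
        {
                unsigned int cpu;
                int i;

                for_each_possible_cpu(cpu) {
                        for (i = page_start; i < page_end; i++) {
                                struct page *page;

                                /* look up the mapped page and stash it for the caller */
                                page = pcpu_chunk_page(chunk, cpu, i);
                                WARN_ON(!page);
                                pages[pcpu_page_idx(cpu, i)] = page;
                        }
                        /* drop the PTEs for this unit's sub-range */
                        __pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start),
                                           page_end - page_start);
                }

                /* reflect the depopulation in the temp bitmap */
                bitmap_clear(populated, page_start, page_end - page_start);
        }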
213 static int __pcpu_map_pages(unsigned long addr, struct page **pages,
217 PAGE_KERNEL, pages);
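Lines 213-217 are the thin wrapper that actually installs the page table entries; it deliberately defers all cache/TLB flushing to its caller. Roughly, assuming map_kernel_range_noflush() from mm/vmalloc.c:

        static int __pcpu_map_pages(unsigned long addr, struct page **pages,
                                    int nr_pages)
        {
                /* install PTEs only; flushing is the caller's responsibility */
                return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
                                                PAGE_KERNEL, pages);
        }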
221 * pcpu_map_pages - map pages into a pcpu_chunk
223 * @pages: pages array containing pages to be mapped
228 * For each cpu, map pages [@page_start,@page_end) into @chunk. The
237 struct page **pages, unsigned long *populated,
245 &pages[pcpu_page_idx(cpu, page_start)],
254 pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
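pcpu_map_pages() (lines 221-254) maps the freshly allocated pages into every unit's address range, then links each page back to @chunk via pcpu_set_page_chunk() and marks the range populated; if any unit fails, the units mapped so far are unwound. A sketch along those lines, with pcpu_chunk_addr() and __pcpu_unmap_pages() assumed from earlier in the file:

        static int pcpu_map_pages(struct pcpu_chunk *chunk,
                                  struct page **pages, unsigned long *populated,
                                  int page_start, int page_end)
        {
                unsigned int cpu, tcpu;
                int i, err;

                for_each_possible_cpu(cpu) {
                        err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
                                               &pages[pcpu_page_idx(cpu, page_start)],
                                               page_end - page_start);
                        if (err < 0)
                                goto err;
                }

                /* all units mapped: link pages to the chunk and mark populated */
                for (i = page_start; i < page_end; i++) {
                        for_each_possible_cpu(cpu)
                                pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
                                                    chunk);
                        __set_bit(i, populated);
                }
                return 0;

        err:
                /* unwind only the units that were already mapped */
                for_each_possible_cpu(tcpu) {
                        if (tcpu == cpu)
                                break;
                        __pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
                                           page_end - page_start);
                }
                return err;
        }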
297 * For each cpu, populate and map pages [@page_start,@page_end) into
308 struct page **pages;
313 /* quick path, check whether all pages are already there */
319 /* need to allocate and map pages, this chunk can't be immutable */
322 pages = pcpu_get_pages_and_bitmap(chunk, &populated, true);
323 if (!pages)
328 rc = pcpu_alloc_pages(chunk, pages, populated, rs, re);
335 rc = pcpu_map_pages(chunk, pages, populated, rs, re);
352 pcpu_unmap_pages(chunk, pages, populated, rs, re);
356 pcpu_free_pages(chunk, pages, populated, rs, re);
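Lines 297-356 are from pcpu_populate_chunk(), which ties the pieces together: take the quick path if the range is already populated, otherwise grab the temp array (line 322), allocate (line 328) and map (line 335) each unpopulated sub-range, and on failure unmap (line 352) and free (line 356) whatever was done so far. The condensed sketch below is a reconstruction; PFN_DOWN()/PFN_UP(), pcpu_next_pop(), the pcpu_for_each_unpop_region() macro, the flush helpers and bitmap_copy() are assumed from mm/percpu.c and the rest of this file:

        static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
        {
                int page_start = PFN_DOWN(off);
                int page_end = PFN_UP(off + size);
                int free_end = page_start, unmap_end = page_start;
                unsigned long *populated;
                struct page **pages;
                unsigned int cpu;
                int rs, re, rc;

                /* quick path, check whether all pages are already there */
                rs = page_start;
                pcpu_next_pop(chunk, &rs, &re, page_end);
                if (rs == page_start && re == page_end)
                        goto clear;

                /* need to allocate and map pages, this chunk can't be immutable */
                WARN_ON(chunk->immutable);

                pages = pcpu_get_pages_and_bitmap(chunk, &populated, true);
                if (!pages)
                        return -ENOMEM;

                /* allocate, then map, each unpopulated sub-range */
                pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
                        rc = pcpu_alloc_pages(chunk, pages, populated, rs, re);
                        if (rc)
                                goto err_free;
                        free_end = re;
                }

                pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
                        rc = pcpu_map_pages(chunk, pages, populated, rs, re);
                        if (rc)
                                goto err_unmap;
                        unmap_end = re;
                }
                pcpu_post_map_flush(chunk, page_start, page_end);

                /* commit the updated bitmap back into the chunk */
                bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
        clear:
                for_each_possible_cpu(cpu)
                        memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
                return 0;

        err_unmap:
                pcpu_pre_unmap_flush(chunk, page_start, unmap_end);
                pcpu_for_each_unpop_region(chunk, rs, re, page_start, unmap_end)
                        pcpu_unmap_pages(chunk, pages, populated, rs, re);
                pcpu_post_unmap_tlb_flush(chunk, page_start, unmap_end);
        err_free:
                pcpu_for_each_unpop_region(chunk, rs, re, page_start, free_end)
                        pcpu_free_pages(chunk, pages, populated, rs, re);
                return rc;
        }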
367 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
378 struct page **pages;
393 * successful population attempt so the temp pages array must
396 pages = pcpu_get_pages_and_bitmap(chunk, &populated, false);
397 BUG_ON(!pages);
403 pcpu_unmap_pages(chunk, pages, populated, rs, re);
408 pcpu_free_pages(chunk, pages, populated, rs, re);
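Depopulation (lines 367-408) is the mirror image: it reuses the already-allocated temp array without allocating (hence the BUG_ON at line 397, since a prior successful populate guarantees the array exists), unmaps each populated sub-range (line 403), frees the pages (line 408) and commits the shrunken bitmap. A sketch under the same assumptions as above, with pcpu_next_unpop() and pcpu_for_each_pop_region() assumed from mm/percpu.c:

        static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
        {
                int page_start = PFN_DOWN(off);
                int page_end = PFN_UP(off + size);
                unsigned long *populated;
                struct page **pages;
                int rs, re;

                /* quick path, nothing to do if the range is already unpopulated */
                rs = page_start;
                pcpu_next_unpop(chunk, &rs, &re, page_end);
                if (rs == page_start && re == page_end)
                        return;

                /* immutable chunks can't be depopulated */
                WARN_ON(chunk->immutable);

                /*
                 * A successful population attempt must have happened before,
                 * so the temp pages array is guaranteed to exist; forbid
                 * allocation by passing may_alloc = false.
                 */
                pages = pcpu_get_pages_and_bitmap(chunk, &populated, false);
                BUG_ON(!pages);

                /* unmap, then free, each populated sub-range */
                pcpu_pre_unmap_flush(chunk, page_start, page_end);

                pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
                        pcpu_unmap_pages(chunk, pages, populated, rs, re);

                /* no TLB flush here; vmalloc flushes lazily before reuse */

                pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
                        pcpu_free_pages(chunk, pages, populated, rs, re);

                /* commit the updated bitmap back into the chunk */
                bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
        }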