Lines Matching refs:gfp_mask

158  * We get the zone list from the current node and the gfp_mask.
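
(The matched lines appear to come from include/linux/gfp.h.) This comment belongs to the zonelist documentation: each node keeps two zonelists, and the gfp flags pick between them. A minimal sketch, assuming a NUMA kernel, with numa_node_id() as an illustrative caller node:

    /* __GFP_THISNODE selects the node's no-fallback zonelist;
     * otherwise the general fallback list is used. */
    struct zonelist *zl = node_zonelist(numa_node_id(), GFP_KERNEL);
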
214 static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask)
216 gfp_t warn_gfp = gfp_mask & (__GFP_THISNODE|__GFP_NOWARN);
224 pr_warn("%pGg allocation from offline node %d\n", &gfp_mask, this_node);
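
warn_if_node_offline() complains only when the caller pinned the allocation (__GFP_THISNODE) and also suppressed the allocator's own failure warning (__GFP_NOWARN), yet named an offline node. A sketch of a call that would reach the check; nid here is a hypothetical caller-chosen node:

    /* Exactly the flag pair the helper looks for: pinned and
     * warning-suppressed. An offline nid would trip the pr_warn(). */
    struct page *page = __alloc_pages_node(nid,
                GFP_KERNEL | __GFP_THISNODE | __GFP_NOWARN, 0);
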
233 __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
236 warn_if_node_offline(nid, gfp_mask);
238 return __alloc_pages(gfp_mask, order, nid, NULL);
255 static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
261 return __alloc_pages_node(nid, gfp_mask, order);
272 static inline struct page *alloc_pages(gfp_t gfp_mask, unsigned int order)
274 return alloc_pages_node(numa_node_id(), gfp_mask, order);
288 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
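
alloc_pages() simply targets the current node via alloc_pages_node(), and alloc_page() is the order-0 shorthand. A minimal allocate/use/free sketch, assuming sleepable kernel-module context:

    /* Four physically contiguous pages (order 2) on the local node.
     * GFP_KERNEL may sleep, so no atomic context. */
    struct page *page = alloc_pages(GFP_KERNEL, 2);
    if (!page)
            return -ENOMEM;
    /* ... touch the memory via page_address(page) ... */
    __free_pages(page, 2);
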
297 extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
298 extern unsigned long get_zeroed_page(gfp_t gfp_mask);
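
Unlike the struct page interfaces above, these return a kernel virtual address. A sketch of get_zeroed_page(), which pairs with free_page():

    unsigned long addr = get_zeroed_page(GFP_KERNEL); /* one zeroed page */
    if (!addr)
            return -ENOMEM;
    free_page(addr);
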
300 void *alloc_pages_exact(size_t size, gfp_t gfp_mask) __alloc_size(1);
302 __meminit void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) __alloc_size(2);
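
alloc_pages_exact() rounds the request up to whole pages but immediately gives back the tail of the underlying power-of-two block, so odd-sized buffers waste less than a plain high-order allocation. It pairs with free_pages_exact() called with the same size; a sketch, assuming 4 KiB pages:

    void *buf = alloc_pages_exact(10 * 1024, GFP_KERNEL); /* 3 pages kept, not 4 */
    if (!buf)
            return -ENOMEM;
    free_pages_exact(buf, 10 * 1024);
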
304 #define __get_free_page(gfp_mask) \
305 __get_free_pages((gfp_mask), 0)
307 #define __get_dma_pages(gfp_mask, order) \
308 __get_free_pages((gfp_mask) | GFP_DMA, (order))
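
Both macros expand to __get_free_pages(): the first is the order-0 case, the second only ORs in GFP_DMA to force ZONE_DMA (a legacy interface for old ISA-style devices; modern drivers use the DMA API). For instance:

    unsigned long p = __get_free_page(GFP_KERNEL);
    if (p)
            free_page(p);
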
317 gfp_t gfp_mask, unsigned int align_mask);
320 unsigned int fragsz, gfp_t gfp_mask,
324 return __page_frag_alloc_align(nc, fragsz, gfp_mask, -align);
328 unsigned int fragsz, gfp_t gfp_mask)
330 return __page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u);
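
Passing -align works because, for a power-of-two align, -align == ~(align - 1), which is exactly the alignment mask __page_frag_alloc_align() expects (the full source guards this with a power-of-two check); the ~0u in the unaligned variant is the no-op mask. A usage sketch, assuming a driver-style context; a static cache starts zeroed, which is the required initial state:

    static struct page_frag_cache frag_cache;

    static void frag_demo(void)
    {
            /* 256-byte fragment, 64-byte aligned, carved from a cached page */
            void *frag = page_frag_alloc_align(&frag_cache, 256,
                                               GFP_ATOMIC, 64);

            if (frag)
                    page_frag_free(frag);
    }
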
356 /* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */
357 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);
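
The predicate is consumed mostly inside the allocator itself. Roughly, going by the documented flag semantics: __GFP_MEMALLOC grants access to the emergency reserves unless __GFP_NOMEMALLOC vetoes it, so:

    /* True: the mask explicitly allows dipping below the watermarks.
     * Plain GFP_KERNEL from an ordinary task would be false. */
    bool reserves_ok = gfp_pfmemalloc_allowed(__GFP_MEMALLOC);
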
368 static inline bool gfp_compaction_allowed(gfp_t gfp_mask)
370 return IS_ENABLED(CONFIG_COMPACTION) && (gfp_mask & __GFP_IO);
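
The check is literal: compaction must be compiled in and the mask must allow IO. GFP_NOIO clears __GFP_IO, so such callers cannot count on compaction to satisfy a high-order request:

    /* Branch taken for GFP_NOIO even with CONFIG_COMPACTION=y. */
    if (!gfp_compaction_allowed(GFP_NOIO))
            pr_debug("no compaction; prefer lower-order allocations\n");
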
378 unsigned migratetype, gfp_t gfp_mask);
379 extern struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
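
alloc_contig_range() migrates a caller-chosen PFN window, while alloc_contig_pages() (available under CONFIG_CONTIG_ALLOC) hunts for a suitable range itself. A sketch with illustrative size and node; the result is released with free_contig_range():

    /* 512 contiguous pages: 2 MiB with 4 KiB pages. */
    struct page *page = alloc_contig_pages(512, GFP_KERNEL,
                                           numa_node_id(), NULL);
    if (page)
            free_contig_range(page_to_pfn(page), 512);
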