Lines matching references to gfp_mask

2926 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
2928 return __should_fail_alloc_page(gfp_mask, order);
3050 unsigned int alloc_flags, gfp_t gfp_mask)
3129 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
3137 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
3160 /* Must be called after current_gfp_context() which can change gfp_mask */
3161 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
3165 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
3176 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
3199 !__cpuset_zone_allowed(zone, gfp_mask))
3259 gfp_mask))
3268 gfp_mask)) {
3295 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
3315 gfp_mask, alloc_flags, ac->migratetype);
3317 prep_new_page(page, order, gfp_mask, alloc_flags);
3355 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
3364 if (!(gfp_mask & __GFP_NOMEMALLOC))
3368 if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
3371 __show_mem(filter, nodemask, gfp_zone(gfp_mask));
3374 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
3380 if ((gfp_mask & __GFP_NOWARN) ||
3382 ((gfp_mask & __GFP_DMA) && !has_managed_dma()))
3389 current->comm, &vaf, gfp_mask, &gfp_mask,
3396 warn_alloc_show_mem(gfp_mask, nodemask);
3400 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
3406 page = get_page_from_freelist(gfp_mask, order,
3413 page = get_page_from_freelist(gfp_mask, order,
3420 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
3427 .gfp_mask = gfp_mask,
3451 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
3471 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE))
3490 WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) {
3497 if (gfp_mask & __GFP_NOFAIL)
3498 page = __alloc_pages_cpuset_fallback(gfp_mask, order,
3515 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3530 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
3547 prep_new_page(page, order, gfp_mask, alloc_flags);
3551 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3640 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3680 static bool __need_reclaim(gfp_t gfp_mask)
3683 if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
3690 if (gfp_mask & __GFP_NOLOCKDEP)
3706 void fs_reclaim_acquire(gfp_t gfp_mask)
3708 gfp_mask = current_gfp_context(gfp_mask);
3710 if (__need_reclaim(gfp_mask)) {
3711 if (gfp_mask & __GFP_FS)
3723 void fs_reclaim_release(gfp_t gfp_mask)
3725 gfp_mask = current_gfp_context(gfp_mask);
3727 if (__need_reclaim(gfp_mask)) {
3728 if (gfp_mask & __GFP_FS)
3761 __perform_reclaim(gfp_t gfp_mask, unsigned int order,
3771 fs_reclaim_acquire(gfp_mask);
3774 progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
3778 fs_reclaim_release(gfp_mask);
3787 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
3796 *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
3801 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
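
A sketch of the fs_reclaim bracketing that __perform_reclaim() (3761-3778 above) applies around direct reclaim. The reclaim body here is a placeholder; the real code also sets PF_MEMALLOC and calls try_to_free_pages().

#include <linux/gfp.h>
#include <linux/sched/mm.h>

static unsigned long direct_reclaim_sketch(gfp_t gfp_mask)
{
	unsigned long progress = 0;

	fs_reclaim_acquire(gfp_mask);	/* lockdep: allocation may recurse into FS/IO reclaim */
	/* ... try_to_free_pages() would run here ... */
	fs_reclaim_release(gfp_mask);

	return progress;
}
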
3820 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
3833 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
3840 gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
3859 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
3861 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
3866 if (!(gfp_mask & __GFP_NOMEMALLOC)) {
3883 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);
3907 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
3909 if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
3911 if (gfp_mask & __GFP_MEMALLOC)
3925 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
3927 return !!__gfp_pfmemalloc_flags(gfp_mask);
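
The ordering encoded at 3907-3911 is that __GFP_NOMEMALLOC always overrides __GFP_MEMALLOC. The sketch below is an illustrative restatement only; the real helper returns ALLOC_* reserve flags and also considers the calling task's state.

#include <linux/gfp.h>
#include <linux/types.h>

static bool may_use_reserves_sketch(gfp_t gfp_mask)
{
	if (gfp_mask & __GFP_NOMEMALLOC)	/* explicit opt-out wins */
		return false;
	if (gfp_mask & __GFP_MEMALLOC)		/* full access to reserves */
		return true;
	return false;				/* simplified; task flags ignored here */
}
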
3941 should_reclaim_retry(gfp_t gfp_mask, unsigned order,
4046 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
4049 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
4050 bool can_compact = gfp_compaction_allowed(gfp_mask);
4075 alloc_flags = gfp_to_alloc_flags(gfp_mask, order);
4093 if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) {
4102 wake_all_kswapds(order, gfp_mask, ac);
4108 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4124 && !gfp_pfmemalloc_allowed(gfp_mask)) {
4125 page = __alloc_pages_direct_compact(gfp_mask, order,
4136 if (costly_order && (gfp_mask & __GFP_NORETRY)) {
4170 wake_all_kswapds(order, gfp_mask, ac);
4172 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
4174 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) |
4189 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4202 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
4208 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
4214 if (gfp_mask & __GFP_NORETRY)
4222 !(gfp_mask & __GFP_RETRY_MAYFAIL)))
4225 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
4251 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
4258 (gfp_mask & __GFP_NOMEMALLOC)))
4280 if (gfp_mask & __GFP_NOFAIL) {
4285 if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask))
4293 WARN_ON_ONCE_GFP(current->flags & PF_MEMALLOC, gfp_mask);
4301 WARN_ON_ONCE_GFP(costly_order, gfp_mask);
4310 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac);
4318 warn_alloc(gfp_mask, ac->nodemask,
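
A caller-side sketch (hypothetical, not from this listing) of the __GFP_NOFAIL contract the slowpath enforces at 4280-4301: the allocator loops instead of failing, so the flag must come with direct reclaim and should stay at non-costly orders.

#include <linux/gfp.h>

static struct page *must_have_page_demo(void)
{
	/* Never returns NULL; reserve for cases where failure cannot be handled. */
	return alloc_pages(GFP_KERNEL | __GFP_NOFAIL, 0);
}
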
4324 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
4329 ac->highest_zoneidx = gfp_zone(gfp_mask);
4330 ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
4332 ac->migratetype = gfp_migratetype(gfp_mask);
4346 might_alloc(gfp_mask);
4348 if (should_fail_alloc_page(gfp_mask, order))
4351 *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
4354 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
4618 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
4622 page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
4629 unsigned long get_zeroed_page(gfp_t gfp_mask)
4631 return __get_free_page(gfp_mask | __GFP_ZERO);
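
get_zeroed_page() at 4629-4631 is __get_free_page() with __GFP_ZERO or'd in. A hypothetical usage sketch:

#include <linux/errno.h>
#include <linux/gfp.h>

static int scratch_page_demo(void)
{
	unsigned long addr = get_zeroed_page(GFP_KERNEL);

	if (!addr)
		return -ENOMEM;
	/* ... use the zero-filled page ... */
	free_page(addr);
	return 0;
}
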
4690 gfp_t gfp_mask)
4693 gfp_t gfp = gfp_mask;
4696 gfp_mask = (gfp_mask & ~__GFP_DIRECT_RECLAIM) | __GFP_COMP |
4698 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
4730 unsigned int fragsz, gfp_t gfp_mask,
4739 page = __page_frag_cache_refill(nc, gfp_mask);
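
The fragments at 4690-4739 belong to the page-fragment cache behind page_frag_alloc(). A hypothetical consumer, assuming the declarations come from <linux/gfp.h> in this kernel version; the cache relies on being zero-initialised before first use.

#include <linux/gfp.h>

static struct page_frag_cache demo_frag_cache;	/* static storage starts zeroed */

static void *small_buf_demo(unsigned int len)
{
	/* pair with page_frag_free() on the returned pointer */
	return page_frag_alloc(&demo_frag_cache, len, GFP_ATOMIC);
}
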
4837 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
4849 void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
4854 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
4855 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
4857 addr = __get_free_pages(gfp_mask, order);
4867 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
4874 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
4879 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
4880 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
4882 p = alloc_pages_node(nid, gfp_mask, order);
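
alloc_pages_exact() and alloc_pages_exact_nid() allocate a power-of-two block and free the unused tail pages, which is why __GFP_COMP is rejected at 4854 and 4879. A hypothetical caller:

#include <linux/gfp.h>

static void *exact_buffer_demo(size_t size)
{
	void *buf = alloc_pages_exact(size, GFP_KERNEL);

	/* pair with free_pages_exact(buf, size) when done */
	return buf;
}
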
6253 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
6306 if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY)
6326 * @gfp_mask: GFP mask to use during compaction
6340 unsigned migratetype, gfp_t gfp_mask)
6353 .gfp_mask = current_gfp_context(gfp_mask),
6379 ret = start_isolate_page_range(start, end, migratetype, 0, gfp_mask);
6466 unsigned long nr_pages, gfp_t gfp_mask)
6471 gfp_mask);
6508 * @gfp_mask: GFP mask to limit search and used during compaction
6526 struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
6534 zonelist = node_zonelist(nid, gfp_mask);
6536 gfp_zone(gfp_mask), nodemask) {
6551 gfp_mask);
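
alloc_contig_pages() (6526 onward) walks the zonelist derived from gfp_mask looking for a range it can isolate and migrate. A hypothetical caller, assuming CONFIG_CONTIG_ALLOC is enabled:

#include <linux/gfp.h>

static struct page *contig_range_demo(unsigned long nr_pages, int nid)
{
	struct page *page;

	page = alloc_contig_pages(nr_pages, GFP_KERNEL | __GFP_NOWARN,
				  nid, NULL);
	if (!page)
		return NULL;
	/* release with free_contig_range(page_to_pfn(page), nr_pages) */
	return page;
}
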