/linux-master/lib/

textsearch.c
    250   * @gfp_mask: allocation mask
    264   unsigned int len, gfp_t gfp_mask, int flags)
    289   conf = ops->init(pattern, len, gfp_mask, flags);
    263   textsearch_prepare(const char *algo, const void *pattern, unsigned int len, gfp_t gfp_mask, int flags)  [argument]

ts_bm.c
    161   gfp_t gfp_mask, int flags)
    169   conf = alloc_ts_config(priv_size, gfp_mask);
    160   bm_init(const void *pattern, unsigned int len, gfp_t gfp_mask, int flags)  [argument]

ts_fsm.c
    256   gfp_t gfp_mask, int flags)
    282   conf = alloc_ts_config(priv_size, gfp_mask);
    255   fsm_init(const void *pattern, unsigned int len, gfp_t gfp_mask, int flags)  [argument]

ts_kmp.c
    92    gfp_t gfp_mask, int flags)
    100   conf = alloc_ts_config(priv_size, gfp_mask);
    91    kmp_init(const void *pattern, unsigned int len, gfp_t gfp_mask, int flags)  [argument]

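All four algorithms above are reached through textsearch_prepare(), which
passes gfp_mask down to alloc_ts_config(). A minimal sketch of the API as
listed above (find_needle() is a hypothetical caller; the textsearch_*
calls and TS_AUTOLOAD are existing kernel interfaces):

    #include <linux/err.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/limits.h>
    #include <linux/textsearch.h>

    static int find_needle(const void *haystack, unsigned int len)
    {
            struct ts_config *conf;
            struct ts_state state;
            unsigned int pos;

            /* "kmp" picks ts_kmp.c; "bm" and "fsm" init the same way.
             * GFP_KERNEL is the gfp_mask handed to alloc_ts_config(). */
            conf = textsearch_prepare("kmp", "needle", 6, GFP_KERNEL,
                                      TS_AUTOLOAD);
            if (IS_ERR(conf))
                    return PTR_ERR(conf);

            pos = textsearch_find_continuous(conf, &state, haystack, len);
            textsearch_destroy(conf);
            return pos == UINT_MAX ? -ENOENT : pos;
    }
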
/linux-master/mm/

compaction.c
    835   if (cc->gfp_mask & __GFP_FS) {
    1148  if (!(cc->gfp_mask & __GFP_FS) && mapping)
    2548  cc->migratetype = gfp_migratetype(cc->gfp_mask);
    2762  gfp_t gfp_mask, enum compact_priority prio,
    2770  .gfp_mask = gfp_mask,
    2817  * @gfp_mask: The GFP mask of the current allocation
    2826  enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,  [argument]
    2834  if (!gfp_compaction_allowed(gfp_mask))
    2837  trace_mm_compaction_try_to_compact_pages(order, gfp_mask, pri
    2761  compact_zone_order(struct zone *zone, int order, gfp_t gfp_mask, enum compact_priority prio, unsigned int alloc_flags, int highest_zoneidx, struct page **capture)  [argument]
    [all...]

fail_page_alloc.c
    24    bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)  [argument]
    30    if (gfp_mask & __GFP_NOFAIL)
    32    if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
    35    (gfp_mask & __GFP_DIRECT_RECLAIM))
    39    if (gfp_mask & __GFP_NOWARN)

filemap.c
    3301  vmf->gfp_mask);

gup.c
    2146  .gfp_mask = GFP_USER | __GFP_NOWARN,

hugetlb.c
    1349  static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask,  [argument]
    1358  zonelist = node_zonelist(nid, gfp_mask);
    1362  for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
    1365  if (!cpuset_zone_allowed(zone, gfp_mask))
    1397  gfp_t gfp_mask;  [local]
    1413  gfp_mask = htlb_alloc_mask(h);
    1414  nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
    1417  folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
    1425  folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
    1565  static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,  [argument]
    1603  alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nodemask)  [argument]
    1611  alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nodemask)  [argument]
    2182  alloc_buddy_hugetlb_folio(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nmask, nodemask_t *node_alloc_noretry)  [argument]
    2245  __alloc_fresh_hugetlb_folio(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nmask, nodemask_t *node_alloc_noretry)  [argument]
    2279  only_alloc_fresh_hugetlb_folio(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nmask, nodemask_t *node_alloc_noretry)  [argument]
    2299  alloc_fresh_hugetlb_folio(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nmask, nodemask_t *node_alloc_noretry)  [argument]
    2341  gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;  [local]
    2512  alloc_surplus_hugetlb_folio(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nmask)  [argument]
    2553  alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nmask)  [argument]
    2585  gfp_t gfp_mask = htlb_alloc_mask(h);  [local]
    2607  alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask, gfp_t gfp_mask)  [argument]
    3016  gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;  [local]
    3479  gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;  [local]
    4907  gfp_t gfp_mask = htlb_alloc_mask(h);  [local]
    6641  gfp_t gfp_mask;  [local]
    [all...]

hugetlb_vmemmap.c
    326   gfp_t gfp_mask = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;  [local]
    336   walk.reuse_page = alloc_pages_node(nid, gfp_mask, 0);
    383   gfp_t gfp_mask = GFP_KERNEL | __GFP_RETRY_MAYFAIL;  [local]
    389   page = alloc_pages_node(nid, gfp_mask, 0);

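Both call sites above take GFP_KERNEL and add modifier bits to control how
hard alloc_pages_node() tries before giving up. A minimal sketch
(grab_local_page() is a hypothetical helper; the allocator call and flags
are existing kernel interfaces):

    #include <linux/gfp.h>

    static struct page *grab_local_page(int nid)
    {
            /* Prefer node nid; fail fast and quietly rather than
             * retrying or warning, as the line-326 mask above does. */
            gfp_t gfp_mask = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;

            return alloc_pages_node(nid, gfp_mask, 0);  /* order 0 */
    }
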
internal.h
    597   const gfp_t gfp_mask; /* gfp mask of a direct compactor */  [member in struct:compact_control]
    1053  gfp_t gfp_mask;  [member in struct:migration_target_control]
    1328  unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,

memcontrol.c
    1795  static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,  [argument]
    1802  .gfp_mask = gfp_mask,
    1826  gfp_t gfp_mask,
    1864  total += mem_cgroup_shrink_node(victim, gfp_mask, false,
    2462  gfp_t gfp_mask)
    2477  gfp_mask,
    2636  void mem_cgroup_handle_over_high(gfp_t gfp_mask)  [argument]
    2675  gfp_mask);
    2729  static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,  [argument]
    1824  mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg, pg_data_t *pgdat, gfp_t gfp_mask, unsigned long *total_scanned)  [argument]
    2460  reclaim_high(struct mem_cgroup *memcg, unsigned int nr_pages, gfp_t gfp_mask)  [argument]
    2925  try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, unsigned int nr_pages)  [argument]
    3724  mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, gfp_t gfp_mask, unsigned long *total_scanned)  [argument]
    7668  mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages, gfp_t gfp_mask)  [argument]
    [all...]

memory-failure.c
    2680  .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,

memory.c
    5354  .gfp_mask = __get_fault_gfp_mask(vma),

memory_hotplug.c
    1843  .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,

mempolicy.c
    1072  .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,

mempool.c
    197   gfp_t gfp_mask, int node_id)
    207   gfp_mask, node_id);
    217   element = pool->alloc(gfp_mask, pool->pool_data);
    278   gfp_t gfp_mask, int node_id)
    282   pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
    287   gfp_mask, node_id)) {
    380   * @gfp_mask: the usual allocation bitmask.
    390   void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)  [argument]
    397   VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
    398   might_alloc(gfp_mask);
    195   mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, void *pool_data, gfp_t gfp_mask, int node_id)  [argument]
    276   mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, void *pool_data, gfp_t gfp_mask, int node_id)  [argument]
    561   mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)  [argument]
    580   mempool_kmalloc(gfp_t gfp_mask, void *pool_data)  [argument]
    593   mempool_kvmalloc(gfp_t gfp_mask, void *pool_data)  [argument]
    610   mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)  [argument]
    [all...]

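mempool_alloc() above dips into the preallocated reserve only when the
backing allocator fails, and the line-397 check forbids __GFP_ZERO in the
mask. A minimal sketch (req_pool and its helpers are hypothetical; the
mempool_* calls are existing kernel interfaces):

    #include <linux/errno.h>
    #include <linux/mempool.h>

    static mempool_t *req_pool;

    static int req_pool_init(void)
    {
            /* Keep 16 kmalloc'd 256-byte elements in reserve, backed by
             * the mempool_kmalloc/mempool_kfree pair listed above. */
            req_pool = mempool_create_kmalloc_pool(16, 256);
            return req_pool ? 0 : -ENOMEM;
    }

    static void *get_req(void)
    {
            /* GFP_NOIO: reclaim is allowed but I/O is not, so this is
             * safe on the writeback path; never pass __GFP_ZERO here. */
            return mempool_alloc(req_pool, GFP_NOIO);
    }
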
migrate.c
    2009  gfp_t gfp_mask;  [local]
    2015  gfp_mask = mtc->gfp_mask;
    2023  gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
    2025  mtc->nmask, gfp_mask);
    2033  gfp_mask &= ~__GFP_RECLAIM;
    2034  gfp_mask |= GFP_TRANSHUGE;
    2039  gfp_mask |= __GFP_HIGHMEM;
    2041  return __folio_alloc(gfp_mask, orde
    [all...]

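Lines 2033-2039 above show the usual in-place editing of a gfp_mask before
it reaches the allocator. A minimal sketch modelled on that logic
(migration_target_mask() is a hypothetical helper; the flag bits are
existing kernel interfaces):

    #include <linux/gfp.h>

    static gfp_t migration_target_mask(gfp_t gfp_mask, bool thp)
    {
            if (thp) {
                    /* Swap the caller's reclaim policy for the THP one. */
                    gfp_mask &= ~__GFP_RECLAIM;
                    gfp_mask |= GFP_TRANSHUGE;
            } else {
                    /* Base pages may come from highmem. */
                    gfp_mask |= __GFP_HIGHMEM;
            }
            return gfp_mask;
    }
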
nommu.c
    140   void *__vmalloc(unsigned long size, gfp_t gfp_mask)  [argument]
    146   return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
    151   unsigned long start, unsigned long end, gfp_t gfp_mask,
    155   return __vmalloc(size, gfp_mask);
    158   void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,  [argument]
    161   return __vmalloc(size, gfp_mask);
    226   void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __weak __alias(__vmalloc);
    150   __vmalloc_node_range(unsigned long size, unsigned long align, unsigned long start, unsigned long end, gfp_t gfp_mask, pgprot_t prot, unsigned long vm_flags, int node, const void *caller)  [argument]

oom_kill.c
    256   enum zone_type highest_zoneidx = gfp_zone(oc->gfp_mask);
    278   if (oc->gfp_mask & __GFP_THISNODE)
    297   if (!cpuset_zone_allowed(zone, oc->gfp_mask))
    456   pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, oom_score_adj=%hd\n",
    457   current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order,
    466   __show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask, gfp_zone(oc->gfp_mask));
    1138  if (!(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc))

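The pr_warn() at line 456 relies on the %pGg printk extension, which
decodes a gfp_t symbolically from a pointer to the mask. A minimal sketch
(report_mask() is a hypothetical helper; the format specifier is an
existing printk extension):

    #include <linux/gfp.h>
    #include <linux/printk.h>

    static void report_mask(gfp_t gfp_mask)
    {
            /* Prints e.g. "gfp_mask=0xcc0(GFP_KERNEL)". */
            pr_info("gfp_mask=%#x(%pGg)\n", gfp_mask, &gfp_mask);
    }
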
page_alloc.c
    2926  noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)  [argument]
    2928  return __should_fail_alloc_page(gfp_mask, order);
    3050  unsigned int alloc_flags, gfp_t gfp_mask)
    3129  alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)  [argument]
    3137  alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
    3160  /* Must be called after current_gfp_context() which can change gfp_mask */
    3161  static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,  [argument]
    3165  if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
    3176  get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,  [argument]
    3199  !__cpuset_zone_allowed(zone, gfp_mask)
    3048  zone_watermark_fast(struct zone *z, unsigned int order, unsigned long mark, int highest_zoneidx, unsigned int alloc_flags, gfp_t gfp_mask)  [argument]
    3355  warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)  [argument]
    3374  warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)  [argument]
    3400  __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac)  [argument]
    3420  __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, const struct alloc_context *ac, unsigned long *did_some_progress)  [argument]
    3515  __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac, enum compact_priority prio, enum compact_result *compact_result)  [argument]
    3640  __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac, enum compact_priority prio, enum compact_result *compact_result)  [argument]
    3680  __need_reclaim(gfp_t gfp_mask)  [argument]
    3706  fs_reclaim_acquire(gfp_t gfp_mask)  [argument]
    3723  fs_reclaim_release(gfp_t gfp_mask)  [argument]
    3761  __perform_reclaim(gfp_t gfp_mask, unsigned int order, const struct alloc_context *ac)  [argument]
    3787  __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac, unsigned long *did_some_progress)  [argument]
    3820  wake_all_kswapds(unsigned int order, gfp_t gfp_mask, const struct alloc_context *ac)  [argument]
    3840  gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)  [argument]
    3907  __gfp_pfmemalloc_flags(gfp_t gfp_mask)  [argument]
    3925  gfp_pfmemalloc_allowed(gfp_t gfp_mask)  [argument]
    3941  should_reclaim_retry(gfp_t gfp_mask, unsigned order, struct alloc_context *ac, int alloc_flags, bool did_some_progress, int *no_progress_loops)  [argument]
    4046  __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, struct alloc_context *ac)  [argument]
    4324  prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid, nodemask_t *nodemask, struct alloc_context *ac, gfp_t *alloc_gfp, unsigned int *alloc_flags)  [argument]
    4618  __get_free_pages(gfp_t gfp_mask, unsigned int order)  [argument]
    4629  get_zeroed_page(gfp_t gfp_mask)  [argument]
    4689  __page_frag_cache_refill(struct page_frag_cache *nc, gfp_t gfp_mask)  [argument]
    4729  __page_frag_alloc_align(struct page_frag_cache *nc, unsigned int fragsz, gfp_t gfp_mask, unsigned int align_mask)  [argument]
    4849  alloc_pages_exact(size_t size, gfp_t gfp_mask)  [argument]
    4874  alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)  [argument]
    6339  alloc_contig_range(unsigned long start, unsigned long end, unsigned migratetype, gfp_t gfp_mask)  [argument]
    6465  __alloc_contig_pages(unsigned long start_pfn, unsigned long nr_pages, gfp_t gfp_mask)  [argument]
    6526  alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask, int nid, nodemask_t *nodemask)  [argument]
    [all...]

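Most entry points above funnel into get_page_from_freelist() and, on
failure, __alloc_pages_slowpath(). A minimal sketch of the public side
(the scratch-buffer helpers are hypothetical; __get_free_pages() and
free_pages() are existing kernel interfaces):

    #include <linux/gfp.h>

    static void *make_scratch_buffer(void)
    {
            /* Two contiguous zeroed pages; GFP_KERNEL may sleep and
             * enter direct reclaim, unlike GFP_ATOMIC. */
            unsigned long addr = __get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);

            return (void *)addr;    /* NULL on failure */
    }

    static void drop_scratch_buffer(void *buf)
    {
            /* Same order as the allocation; a zero address is a no-op. */
            free_pages((unsigned long)buf, 1);
    }
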
page_isolation.c
    417   .gfp_mask = gfp_flags,

page_owner.c
    27    gfp_t gfp_mask;  [member in struct:page_owner]
    166   gfp_t gfp_mask)
    171   /* Filter gfp_mask the same way stackdepot does, for consistency */
    172   gfp_mask &= ~GFP_ZONEMASK;
    173   gfp_mask &= (GFP_ATOMIC | GFP_KERNEL | __GFP_NOLOCKDEP);
    174   gfp_mask |= __GFP_NOWARN;
    177   stack = kmalloc(sizeof(*stack), gfp_mask);
    199   static void inc_stack_record_count(depot_stack_handle_t handle, gfp_t gfp_mask,  [argument]
    219   add_stack_record_to_list(stack_record, gfp_mask);
    240   gfp_t gfp_mask,
    165   add_stack_record_to_list(struct stack_record *stack_record, gfp_t gfp_mask)  [argument]
    237   __update_page_owner_handle(struct page_ext *page_ext, depot_stack_handle_t handle, unsigned short order, gfp_t gfp_mask, short last_migrate_reason, u64 ts_nsec, pid_t pid, pid_t tgid, char *comm)  [argument]
    318   __set_page_owner(struct page *page, unsigned short order, gfp_t gfp_mask)  [argument]
    603   gfp_t gfp_mask;  [local]
    [all...]

/linux-master/mm/kasan/ |
shadow.c
    581   int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask)  [argument]
    614   kmemleak_vmalloc(vm, size, gfp_mask);

/linux-master/mm/kmsan/ |
hooks.c
    155   gfp_t gfp_mask = GFP_KERNEL | __GFP_ZERO;  [local]
    166   shadow = alloc_pages(gfp_mask, 1);
    167   origin = alloc_pages(gfp_mask, 1);