Lines matching refs:gfpflags (mm/slub.c)

2564 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags);
3233 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
3240 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
3244 nid, gfpflags, &gfpflags);
3268 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) { }
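The hits at 3233-3244 are slab_out_of_memory(): the report is skipped
outright when the caller passed __GFP_NOWARN, and is otherwise
rate-limited via slub_oom_rs so an allocation storm cannot flood the
log. A minimal userspace model of that gating (the one-second token
window and the 0x200 flag value are stand-ins for __ratelimit() and
__GFP_NOWARN, not the kernel's definitions):

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    #define MY_GFP_NOWARN 0x200u            /* stand-in for __GFP_NOWARN */

    static bool ratelimit_ok(void)
    {
        static time_t window;
        static int burst;
        time_t now = time(NULL);

        if (now != window) {                /* new one-second window */
            window = now;
            burst = 0;
        }
        return burst++ < 4;                 /* at most 4 reports per window */
    }

    /* models slab_out_of_memory(): warn unless told not to, rate-limited */
    static void slab_oom_report(const char *cache, unsigned gfpflags, int nid)
    {
        if ((gfpflags & MY_GFP_NOWARN) || !ratelimit_ok())
            return;
        fprintf(stderr, "SLUB: cache %s: no memory on node %d, gfp=%#x\n",
                cache, nid, gfpflags);
    }

    int main(void)
    {
        for (int i = 0; i < 10; i++)
            slab_oom_report("demo", 0, -1);         /* only the first few print */
        slab_oom_report("demo", MY_GFP_NOWARN, -1); /* suppressed */
        return 0;
    }
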
3271 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags)
3274 return gfp_pfmemalloc_allowed(gfpflags);
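pfmemalloc_match() (3271-3274) is the gate that keeps emergency memory
reserves from leaking to ordinary callers: a slab that was carved out of
the page allocator's pfmemalloc reserves may only serve requests whose
gfpflags themselves permit using reserves; any other slab matches
unconditionally. A sketch of the predicate (the boolean slab flag and
bit 0 of the flags are simplified stand-ins for slab_test_pfmemalloc()
and gfp_pfmemalloc_allowed()):

    #include <stdbool.h>
    #include <stdio.h>

    struct slab { bool pfmemalloc; };       /* slab came from reserves? */

    static bool may_use_reserves(unsigned gfpflags)
    {
        return gfpflags & 1u;               /* stand-in policy bit */
    }

    static bool pfmemalloc_match(struct slab *slab, unsigned gfpflags)
    {
        if (slab->pfmemalloc)
            return may_use_reserves(gfpflags);
        return true;                        /* ordinary slab: always ok */
    }

    int main(void)
    {
        struct slab reserve_slab = { .pfmemalloc = true };
        printf("%d\n", pfmemalloc_match(&reserve_slab, 0)); /* 0: refused */
        printf("%d\n", pfmemalloc_match(&reserve_slab, 1)); /* 1: allowed */
        return 0;
    }
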
3371 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
3413 if (unlikely(!pfmemalloc_match(slab, gfpflags)))
3486 pfmemalloc_match(slab, gfpflags))) {
3503 pc.flags = gfpflags;
3525 slab = new_slab(s, gfpflags, node);
3529 slab_out_of_memory(s, gfpflags, node);
3558 if (unlikely(!pfmemalloc_match(slab, gfpflags))) {
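The cluster at 3371-3558 is ___slab_alloc(), the slow path. Note how
pfmemalloc_match() is re-checked at 3413, 3486 and 3558: each of those
points follows a step where the slab in hand may have changed, so the
reserve gate has to be re-evaluated. When no usable slab remains, the
gfpflags are copied into the partial-list allocation context (pc.flags,
3503), new_slab() is tried (3525), and only if that also fails is
slab_out_of_memory() reported (3529). A compressed model of that
fallback ordering (all helpers are local stubs; none of the kernel's
per-cpu or locking machinery is shown):

    #include <stdbool.h>
    #include <stdio.h>

    struct slab { bool usable; int free; };

    /* stands in for the checks repeated at 3413/3486/3558: the slab
     * must still have free objects and pass pfmemalloc_match() */
    static bool slab_ok(struct slab *slab, unsigned gfpflags)
    {
        (void)gfpflags;
        return slab && slab->usable && slab->free > 0;
    }

    static struct slab *new_slab(unsigned gfpflags, int node)
    {
        static struct slab fresh = { true, 8 };
        (void)gfpflags; (void)node;         /* pretend the page allocator ran */
        return &fresh;
    }

    static void *slow_alloc(struct slab *cpu_slab, unsigned gfpflags, int node)
    {
        struct slab *slab = cpu_slab;

        if (!slab_ok(slab, gfpflags))       /* current slab unusable: drop it */
            slab = new_slab(gfpflags, node);
        if (!slab) {
            fprintf(stderr, "OOM on node %d\n", node);  /* last resort */
            return NULL;
        }
        slab->free--;
        return slab;                        /* stand-in for an object pointer */
    }

    int main(void)
    {
        struct slab stale = { false, 0 };
        printf("%s\n", slow_alloc(&stale, 0, -1) ? "allocated" : "failed");
        return 0;
    }
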
3596 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
3610 p = ___slab_alloc(s, gfpflags, node, addr, c, orig_size);
3618 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
3663 object = __slab_alloc(s, gfpflags, node, addr, c, orig_size);
3693 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
3699 pc.flags = gfpflags;
3706 slab = new_slab(s, gfpflags, node);
3708 slab_out_of_memory(s, gfpflags, node);
3730 noinline int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
3732 if (__should_failslab(s, gfpflags))
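should_failslab() (3730-3732) is the failslab fault-injection hook:
when enabled, it makes a configurable fraction of allocations fail on
purpose so callers' error paths actually get exercised. A userspace
analogue, with a fixed interval standing in for the debugfs knobs that
control the real facility:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int fail_interval = 3;           /* fail every 3rd attempt */

    static bool should_fail_alloc(void)
    {
        static int count;
        return fail_interval && (++count % fail_interval == 0);
    }

    static void *checked_alloc(size_t size)
    {
        if (should_fail_alloc())
            return NULL;                    /* injected failure */
        return malloc(size);
    }

    int main(void)
    {
        for (int i = 0; i < 6; i++) {
            void *p = checked_alloc(32);
            printf("attempt %d: %s\n", i, p ? "ok" : "injected ENOMEM");
            free(p);
        }
        return 0;
    }
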
3821 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
3827 s = slab_pre_alloc_hook(s, lru, &objcg, 1, gfpflags);
3831 object = kfence_alloc(s, orig_size, gfpflags);
3835 object = __slab_alloc_node(s, gfpflags, node, addr, orig_size);
3838 init = slab_want_init_on_alloc(gfpflags, s);
3845 slab_post_alloc_hook(s, objcg, gfpflags, 1, &object, init, orig_size);
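slab_alloc_node() (3821-3845) strings the per-allocation hooks
together: slab_pre_alloc_hook() runs first (it is where
should_failslab() from above can veto the request), kfence_alloc() may
intercept the allocation for its sampled debugging pool, and once an
object exists, slab_want_init_on_alloc() decides whether the post-alloc
hook zeroes it. A reduced model of the init-on-alloc decision (the
global toggle and the 0x100 flag roughly stand in for the init_on_alloc
static key and __GFP_ZERO; the real predicate also consults cache flags
and constructors):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define MY_GFP_ZERO 0x100u              /* stand-in for __GFP_ZERO */
    static bool init_on_alloc = true;       /* stand-in for the boot knob */

    static bool want_init_on_alloc(unsigned gfpflags)
    {
        return init_on_alloc || (gfpflags & MY_GFP_ZERO);
    }

    static void *alloc_hooked(size_t size, unsigned gfpflags)
    {
        void *obj = malloc(size);           /* the allocation proper */

        /* post-alloc hook: honor the init-on-alloc decision */
        if (obj && want_init_on_alloc(gfpflags))
            memset(obj, 0, size);
        return obj;
    }

    int main(void)
    {
        char *p = alloc_hooked(16, 0);
        printf("first byte: %d\n", p ? p[0] : -1);  /* 0: object was zeroed */
        free(p);
        return 0;
    }
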
3850 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
3852 void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE, _RET_IP_,
3855 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE);
3862 gfp_t gfpflags)
3864 void *ret = slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, _RET_IP_,
3867 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE);
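kmem_cache_alloc() and kmem_cache_alloc_lru() (3850-3867) pass _RET_IP_
both into the allocation and into the tracepoint; in the kernel,
_RET_IP_ is simply __builtin_return_address(0), so trace events are
attributed to the call site rather than to the allocator itself. The
same trick works in userspace with GCC or Clang:

    #include <stdio.h>
    #include <stdlib.h>

    __attribute__((noinline))               /* keep a real call frame */
    static void *traced_alloc(size_t size)
    {
        void *p = malloc(size);
        /* __builtin_return_address(0) is what _RET_IP_ expands to:
         * the address we will return to in our caller */
        fprintf(stderr, "alloc %zu -> %p from %p\n",
                size, p, __builtin_return_address(0));
        return p;
    }

    int main(void)
    {
        void *p = traced_alloc(32);         /* log line points back here */
        free(p);
        return 0;
    }
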
3876 * @gfpflags: See kmalloc().
3886 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
3888 void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
3890 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, node);
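For reference, this is how a typical caller drives kmem_cache_alloc_node()
(3886-3890): create a cache once, then allocate objects from it with
whatever gfpflags suit the calling context. Kernel-side sketch only;
the foo names are illustrative and error handling is trimmed:

    #include <linux/init.h>
    #include <linux/slab.h>

    struct foo { int id; char name[24]; };  /* illustrative object */

    static struct kmem_cache *foo_cache;

    static int __init foo_setup(void)
    {
        foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
                                      SLAB_HWCACHE_ALIGN, NULL);
        return foo_cache ? 0 : -ENOMEM;
    }

    static struct foo *foo_alloc_on(int nid)
    {
        /* gfpflags are the caller's choice: GFP_KERNEL may sleep,
         * GFP_ATOMIC may not; nid selects the preferred NUMA node */
        return kmem_cache_alloc_node(foo_cache, GFP_KERNEL, nid);
    }

    static void foo_free(struct foo *f)
    {
        kmem_cache_free(foo_cache, f);
    }
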
3990 void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
3992 void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE,
3995 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE);
3997 ret = kasan_kmalloc(s, ret, size, gfpflags);
4002 void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
4005 void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size);
4007 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node);
4009 ret = kasan_kmalloc(s, ret, size, gfpflags);
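kmalloc_trace() and kmalloc_node_trace() (3990-4010) back kmalloc()
calls whose size is a compile-time constant: allocate from the fitting
size bucket, emit the trace event, then let kasan_kmalloc() mark only
the requested size bytes usable, so the slack between size and the
bucket size (s->size) serves as a redzone that catches out-of-bounds
writes. A crude canary-byte model of that idea (real KASAN uses shadow
memory, not canaries):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define BUCKET 64                       /* stand-in for s->size */
    #define CANARY 0xAA

    static void *kmalloc_model(size_t size)
    {
        unsigned char *obj = malloc(BUCKET);

        if (obj)                            /* "poison" the slack as redzone */
            memset(obj + size, CANARY, BUCKET - size);
        return obj;
    }

    static int redzone_intact(void *p, size_t size)
    {
        unsigned char *obj = p;

        for (size_t i = size; i < BUCKET; i++)
            if (obj[i] != CANARY)
                return 0;                   /* out-of-bounds write detected */
        return 1;
    }

    int main(void)
    {
        char *p = kmalloc_model(40);

        if (!p)
            return 1;
        p[39] = 'x';                        /* within the requested size */
        printf("intact: %d\n", redzone_intact(p, 40));  /* 1 */
        p[45] = 'x';                        /* past the requested size */
        printf("intact: %d\n", redzone_intact(p, 40));  /* 0 */
        free(p);
        return 0;
    }
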