Searched refs:gfp_mask (Results 1 - 25 of 225) sorted by relevance

/linux-master/fs/btrfs/
ulist.h
49 struct ulist *ulist_alloc(gfp_t gfp_mask);
51 int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask);
53 u64 *old_aux, gfp_t gfp_mask);
58 void **old_aux, gfp_t gfp_mask)
62 int ret = ulist_add_merge(ulist, val, (uintptr_t)aux, &old64, gfp_mask);
66 return ulist_add_merge(ulist, val, (u64)aux, (u64 *)old_aux, gfp_mask);
57 ulist_add_merge_ptr(struct ulist *ulist, u64 val, void *aux, void **old_aux, gfp_t gfp_mask) argument
ulist.c
92 * @gfp_mask: allocation flags to use for the base allocation
96 struct ulist *ulist_alloc(gfp_t gfp_mask) argument
98 struct ulist *ulist = kmalloc(sizeof(*ulist), gfp_mask);
177 * @gfp_mask: flags to use for allocation
192 int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask) argument
194 return ulist_add_merge(ulist, val, aux, NULL, gfp_mask);
198 u64 *old_aux, gfp_t gfp_mask)
209 node = kmalloc(sizeof(*node), gfp_mask);
197 ulist_add_merge(struct ulist *ulist, u64 val, u64 aux, u64 *old_aux, gfp_t gfp_mask) argument
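For orientation, a minimal sketch of how this ulist API is typically driven (hypothetical caller; the value 4096 is an arbitrary example, ulist_free() comes from the same header):

	struct ulist *ul = ulist_alloc(GFP_KERNEL);	/* returns NULL on failure */
	if (!ul)
		return -ENOMEM;
	/* ulist_add() returns 1 if newly added, 0 if already present, <0 on error. */
	ulist_add(ul, 4096ULL, 0, GFP_KERNEL);
	ulist_free(ul);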
/linux-master/include/linux/
gfp.h
158 * We get the zone list from the current node and the gfp_mask.
214 static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask) argument
216 gfp_t warn_gfp = gfp_mask & (__GFP_THISNODE|__GFP_NOWARN);
224 pr_warn("%pGg allocation from offline node %d\n", &gfp_mask, this_node);
233 __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) argument
236 warn_if_node_offline(nid, gfp_mask);
238 return __alloc_pages(gfp_mask, order, nid, NULL);
255 static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, argument
261 return __alloc_pages_node(nid, gfp_mask, order);
272 static inline struct page *alloc_pages(gfp_t gfp_mask, unsigned int order) argument
319 page_frag_alloc_align(struct page_frag_cache *nc, unsigned int fragsz, gfp_t gfp_mask, unsigned int align) argument
327 page_frag_alloc(struct page_frag_cache *nc, unsigned int fragsz, gfp_t gfp_mask) argument
368 gfp_compaction_allowed(gfp_t gfp_mask) argument
[all...]
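The node-aware entry points above pair with __free_pages(); a minimal sketch (hypothetical caller, NUMA_NO_NODE means "no node preference"):

	/* One page (order 0); the node hint only steers the zonelist. */
	struct page *page = alloc_pages_node(NUMA_NO_NODE, GFP_KERNEL, 0);
	if (!page)
		return -ENOMEM;
	__free_pages(page, 0);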
mempool.h
13 typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data);
41 gfp_t gfp_mask, int node_id);
49 gfp_t gfp_mask, int nid);
53 extern void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc;
62 void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data);
83 void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data);
98 void *mempool_kvmalloc(gfp_t gfp_mask, void *pool_data);
115 void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data);
blk-crypto.h
84 gfp_t gfp_mask);
115 int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask);
120 * @gfp_mask: memory allocation flags
125 * @gfp_mask doesn't include %__GFP_DIRECT_RECLAIM.
128 gfp_t gfp_mask)
131 return __bio_crypt_clone(dst, src, gfp_mask);
127 bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask) argument
page_owner.h
13 unsigned short order, gfp_t gfp_mask);
29 unsigned short order, gfp_t gfp_mask)
32 __set_page_owner(page, order, gfp_mask);
61 unsigned short order, gfp_t gfp_mask)
28 set_page_owner(struct page *page, unsigned short order, gfp_t gfp_mask) argument
60 set_page_owner(struct page *page, unsigned short order, gfp_t gfp_mask) argument
cpuset.h
86 extern bool cpuset_node_allowed(int node, gfp_t gfp_mask);
88 static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) argument
90 return cpuset_node_allowed(zone_to_nid(z), gfp_mask);
93 static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) argument
96 return __cpuset_zone_allowed(z, gfp_mask);
224 static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) argument
229 static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) argument
fault-inject.h
94 bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order);
97 bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order);
99 static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) argument
connector.h
92 * @gfp_mask: GFP mask.
102 u32 group, gfp_t gfp_mask,
120 * @gfp_mask: GFP mask.
127 int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 group, gfp_t gfp_mask);
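A hedged sketch of cn_netlink_send() per the declaration above; the payload (data, data_len) is hypothetical and CN_IDX_PROC/CN_VAL_PROC are used purely as an example connector id:

	struct cn_msg *msg = kzalloc(sizeof(*msg) + data_len, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;
	msg->id.idx = CN_IDX_PROC;	/* example id, not a recommendation */
	msg->id.val = CN_VAL_PROC;
	msg->len = data_len;
	memcpy(msg->data, data, data_len);
	/* Broadcast to the group; use GFP_ATOMIC from non-sleeping context. */
	int err = cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
	kfree(msg);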
/linux-master/mm/
fail_page_alloc.c
24 bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) argument
30 if (gfp_mask & __GFP_NOFAIL)
32 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
35 (gfp_mask & __GFP_DIRECT_RECLAIM))
39 if (gfp_mask & __GFP_NOWARN)
mempool.c
197 gfp_t gfp_mask, int node_id)
207 gfp_mask, node_id);
217 element = pool->alloc(gfp_mask, pool->pool_data);
278 gfp_t gfp_mask, int node_id)
282 pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
287 gfp_mask, node_id)) {
380 * @gfp_mask: the usual allocation bitmask.
390 void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) argument
397 VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
398 might_alloc(gfp_mask);
195 mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, void *pool_data, gfp_t gfp_mask, int node_id) argument
276 mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, void *pool_data, gfp_t gfp_mask, int node_id) argument
561 mempool_alloc_slab(gfp_t gfp_mask, void *pool_data) argument
580 mempool_kmalloc(gfp_t gfp_mask, void *pool_data) argument
593 mempool_kvmalloc(gfp_t gfp_mask, void *pool_data) argument
610 mempool_alloc_pages(gfp_t gfp_mask, void *pool_data) argument
[all...]
page_alloc.c
2926 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) argument
2928 return __should_fail_alloc_page(gfp_mask, order);
3050 unsigned int alloc_flags, gfp_t gfp_mask)
3129 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) argument
3137 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
3160 /* Must be called after current_gfp_context() which can change gfp_mask */
3161 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask, argument
3165 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
3176 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, argument
3199 !__cpuset_zone_allowed(zone, gfp_mask))
3048 zone_watermark_fast(struct zone *z, unsigned int order, unsigned long mark, int highest_zoneidx, unsigned int alloc_flags, gfp_t gfp_mask) argument
3355 warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask) argument
3374 warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) argument
3400 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac) argument
3420 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, const struct alloc_context *ac, unsigned long *did_some_progress) argument
3515 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac, enum compact_priority prio, enum compact_result *compact_result) argument
3640 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac, enum compact_priority prio, enum compact_result *compact_result) argument
3680 __need_reclaim(gfp_t gfp_mask) argument
3706 fs_reclaim_acquire(gfp_t gfp_mask) argument
3723 fs_reclaim_release(gfp_t gfp_mask) argument
3761 __perform_reclaim(gfp_t gfp_mask, unsigned int order, const struct alloc_context *ac) argument
3787 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac, unsigned long *did_some_progress) argument
3820 wake_all_kswapds(unsigned int order, gfp_t gfp_mask, const struct alloc_context *ac) argument
3840 gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order) argument
3907 __gfp_pfmemalloc_flags(gfp_t gfp_mask) argument
3925 gfp_pfmemalloc_allowed(gfp_t gfp_mask) argument
3941 should_reclaim_retry(gfp_t gfp_mask, unsigned order, struct alloc_context *ac, int alloc_flags, bool did_some_progress, int *no_progress_loops) argument
4046 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, struct alloc_context *ac) argument
4324 prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid, nodemask_t *nodemask, struct alloc_context *ac, gfp_t *alloc_gfp, unsigned int *alloc_flags) argument
4618 __get_free_pages(gfp_t gfp_mask, unsigned int order) argument
4629 get_zeroed_page(gfp_t gfp_mask) argument
4689 __page_frag_cache_refill(struct page_frag_cache *nc, gfp_t gfp_mask) argument
4729 __page_frag_alloc_align(struct page_frag_cache *nc, unsigned int fragsz, gfp_t gfp_mask, unsigned int align_mask) argument
4849 alloc_pages_exact(size_t size, gfp_t gfp_mask) argument
4874 alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) argument
6339 alloc_contig_range(unsigned long start, unsigned long end, unsigned migratetype, gfp_t gfp_mask) argument
6465 __alloc_contig_pages(unsigned long start_pfn, unsigned long nr_pages, gfp_t gfp_mask) argument
6526 alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask, int nid, nodemask_t *nodemask) argument
[all...]
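A minimal sketch of the classic entry points listed above (hypothetical caller):

	unsigned long addr = __get_free_pages(GFP_KERNEL, 1);	/* order 1 = 2 pages */
	if (addr) {
		/* ... use the buffer, then release both pages ... */
		free_pages(addr, 1);
	}
	unsigned long zeroed = get_zeroed_page(GFP_KERNEL);	/* order 0, pre-zeroed */
	if (zeroed)
		free_page(zeroed);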
page_owner.c
27 gfp_t gfp_mask; member in struct:page_owner
166 gfp_t gfp_mask)
171 /* Filter gfp_mask the same way stackdepot does, for consistency */
172 gfp_mask &= ~GFP_ZONEMASK;
173 gfp_mask &= (GFP_ATOMIC | GFP_KERNEL);
174 gfp_mask |= __GFP_NOWARN;
177 stack = kmalloc(sizeof(*stack), gfp_mask);
199 static void inc_stack_record_count(depot_stack_handle_t handle, gfp_t gfp_mask, argument
219 add_stack_record_to_list(stack_record, gfp_mask);
240 gfp_t gfp_mask,
165 add_stack_record_to_list(struct stack_record *stack_record, gfp_t gfp_mask) argument
237 __update_page_owner_handle(struct page_ext *page_ext, depot_stack_handle_t handle, unsigned short order, gfp_t gfp_mask, short last_migrate_reason, u64 ts_nsec, pid_t pid, pid_t tgid, char *comm) argument
318 __set_page_owner(struct page *page, unsigned short order, gfp_t gfp_mask) argument
603 gfp_t gfp_mask; local
[all...]
swap.h
50 struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
85 gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t ilx)
90 static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask, argument
129 gfp_t gfp_mask, void **shadowp)
84 swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t ilx) argument
128 add_to_swap_cache(struct folio *folio, swp_entry_t entry, gfp_t gfp_mask, void **shadowp) argument
/linux-master/block/
blk-lib.c
39 sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)
67 bio = blk_next_bio(bio, bdev, 0, REQ_OP_DISCARD, gfp_mask);
92 * @gfp_mask: memory allocation flags (for bio_alloc)
98 sector_t nr_sects, gfp_t gfp_mask)
105 ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, &bio);
119 sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
137 bio = blk_next_bio(bio, bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);
166 sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
178 REQ_OP_WRITE, gfp_mask);
201 * @gfp_mask: memory allocation flags (for bio_alloc)
38 __blkdev_issue_discard(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct bio **biop) argument
97 blkdev_issue_discard(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask) argument
118 __blkdev_issue_write_zeroes(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, unsigned flags) argument
165 __blkdev_issue_zero_pages(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct bio **biop) argument
215 __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, unsigned flags) argument
249 blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, unsigned flags) argument
[all...]
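A hedged sketch of the discard/zeroout helpers whose full signatures appear above (hypothetical bdev; 2048 sectors of 512 bytes = 1 MiB starting at sector 0):

	int err = blkdev_issue_discard(bdev, 0, 2048, GFP_KERNEL);
	if (!err)
		/* BLKDEV_ZERO_* flags may be passed instead of 0. */
		err = blkdev_issue_zeroout(bdev, 0, 2048, GFP_KERNEL, 0);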
blk-map.c
22 gfp_t gfp_mask)
29 bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
132 struct iov_iter *iter, gfp_t gfp_mask)
142 bmd = bio_alloc_map_data(iter, gfp_mask);
157 bio = bio_kmalloc(nr_pages, gfp_mask);
185 page = alloc_page(GFP_NOIO | gfp_mask);
254 unsigned int nr_vecs, gfp_t gfp_mask)
259 bio = bio_alloc_bioset(NULL, nr_vecs, rq->cmd_flags, gfp_mask,
264 bio = bio_kmalloc(nr_vecs, gfp_mask);
273 gfp_t gfp_mask)
21 bio_alloc_map_data(struct iov_iter *data, gfp_t gfp_mask) argument
131 bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data, struct iov_iter *iter, gfp_t gfp_mask) argument
253 blk_rq_map_bio_alloc(struct request *rq, unsigned int nr_vecs, gfp_t gfp_mask) argument
272 bio_map_user_iov(struct request *rq, struct iov_iter *iter, gfp_t gfp_mask) argument
389 bio_map_kern(struct request_queue *q, void *data, unsigned int len, gfp_t gfp_mask) argument
474 bio_copy_kern(struct request_queue *q, void *data, unsigned int len, gfp_t gfp_mask, int reading) argument
632 blk_rq_map_user_iov(struct request_queue *q, struct request *rq, struct rq_map_data *map_data, const struct iov_iter *iter, gfp_t gfp_mask) argument
687 blk_rq_map_user(struct request_queue *q, struct request *rq, struct rq_map_data *map_data, void __user *ubuf, unsigned long len, gfp_t gfp_mask) argument
701 blk_rq_map_user_io(struct request *req, struct rq_map_data *map_data, void __user *ubuf, unsigned long buf_len, gfp_t gfp_mask, bool vec, int iov_count, bool check_iter_count, int rw) argument
782 blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, unsigned int len, gfp_t gfp_mask) argument
[all...]
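A minimal sketch of blk_rq_map_kern() per the signature above (q, rq, kbuf and buf_len are hypothetical driver state):

	/* Map a kernel buffer into rq; a bounce bio may be allocated, hence the gfp_mask. */
	int err = blk_rq_map_kern(q, rq, kbuf, buf_len, GFP_KERNEL);
	if (err)
		return err;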
blk-crypto.c
92 const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask)
97 * The caller must use a gfp_mask that contains __GFP_DIRECT_RECLAIM so
100 WARN_ON_ONCE(!(gfp_mask & __GFP_DIRECT_RECLAIM));
102 bc = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
116 int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask) argument
118 dst->bi_crypt_context = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
304 gfp_t gfp_mask)
307 rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
91 bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key, const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask) argument
303 __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio, gfp_t gfp_mask) argument
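Note the WARN_ON_ONCE at line 100 above: bio_crypt_set_ctx() expects a gfp_mask that allows direct reclaim so the mempool allocation cannot fail. A hedged sketch (bio, key and dun assumed already set up by the caller):

	/* GFP_NOIO still contains __GFP_DIRECT_RECLAIM, satisfying the check above. */
	bio_crypt_set_ctx(bio, key, dun, GFP_NOIO);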
/linux-master/drivers/infiniband/core/
sa.h
55 unsigned long timeout_ms, gfp_t gfp_mask,
/linux-master/fs/nfs/blocklayout/
dev.c
231 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask);
236 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
242 dev = bl_resolve_deviceid(server, v, gfp_mask);
327 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
385 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
390 ret = bl_parse_deviceid(server, d, volumes, v->slice.volume, gfp_mask);
401 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
408 sizeof(struct pnfs_block_dev), gfp_mask);
414 volumes, v->concat.volumes[i], gfp_mask);
430 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
235 bl_parse_simple(struct nfs_server *server, struct pnfs_block_dev *d, struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) argument
326 bl_parse_scsi(struct nfs_server *server, struct pnfs_block_dev *d, struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) argument
384 bl_parse_slice(struct nfs_server *server, struct pnfs_block_dev *d, struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) argument
400 bl_parse_concat(struct nfs_server *server, struct pnfs_block_dev *d, struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) argument
429 bl_parse_stripe(struct nfs_server *server, struct pnfs_block_dev *d, struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) argument
458 bl_parse_deviceid(struct nfs_server *server, struct pnfs_block_dev *d, struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) argument
479 bl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev, gfp_t gfp_mask) argument
[all...]
/linux-master/drivers/net/wireless/ath/
main.c
31 gfp_t gfp_mask)
49 skb = __dev_alloc_skb(len + common->cachelsz - 1, gfp_mask);
29 ath_rxbuf_alloc(struct ath_common *common, u32 len, gfp_t gfp_mask) argument
/linux-master/include/linux/sched/
mm.h
267 extern void fs_reclaim_acquire(gfp_t gfp_mask);
268 extern void fs_reclaim_release(gfp_t gfp_mask);
272 static inline void fs_reclaim_acquire(gfp_t gfp_mask) { } argument
273 static inline void fs_reclaim_release(gfp_t gfp_mask) { } argument
304 * @gfp_mask: gfp_t flags that would be used to allocate
310 static inline void might_alloc(gfp_t gfp_mask) argument
312 fs_reclaim_acquire(gfp_mask);
313 fs_reclaim_release(gfp_mask);
315 might_sleep_if(gfpflags_allow_blocking(gfp_mask));
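A minimal sketch of annotating a hypothetical allocator wrapper with might_alloc(), mirroring the inline above (my_buf_alloc is an invented name):

	static void *my_buf_alloc(size_t size, gfp_t gfp_mask)
	{
		/* Fires the fs_reclaim lockdep and might_sleep checks even when
		 * a fastpath would not actually allocate. */
		might_alloc(gfp_mask);
		return kmalloc(size, gfp_mask);
	}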
/linux-master/lib/
generic-radix-tree.c
80 static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask) argument
82 return kzalloc(GENRADIX_NODE_SIZE, gfp_mask);
95 gfp_t gfp_mask)
112 new_node = genradix_alloc_node(gfp_mask);
135 new_node = genradix_alloc_node(gfp_mask);
277 gfp_t gfp_mask)
282 if (!__genradix_ptr_alloc(radix, offset, gfp_mask))
94 __genradix_ptr_alloc(struct __genradix *radix, size_t offset, gfp_t gfp_mask) argument
276 __genradix_prealloc(struct __genradix *radix, size_t size, gfp_t gfp_mask) argument
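A minimal sketch of the genradix API these node allocations back (a hypothetical sparse array of u64 counters; index 1234 is arbitrary):

	GENRADIX(u64) counters;
	genradix_init(&counters);
	/* Allocates intermediate nodes with the given gfp_mask on demand. */
	u64 *slot = genradix_ptr_alloc(&counters, 1234, GFP_KERNEL);
	if (slot)
		*slot = 1;
	genradix_free(&counters);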
/linux-master/drivers/net/ethernet/mellanox/mlx4/
icm.c
99 gfp_t gfp_mask, int node)
103 page = alloc_pages_node(node, gfp_mask, order);
105 page = alloc_pages(gfp_mask, order);
115 int order, gfp_t gfp_mask)
118 &buf->dma_addr, gfp_mask);
133 gfp_t gfp_mask, int coherent)
142 BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));
145 gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN),
149 gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
162 gfp_mask
98 mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask, int node) argument
114 mlx4_alloc_icm_coherent(struct device *dev, struct mlx4_icm_buf *buf, int order, gfp_t gfp_mask) argument
132 mlx4_alloc_icm(struct mlx4_dev *dev, int npages, gfp_t gfp_mask, int coherent) argument
[all...]
/linux-master/drivers/net/vmxnet3/
vmxnet3_xdp.h
40 gfp_t gfp_mask);
/linux-master/fs/netfs/
misc.c
17 gfp_t gfp_mask)
28 if (!xas_nomem(&xas, gfp_mask))
48 pgoff_t index, pgoff_t to, gfp_t gfp_mask)
63 NETFS_FLAG_PUT_MARK, gfp_mask);
15 netfs_xa_store_and_mark(struct xarray *xa, unsigned long index, struct folio *folio, unsigned int flags, gfp_t gfp_mask) argument
46 netfs_add_folios_to_buffer(struct xarray *buffer, struct address_space *mapping, pgoff_t index, pgoff_t to, gfp_t gfp_mask) argument
