Searched refs:gfp_mask (Results 51 - 75 of 223) sorted by relevance


/linux-master/lib/
ts_kmp.c
92 gfp_t gfp_mask, int flags)
100 conf = alloc_ts_config(priv_size, gfp_mask);
91 kmp_init(const void *pattern, unsigned int len, gfp_t gfp_mask, int flags) argument
sg_split.c
135 * @gfp_mask: the allocation flag
152 gfp_t gfp_mask)
157 splitters = kcalloc(nb_splits, sizeof(*splitters), gfp_mask);
170 gfp_mask);
148 sg_split(struct scatterlist *in, const int in_mapped_nents, const off_t skip, const int nb_splits, const size_t *split_sizes, struct scatterlist **out, int *out_mapped_nents, gfp_t gfp_mask) argument
ref_tracker.c
192 gfp_t gfp_mask = gfp | __GFP_NOWARN; local
202 gfp_mask |= __GFP_NOFAIL;
203 *trackerp = tracker = kzalloc(sizeof(*tracker), gfp_mask);
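The ref_tracker.c hits above show a mask being composed rather than passed through: the caller's gfp is widened with __GFP_NOWARN, and __GFP_NOFAIL is added only on the must-not-fail path. A minimal sketch of that composition, assuming kernel context; struct foo and foo_alloc() are invented names:

#include <linux/slab.h>

struct foo { int id; };	/* stand-in for the tracked object */

static struct foo *foo_alloc(gfp_t gfp, bool must_succeed)
{
	gfp_t gfp_mask = gfp | __GFP_NOWARN;	/* fail quietly, caller copes */

	if (must_succeed)
		gfp_mask |= __GFP_NOFAIL;	/* allocator retries until success */

	return kzalloc(sizeof(struct foo), gfp_mask);
}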
/linux-master/mm/
shrinker.c
467 static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid, argument
527 .gfp_mask = gfp_mask,
585 static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid, argument
594 * @gfp_mask: allocation context
612 unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg, argument
626 return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
652 .gfp_mask = gfp_mask,
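In mm/shrinker.c the gfp_mask travels from shrink_slab() into the struct shrink_control that every shrinker callback receives, so each shrinker can see the allocation context that triggered reclaim. A hedged, simplified sketch of that hand-off; run_one_shrinker() is invented, and the real shrink_slab() also batches and scales the scan count:

#include <linux/shrinker.h>

static unsigned long run_one_shrinker(struct shrinker *shrinker,
				      gfp_t gfp_mask, int nid)
{
	struct shrink_control sc = {
		.gfp_mask = gfp_mask,	/* context of the triggering allocation */
		.nid = nid,		/* preferred NUMA node to reclaim from */
		.nr_to_scan = SHRINK_BATCH,
	};

	return shrinker->scan_objects(shrinker, &sc);
}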
readahead.c
210 gfp_t gfp_mask = readahead_gfp_mask(mapping); local
221 * gfp_mask, but let's be explicit here.
248 folio = filemap_alloc_folio(gfp_mask, 0);
252 ret = filemap_add_folio(mapping, folio, index + i, gfp_mask);
785 gfp_t gfp_mask = readahead_gfp_mask(mapping); local
797 folio = filemap_alloc_folio(gfp_mask, 0);
800 if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) {
824 folio = filemap_alloc_folio(gfp_mask, 0);
827 if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) {
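Both readahead paths quoted above repeat one pattern: allocate a folio with the mapping's readahead mask, try to insert it at the target index, and drop the reference if another task raced us into the page cache. A minimal sketch under those assumptions; the wrapper name is invented, while the APIs are the ones in the snippets:

#include <linux/pagemap.h>

static int add_one_readahead_folio(struct address_space *mapping, pgoff_t index)
{
	gfp_t gfp_mask = readahead_gfp_mask(mapping);
	struct folio *folio = filemap_alloc_folio(gfp_mask, 0);	/* order 0 */

	if (!folio)
		return -ENOMEM;

	if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) {
		folio_put(folio);	/* lost the race: index already populated */
		return -EEXIST;
	}
	return 0;
}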
vmalloc.c
1817 preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node) argument
1831 va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1954 int node, gfp_t gfp_mask,
1983 gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
1985 va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1993 kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
1998 preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
2057 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
2577 * @gfp_mask
1951 alloc_vmap_area(unsigned long size, unsigned long align, unsigned long vstart, unsigned long vend, int node, gfp_t gfp_mask, unsigned long va_flags, struct vm_struct *vm) argument
2581 new_vmap_block(unsigned int order, gfp_t gfp_mask) argument
2722 vb_alloc(unsigned long size, gfp_t gfp_mask) argument
3073 __get_vm_area_node(unsigned long size, unsigned long align, unsigned long shift, unsigned long flags, unsigned long start, unsigned long end, int node, gfp_t gfp_mask, const void *caller) argument
3604 __vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot, unsigned int page_shift, int node) argument
3738 __vmalloc_node_range_noprof(unsigned long size, unsigned long align, unsigned long start, unsigned long end, gfp_t gfp_mask, pgprot_t prot, unsigned long vm_flags, int node, const void *caller) argument
3885 __vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_mask, int node, const void *caller) argument
3900 __vmalloc_noprof(unsigned long size, gfp_t gfp_mask) argument
3938 vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) argument
[all...]
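The alloc_vmap_area() hit at line 1983 narrows the caller's mask with GFP_RECLAIM_MASK before allocating the internal vmap_area, so placement bits meant for the mapped pages never reach the slab allocator. A sketch of the idea; note GFP_RECLAIM_MASK is private to mm/internal.h, so this only compiles inside mm/, and va_alloc_meta() is an invented name:

#include <linux/slab.h>
#include <linux/vmalloc.h>	/* struct vmap_area */
#include "internal.h"		/* GFP_RECLAIM_MASK (mm-private) */

static struct vmap_area *va_alloc_meta(struct kmem_cache *cachep,
				       gfp_t gfp_mask, int node)
{
	/* keep only the reclaim/waiting behaviour bits of the caller's mask */
	return kmem_cache_alloc_node(cachep, gfp_mask & GFP_RECLAIM_MASK, node);
}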
hugetlb.c
1349 static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask, argument
1358 zonelist = node_zonelist(nid, gfp_mask);
1362 for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
1365 if (!cpuset_zone_allowed(zone, gfp_mask))
1397 gfp_t gfp_mask; local
1413 gfp_mask = htlb_alloc_mask(h);
1414 nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1417 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
1425 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
1565 static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask, argument
1603 alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nodemask) argument
1611 alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nodemask) argument
2175 alloc_buddy_hugetlb_folio(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nmask, nodemask_t *node_alloc_noretry) argument
2237 __alloc_fresh_hugetlb_folio(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nmask, nodemask_t *node_alloc_noretry) argument
2271 only_alloc_fresh_hugetlb_folio(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nmask, nodemask_t *node_alloc_noretry) argument
2291 alloc_fresh_hugetlb_folio(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nmask, nodemask_t *node_alloc_noretry) argument
2333 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; local
2503 alloc_surplus_hugetlb_folio(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nmask) argument
2544 alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nmask) argument
2576 gfp_t gfp_mask = htlb_alloc_mask(h); local
2598 alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask, gfp_t gfp_mask, bool allow_alloc_fallback) argument
3011 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; local
3474 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; local
4902 gfp_t gfp_mask = htlb_alloc_mask(h); local
6640 gfp_t gfp_mask; local
[all...]
hugetlb_vmemmap.c
326 gfp_t gfp_mask = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN; local
336 walk.reuse_page = alloc_pages_node(nid, gfp_mask, 0);
383 gfp_t gfp_mask = GFP_KERNEL | __GFP_RETRY_MAYFAIL; local
389 page = alloc_pages_node(nid, gfp_mask, 0);
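The two hugetlb_vmemmap.c hits pair the same allocator call with deliberately different masks: an opportunistic attempt that neither retries nor warns, and a more persistent attempt that may still fail. A short sketch contrasting them, with invented wrapper names:

#include <linux/gfp.h>

static struct page *alloc_quiet(int nid)
{
	/* give up early and stay silent if memory is tight */
	gfp_t gfp_mask = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;

	return alloc_pages_node(nid, gfp_mask, 0);
}

static struct page *alloc_persistent(int nid)
{
	/* retry harder, but unlike __GFP_NOFAIL this can still return NULL */
	gfp_t gfp_mask = GFP_KERNEL | __GFP_RETRY_MAYFAIL;

	return alloc_pages_node(nid, gfp_mask, 0);
}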
/linux-master/drivers/gpu/drm/msm/
msm_gem_shrinker.c
29 if (!(sc->gfp_mask & __GFP_DIRECT_RECLAIM))
31 return current_is_kswapd() || (sc->gfp_mask & __GFP_RECLAIM);
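The msm_gem_shrinker.c hit gates shrinker work on __GFP_DIRECT_RECLAIM: when the allocation that entered reclaim is not allowed to sleep, the shrinker must not block either. A minimal sketch of the guard (helper name invented):

#include <linux/shrinker.h>

static bool reclaim_may_block(const struct shrink_control *sc)
{
	/* no __GFP_DIRECT_RECLAIM means atomic context: taking sleeping
	 * locks or issuing I/O from the shrinker would be unsafe */
	return !!(sc->gfp_mask & __GFP_DIRECT_RECLAIM);
}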
/linux-master/fs/nfs/blocklayout/
blocklayout.h
176 struct pnfs_device *pdev, gfp_t gfp_mask);
193 struct pnfs_block_volume *b, gfp_t gfp_mask);
/linux-master/fs/nilfs2/
mdt.h
78 int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz);
/linux-master/fs/crypto/
inline_crypt.c
256 * @gfp_mask: memory allocation flags - these must be a waiting mask so that
268 u64 first_lblk, gfp_t gfp_mask)
278 bio_crypt_set_ctx(bio, ci->ci_enc_key.blk_key, dun, gfp_mask);
311 * @gfp_mask: memory allocation flags
318 gfp_t gfp_mask)
324 fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, gfp_mask);
267 fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode, u64 first_lblk, gfp_t gfp_mask) argument
316 fscrypt_set_bio_crypt_ctx_bh(struct bio *bio, const struct buffer_head *first_bh, gfp_t gfp_mask) argument
/linux-master/kernel/
umh.c
338 * @gfp_mask: gfp mask for memory allocation
357 char **envp, gfp_t gfp_mask,
363 sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask);
486 gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL; local
488 info = call_usermodehelper_setup(path, argv, envp, gfp_mask,
356 call_usermodehelper_setup(const char *path, char **argv, char **envp, gfp_t gfp_mask, int (*init)(struct subprocess_info *info, struct cred *new), void (*cleanup)(struct subprocess_info *info), void *data) argument
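The umh.c hit at line 486 derives the mask from the wait mode: UMH_NO_WAIT callers may be in contexts that cannot sleep, so only then does the setup allocation drop to GFP_ATOMIC. A sketch of that decision, wrapped in an invented helper:

#include <linux/umh.h>

static struct subprocess_info *setup_helper(const char *path, char **argv,
					    char **envp, int wait)
{
	gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;

	/* no init/cleanup callbacks and no private data in this sketch */
	return call_usermodehelper_setup(path, argv, envp, gfp_mask,
					 NULL, NULL, NULL);
}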
/linux-master/net/core/
skbuff.c
272 static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp_mask) argument
623 * @gfp_mask: allocation mask
634 * Buffers may only be allocated from interrupts using a @gfp_mask of
637 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, argument
649 gfp_mask |= __GFP_MEMALLOC;
656 skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node);
666 data = kmalloc_reserve(&size, gfp_mask, node, &pfmemalloc);
705 * @gfp_mask: get_free_pages mask, passed to alloc_skb
715 gfp_t gfp_mask)
729 (gfp_mask
714 __netdev_alloc_skb(struct net_device *dev, unsigned int len, gfp_t gfp_mask) argument
789 gfp_t gfp_mask = GFP_ATOMIC | __GFP_NOWARN; local
1901 skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig, gfp_t gfp_mask) argument
1936 skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) argument
2033 skb_clone(struct sk_buff *skb, gfp_t gfp_mask) argument
2113 skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) argument
2158 __pskb_copy_fclone(struct sk_buff *skb, int headroom, gfp_t gfp_mask, bool fclone) argument
2222 pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask) argument
2446 skb_copy_expand(const struct sk_buff *skb, int newheadroom, int newtailroom, gfp_t gfp_mask) argument
6488 alloc_skb_with_frags(unsigned long header_len, unsigned long data_len, int order, int *errcode, gfp_t gfp_mask) argument
6544 pskb_carve_inside_header(struct sk_buff *skb, const u32 off, const int headlen, gfp_t gfp_mask) argument
6605 pskb_carve_frag_list(struct sk_buff *skb, struct skb_shared_info *shinfo, int eat, gfp_t gfp_mask) argument
6659 pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, int pos, gfp_t gfp_mask) argument
[all...]
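Several skbuff.c hits adjust the mask in flight: __alloc_skb() ORs in __GFP_MEMALLOC so receive buffers for memalloc (swap-over-network) sockets can tap the emergency reserves, and it masks out GFP_DMA before the slab allocation of the skb head. A hedged sketch of the first adjustment; rx_skb_alloc() is invented, while __alloc_skb() and SKB_ALLOC_RX are the ones in the snippets:

#include <linux/skbuff.h>
#include <net/sock.h>	/* sk_memalloc_socks() */

static struct sk_buff *rx_skb_alloc(unsigned int size, gfp_t gfp_mask)
{
	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;	/* may dip into reserves */

	return __alloc_skb(size, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
}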
/linux-master/drivers/infiniband/hw/mthca/
mthca_memfree.c
107 static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask) argument
115 page = alloc_pages(gfp_mask | __GFP_ZERO, order);
124 int order, gfp_t gfp_mask)
127 gfp_mask);
138 gfp_t gfp_mask, int coherent)
146 BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));
148 icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
160 gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
176 cur_order, gfp_mask);
179 cur_order, gfp_mask);
123 mthca_alloc_icm_coherent(struct device *dev, struct scatterlist *mem, int order, gfp_t gfp_mask) argument
137 mthca_alloc_icm(struct mthca_dev *dev, int npages, gfp_t gfp_mask, int coherent) argument
[all...]
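mthca_alloc_icm() feeds one caller mask to two different allocators, so it strips __GFP_HIGHMEM (kmalloc memory must be lowmem) and __GFP_NOWARN from the bookkeeping allocation while the bulk pages keep the full mask. A self-contained sketch of the split; struct icm_meta and icm_meta_alloc() are invented stand-ins:

#include <linux/slab.h>
#include <linux/gfp.h>

struct icm_meta { struct page *page; };

static struct icm_meta *icm_meta_alloc(gfp_t gfp_mask, int order)
{
	struct icm_meta *icm;

	/* metadata: lowmem only, and let this small kmalloc warn normally */
	icm = kmalloc(sizeof(*icm), gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
	if (!icm)
		return NULL;

	/* bulk pages: caller's full mask, highmem allowed, zeroed */
	icm->page = alloc_pages(gfp_mask | __GFP_ZERO, order);
	if (!icm->page) {
		kfree(icm);
		return NULL;
	}
	return icm;
}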
/linux-master/drivers/infiniband/core/
sa_query.c
783 static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask) argument
803 skb = nlmsg_new(len, gfp_mask);
821 gfp_flag = ((gfp_mask & GFP_ATOMIC) == GFP_ATOMIC) ? GFP_ATOMIC :
1267 static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask) argument
1293 gfp_mask,
1335 gfp_t gfp_mask)
1342 ret = __xa_alloc(&queries, &id, query, xa_limit_32b, gfp_mask);
1360 if (!ib_nl_make_request(query, gfp_mask))
1499 * @gfp_mask:GFP mask to use for internal allocations
1520 unsigned long timeout_ms, gfp_t gfp_mask,
1334 send_mad(struct ib_sa_query *query, unsigned long timeout_ms, gfp_t gfp_mask) argument
1516 ib_sa_path_rec_get(struct ib_sa_client *client, struct ib_device *device, u32 port_num, struct sa_path_rec *rec, ib_sa_comp_mask comp_mask, unsigned long timeout_ms, gfp_t gfp_mask, void (*callback)(int status, struct sa_path_rec *resp, unsigned int num_paths, void *context), void *context, struct ib_sa_query **sa_query) argument
1642 ib_sa_mcmember_rec_query(struct ib_sa_client *client, struct ib_device *device, u32 port_num, u8 method, struct ib_sa_mcmember_rec *rec, ib_sa_comp_mask comp_mask, unsigned long timeout_ms, gfp_t gfp_mask, void (*callback)(int status, struct ib_sa_mcmember_rec *resp, void *context), void *context, struct ib_sa_query **sa_query) argument
1733 ib_sa_guid_info_rec_query(struct ib_sa_client *client, struct ib_device *device, u32 port_num, struct ib_sa_guidinfo_rec *rec, ib_sa_comp_mask comp_mask, u8 method, unsigned long timeout_ms, gfp_t gfp_mask, void (*callback)(int status, struct ib_sa_guidinfo_rec *resp, void *context), void *context, struct ib_sa_query **sa_query) argument
1887 gfp_t gfp_mask = GFP_KERNEL; local
[all...]
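The sa_query.c hit at line 821 tests whether the caller's mask already amounts to GFP_ATOMIC before choosing a flag for the netlink buffer. A hedged sketch of the same decision expressed with the generic helper gfpflags_allow_blocking(); this is a swapped-in idiom, not the literal code above, and sa_nl_skb() is invented:

#include <linux/gfp.h>
#include <net/netlink.h>

static struct sk_buff *sa_nl_skb(size_t len, gfp_t gfp_mask)
{
	gfp_t gfp_flag = gfpflags_allow_blocking(gfp_mask) ? GFP_KERNEL
							   : GFP_ATOMIC;

	return nlmsg_new(len, gfp_flag);	/* netlink message allocation */
}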
/linux-master/block/
bio-integrity.c
41 * @gfp_mask: Memory allocation mask
49 gfp_t gfp_mask,
60 bip = kmalloc(struct_size(bip, bip_inline_vecs, nr_vecs), gfp_mask);
63 bip = mempool_alloc(&bs->bio_integrity_pool, gfp_mask);
76 &bip->bip_max_vcnt, gfp_mask);
611 * @gfp_mask: Memory allocation mask
616 gfp_t gfp_mask)
623 bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt);
48 bio_integrity_alloc(struct bio *bio, gfp_t gfp_mask, unsigned int nr_vecs) argument
615 bio_integrity_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp_mask) argument
/linux-master/include/linux/
bio.h
422 blk_opf_t opf, gfp_t gfp_mask,
424 struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask);
435 unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp_mask)
437 return bio_alloc_bioset(bdev, nr_vecs, opf, gfp_mask, &fs_bio_set);
767 gfp_t gfp_mask)
837 sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask);
434 bio_alloc(struct block_device *bdev, unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp_mask) argument
766 bio_integrity_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp_mask) argument
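bio_alloc() above is a thin inline that pins the bioset to fs_bio_set and forwards the caller's gfp_mask to bio_alloc_bioset(). A short usage sketch, assuming a valid block device in filesystem context; read_one_sector() is invented:

#include <linux/bio.h>

static struct bio *read_one_sector(struct block_device *bdev, sector_t sect)
{
	/* GFP_NOIO may block but avoids recursing into block-layer I/O;
	 * with direct reclaim allowed, bio_alloc() is documented not to fail */
	struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_NOIO);

	bio->bi_iter.bi_sector = sect;
	return bio;
}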
btree.h
46 * @gfp_mask: gfp mask for the allocation
49 void *btree_alloc(gfp_t gfp_mask, void *pool_data);
swap.h
403 gfp_t gfp_mask, nodemask_t *mask);
409 gfp_t gfp_mask,
412 gfp_t gfp_mask, bool noswap,
539 static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask) argument
/linux-master/net/sunrpc/auth_gss/
gss_mech_switch.c
357 gfp_t gfp_mask)
359 if (!(*ctx_id = kzalloc(sizeof(**ctx_id), gfp_mask)))
364 *ctx_id, endtime, gfp_mask);
353 gss_import_sec_context(const void *input_token, size_t bufsize, struct gss_api_mech *mech, struct gss_ctx **ctx_id, time64_t *endtime, gfp_t gfp_mask) argument
/linux-master/mm/kmsan/
hooks.c
155 gfp_t gfp_mask = GFP_KERNEL | __GFP_ZERO; local
166 shadow = alloc_pages(gfp_mask, 1);
167 origin = alloc_pages(gfp_mask, 1);
/linux-master/drivers/net/ethernet/mellanox/mlx4/
icm.h
81 gfp_t gfp_mask, int coherent);
/linux-master/include/linux/greybus/
hd.h
36 struct gb_message *message, gfp_t gfp_mask);
/linux-master/security/selinux/ss/
mls.h
38 gfp_t gfp_mask);

Completed in 423 milliseconds
