Searched refs:gfp_mask (Results 26 - 50 of 225) sorted by relevance


/linux-master/net/sunrpc/auth_gss/
gss_krb5_keys.c
152 const struct xdr_netobj *in_constant, gfp_t gfp_mask)
174 inblockdata = kmalloc(blocksize, gfp_mask);
178 outblockdata = kmalloc(blocksize, gfp_mask);
260 * @gfp_mask: memory allocation control flags
271 gfp_t gfp_mask)
277 inblock.data = kmalloc(inblock.len, gfp_mask);
281 ret = krb5_DK(gk5e, inkey, inblock.data, label, gfp_mask);
349 * @gfp_mask: memory allocation control flags
372 gfp_t gfp_mask)
401 step.data = kzalloc(step.len, gfp_mask);
150 krb5_DK(const struct gss_krb5_enctype *gk5e, const struct xdr_netobj *inkey, u8 *rawkey, const struct xdr_netobj *in_constant, gfp_t gfp_mask) argument
267 krb5_derive_key_v2(const struct gss_krb5_enctype *gk5e, const struct xdr_netobj *inkey, struct xdr_netobj *outkey, const struct xdr_netobj *label, gfp_t gfp_mask) argument
368 krb5_kdf_feedback_cmac(const struct gss_krb5_enctype *gk5e, const struct xdr_netobj *inkey, struct xdr_netobj *outkey, const struct xdr_netobj *constant, gfp_t gfp_mask) argument
500 krb5_kdf_hmac_sha2(const struct gss_krb5_enctype *gk5e, const struct xdr_netobj *inkey, struct xdr_netobj *outkey, const struct xdr_netobj *label, gfp_t gfp_mask) argument
[all...]
gss_krb5_internal.h
40 gfp_t gfp_mask);
110 gfp_t gfp_mask);
116 gfp_t gfp_mask);
122 gfp_t gfp_mask);
131 * @gfp_mask: memory allocation control flags
141 u32 usage, u8 seed, gfp_t gfp_mask)
153 return gk5e->derive_key(gk5e, inkey, outkey, &label, gfp_mask);
138 krb5_derive_key(struct krb5_ctx *kctx, const struct xdr_netobj *inkey, struct xdr_netobj *outkey, u32 usage, u8 seed, gfp_t gfp_mask) argument
gss_krb5_mech.c
297 gss_krb5_import_ctx_v2(struct krb5_ctx *ctx, gfp_t gfp_mask) argument
306 keyout.data = kmalloc(GSS_KRB5_MAX_KEYLEN, gfp_mask);
313 KEY_USAGE_SEED_ENCRYPTION, gfp_mask))
329 KEY_USAGE_SEED_ENCRYPTION, gfp_mask))
346 KEY_USAGE_SEED_CHECKSUM, gfp_mask))
354 KEY_USAGE_SEED_CHECKSUM, gfp_mask))
363 KEY_USAGE_SEED_INTEGRITY, gfp_mask))
371 KEY_USAGE_SEED_INTEGRITY, gfp_mask))
396 gfp_t gfp_mask)
447 gss_kerberos_mech.gm_oid.len, gfp_mask);
395 gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx, gfp_t gfp_mask) argument
469 gss_krb5_import_sec_context(const void *p, size_t len, struct gss_ctx *ctx_id, time64_t *endtime, gfp_t gfp_mask) argument
[all...]
/linux-master/include/linux/
textsearch.h
163 gfp_t gfp_mask)
167 conf = kzalloc(TS_PRIV_ALIGN(sizeof(*conf)) + payload, gfp_mask);
162 alloc_ts_config(size_t payload, gfp_t gfp_mask) argument
oom.h
40 const gfp_t gfp_mask; member in struct:oom_control
compaction.h
87 extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
umh.h
38 gfp_t gfp_mask,
shrinker.h
35 gfp_t gfp_mask; member in struct:shrink_control
vmalloc.h
148 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
150 unsigned long start, unsigned long end, gfp_t gfp_mask,
153 void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
155 void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
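The __vmalloc() prototype above takes the caller's gfp_mask directly. A minimal usage sketch follows; the helper name and the GFP_KERNEL | __GFP_ZERO choice are assumptions for illustration, not taken from the listing:

    #include <linux/vmalloc.h>
    #include <linux/gfp.h>

    /* Illustrative helper: allocate a large, virtually contiguous, zeroed
     * buffer. GFP_KERNEL may sleep, so this must not be called from
     * atomic context. */
    static void *example_big_buffer(unsigned long size)
    {
            return __vmalloc(size, GFP_KERNEL | __GFP_ZERO);
    }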
scatterlist.h
429 gfp_t gfp_mask);
445 unsigned int left_pages, gfp_t gfp_mask);
449 unsigned int max_segment, gfp_t gfp_mask);
459 * @gfp_mask: GFP allocation mask
475 unsigned long size, gfp_t gfp_mask)
478 size, UINT_MAX, gfp_mask);
471 sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, unsigned int n_pages, unsigned int offset, unsigned long size, gfp_t gfp_mask) argument
mISDNif.h
537 mI_alloc_skb(unsigned int len, gfp_t gfp_mask) argument
541 skb = alloc_skb(len + MISDN_HEADER_LEN, gfp_mask);
548 _alloc_mISDN_skb(u_int prim, u_int id, u_int len, void *dp, gfp_t gfp_mask) argument
550 struct sk_buff *skb = mI_alloc_skb(len, gfp_mask);
565 u_int id, u_int len, void *dp, gfp_t gfp_mask)
571 skb = _alloc_mISDN_skb(prim, id, len, dp, gfp_mask);
564 _queue_data(struct mISDNchannel *ch, u_int prim, u_int id, u_int len, void *dp, gfp_t gfp_mask) argument
/linux-master/drivers/connector/
connector.c
62 gfp_t gfp_mask, netlink_filter_fn filter,
97 skb = nlmsg_new(size, gfp_mask);
115 gfp_mask, filter,
118 !gfpflags_allow_blocking(gfp_mask));
124 gfp_t gfp_mask)
126 return cn_netlink_send_mult(msg, msg->len, portid, __group, gfp_mask,
61 cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 __group, gfp_t gfp_mask, netlink_filter_fn filter, void *filter_data) argument
123 cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group, gfp_t gfp_mask) argument
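cn_netlink_send() above forwards gfp_mask to cn_netlink_send_mult() and, from there, to nlmsg_new(). A rough sketch of a broadcast caller; the helper name and the cb_id idx/val values are hypothetical:

    #include <linux/connector.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    /* Assumed example: portid 0 means "broadcast to the whole netlink
     * group"; GFP_KERNEL lets the nlmsg_new() inside block. */
    static int example_cn_broadcast(u32 group, const void *data, u16 len)
    {
            struct cn_msg *msg;
            int ret;

            msg = kzalloc(sizeof(*msg) + len, GFP_KERNEL);
            if (!msg)
                    return -ENOMEM;

            msg->id.idx = group;    /* assumed registered cb_id */
            msg->id.val = 0x1;
            msg->len = len;
            memcpy(msg->data, data, len);

            ret = cn_netlink_send(msg, 0, group, GFP_KERNEL);
            kfree(msg);
            return ret;
    }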
/linux-master/block/
blk-crypto-internal.h
189 gfp_t gfp_mask);
195 * @gfp_mask: Memory allocation flags
198 * @gfp_mask doesn't include %__GFP_DIRECT_RECLAIM.
201 gfp_t gfp_mask)
204 return __blk_crypto_rq_bio_prep(rq, bio, gfp_mask);
200 blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio, gfp_t gfp_mask) argument
bio.c
183 gfp_t gfp_mask)
204 bvl = kmem_cache_alloc(bvs->slab, bvec_alloc_gfp(gfp_mask));
205 if (likely(bvl) || !(gfp_mask & __GFP_DIRECT_RECLAIM))
210 return mempool_alloc(pool, gfp_mask);
463 * @gfp_mask: the GFP_* mask given to the slab allocator
493 blk_opf_t opf, gfp_t gfp_mask,
496 gfp_t saved_gfp = gfp_mask;
507 gfp_mask, bs);
541 gfp_mask &= ~__GFP_DIRECT_RECLAIM;
543 p = mempool_alloc(&bs->bio_pool, gfp_mask);
182 bvec_alloc(mempool_t *pool, unsigned short *nr_vecs, gfp_t gfp_mask) argument
492 bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp_mask, struct bio_set *bs) argument
599 bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask) argument
[all...]
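bio_alloc_bioset() above strips __GFP_DIRECT_RECLAIM when it must not block; with a reclaim-capable mask such as GFP_NOIO the mempool-backed allocation is documented not to fail. A sketch of a caller, where the helper and the use of the shared fs_bio_set are assumptions:

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    /* Assumed example: build a single-page READ bio. GFP_NOIO includes
     * __GFP_DIRECT_RECLAIM, so the allocation may sleep but will not
     * return NULL. */
    static struct bio *example_read_bio(struct block_device *bdev,
                                        struct page *page)
    {
            struct bio *bio;

            bio = bio_alloc_bioset(bdev, 1, REQ_OP_READ, GFP_NOIO, &fs_bio_set);
            __bio_add_page(bio, page, PAGE_SIZE, 0);
            return bio;
    }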
/linux-master/security/integrity/
integrity.h
206 integrity_audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, int type) argument
208 return audit_log_start(ctx, gfp_mask, type);
228 integrity_audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, int type) argument
/linux-master/include/trace/events/
compaction.h
169 gfp_t gfp_mask,
172 TP_ARGS(order, gfp_mask, prio),
176 __field(unsigned long, gfp_mask)
182 __entry->gfp_mask = (__force unsigned long)gfp_mask;
186 TP_printk("order=%d gfp_mask=%s priority=%d",
188 show_gfp_flags(__entry->gfp_mask),
/linux-master/net/ceph/
msgpool.c
12 static void *msgpool_alloc(gfp_t gfp_mask, void *arg) argument
18 gfp_mask, true);
/linux-master/include/linux/sunrpc/
gss_api.h
53 gfp_t gfp_mask);
115 gfp_t gfp_mask);
/linux-master/lib/
sg_pool.c
62 static struct scatterlist *sg_pool_alloc(unsigned int nents, gfp_t gfp_mask) argument
67 return mempool_alloc(sgp->pool, gfp_mask);
radix-tree.c
233 radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent, argument
245 if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) {
254 gfp_mask | __GFP_NOWARN);
276 ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
322 static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr) argument
332 gfp_mask &= ~__GFP_ACCOUNT;
338 node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
365 int radix_tree_preload(gfp_t gfp_mask) argument
368 WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
369 return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZ
378 radix_tree_maybe_preload(gfp_t gfp_mask) argument
1469 idr_preload(gfp_t gfp_mask) argument
[all...]
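radix_tree_preload() above warns if the gfp_mask cannot block, because its purpose is to reserve nodes before the caller takes a spinlock. The classic preload idiom, sketched with hypothetical names:

    #include <linux/radix-tree.h>
    #include <linux/spinlock.h>

    static RADIX_TREE(example_tree, GFP_ATOMIC);    /* hypothetical tree */
    static DEFINE_SPINLOCK(example_lock);

    static int example_insert(unsigned long index, void *item)
    {
            int err;

            /* May sleep; the gfp_mask must allow blocking, as the warning
             * in radix_tree_preload() above checks. */
            err = radix_tree_preload(GFP_KERNEL);
            if (err)
                    return err;

            spin_lock(&example_lock);
            err = radix_tree_insert(&example_tree, index, item);
            spin_unlock(&example_lock);

            radix_tree_preload_end();
            return err;
    }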
scatterlist.c
151 static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask) argument
163 void *ptr = (void *) __get_free_page(gfp_mask);
164 kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
168 gfp_mask);
271 * @gfp_mask: GFP allocation mask
287 unsigned int nents_first_chunk, gfp_t gfp_mask,
321 sg = alloc_fn(alloc_size, gfp_mask);
367 * @gfp_mask: GFP allocation mask
374 int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask) argument
379 NULL, 0, gfp_mask, sg_kmallo
285 __sg_alloc_table(struct sg_table *table, unsigned int nents, unsigned int max_ents, struct scatterlist *first_chunk, unsigned int nents_first_chunk, gfp_t gfp_mask, sg_alloc_fn *alloc_fn) argument
386 get_next_sg(struct sg_append_table *table, struct scatterlist *cur, unsigned long needed_sges, gfp_t gfp_mask) argument
454 sg_alloc_append_table_from_pages(struct sg_append_table *sgt_append, struct page **pages, unsigned int n_pages, unsigned int offset, unsigned long size, unsigned int max_segment, unsigned int left_pages, gfp_t gfp_mask) argument
578 sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages, unsigned int n_pages, unsigned int offset, unsigned long size, unsigned int max_segment, gfp_t gfp_mask) argument
[all...]
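sg_alloc_table() above passes gfp_mask down to sg_kmalloc() for the scatterlist chunks themselves. A minimal sketch of filling the resulting table, with the helper name assumed:

    #include <linux/scatterlist.h>

    /* Assumed helper: allocate an sg_table for n_pages pages and point
     * each entry at one full page. gfp_mask only controls the table
     * allocation here, not the pages. */
    static int example_sg_setup(struct sg_table *table, struct page **pages,
                                unsigned int n_pages, gfp_t gfp_mask)
    {
            struct scatterlist *sg;
            unsigned int i;
            int ret;

            ret = sg_alloc_table(table, n_pages, gfp_mask);
            if (ret)
                    return ret;

            for_each_sg(table->sgl, sg, n_pages, i)
                    sg_set_page(sg, pages[i], PAGE_SIZE, 0);

            return 0;
    }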
textsearch.c
250 * @gfp_mask: allocation mask
264 unsigned int len, gfp_t gfp_mask, int flags)
289 conf = ops->init(pattern, len, gfp_mask, flags);
263 textsearch_prepare(const char *algo, const void *pattern, unsigned int len, gfp_t gfp_mask, int flags) argument
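textsearch_prepare() above hands gfp_mask to the algorithm's init routine, and so to alloc_ts_config() from the include/linux hit earlier. An illustrative caller; the pattern, algorithm choice, and helper name are assumptions:

    #include <linux/textsearch.h>
    #include <linux/kernel.h>
    #include <linux/err.h>
    #include <linux/errno.h>

    /* Assumed example: search a flat buffer for the literal "magic" with
     * the in-tree "kmp" algorithm; GFP_KERNEL backs the config allocation. */
    static int example_find_magic(const void *buf, unsigned int len)
    {
            struct ts_config *conf;
            struct ts_state state;
            unsigned int pos;

            conf = textsearch_prepare("kmp", "magic", 5, GFP_KERNEL, TS_AUTOLOAD);
            if (IS_ERR(conf))
                    return PTR_ERR(conf);

            pos = textsearch_find_continuous(conf, &state, buf, len);
            textsearch_destroy(conf);

            return pos == UINT_MAX ? -ENOENT : (int)pos;
    }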
/linux-master/kernel/power/
snapshot.c
180 * @gfp_mask: GFP mask for the allocation.
191 static void *get_image_page(gfp_t gfp_mask, int safe_needed) argument
195 res = (void *)get_zeroed_page(gfp_mask);
201 res = (void *)get_zeroed_page(gfp_mask);
210 static void *__get_safe_page(gfp_t gfp_mask) argument
219 return get_image_page(gfp_mask, PG_SAFE);
222 unsigned long get_safe_page(gfp_t gfp_mask) argument
224 return (unsigned long)__get_safe_page(gfp_mask);
227 static struct page *alloc_image_page(gfp_t gfp_mask) argument
231 page = alloc_page(gfp_mask);
297 gfp_t gfp_mask; /* mask for allocating pages */ member in struct:chain_allocator
301 chain_init(struct chain_allocator *ca, gfp_t gfp_mask, int safe_needed) argument
442 alloc_rtree_node(gfp_t gfp_mask, int safe_needed, struct chain_allocator *ca, struct list_head *list) argument
468 add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask, int safe_needed, struct chain_allocator *ca) argument
538 create_zone_bm_rtree(gfp_t gfp_mask, int safe_needed, struct chain_allocator *ca, unsigned long start, unsigned long end) argument
628 create_mem_extents(struct list_head *list, gfp_t gfp_mask) argument
684 memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed) argument
[all...]
/linux-master/mm/
swap_state.c
429 struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, argument
470 folio = (struct folio *)alloc_pages_mpol(gfp_mask, 0,
514 if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
518 if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
552 struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, argument
562 folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
635 * @gfp_mask: memory allocation flags
650 struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask, argument
680 gfp_mask, mpol, ilx, &page_allocated, false);
697 folio = __read_swap_cache_async(entry, gfp_mask, mpo
815 swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf) argument
894 swapin_readahead(swp_entry_t entry, gfp_t gfp_mask, struct vm_fault *vmf) argument
[all...]
vmscan.c
155 gfp_t gfp_mask; member in struct:scan_control
939 mtc->gfp_mask |= __GFP_THISNODE;
944 mtc->gfp_mask &= ~__GFP_THISNODE;
967 .gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) | __GFP_NOWARN |
992 static bool may_enter_fs(struct folio *folio, gfp_t gfp_mask) argument
994 if (gfp_mask & __GFP_FS)
996 if (!folio_test_swapcache(folio) || !(gfp_mask & __GFP_IO))
1138 !may_enter_fs(folio, sc->gfp_mask)) {
1199 if (!(sc->gfp_mask & __GFP_IO))
1312 if (!may_enter_fs(folio, sc->gfp_mask))
6334 throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist, nodemask_t *nodemask) argument
6414 try_to_free_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask) argument
6460 mem_cgroup_shrink_node(struct mem_cgroup *memcg, gfp_t gfp_mask, bool noswap, pg_data_t *pgdat, unsigned long *nr_scanned) argument
6499 try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, unsigned long nr_pages, gfp_t gfp_mask, unsigned int reclaim_options) argument
7390 __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) argument
7443 node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) argument
[all...]

Completed in 212 milliseconds
