/linux-master/fs/btrfs/

ulist.h
    49  struct ulist *ulist_alloc(gfp_t gfp_mask);
    51  int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask);
    53  u64 *old_aux, gfp_t gfp_mask);
    58  void **old_aux, gfp_t gfp_mask)
    62  int ret = ulist_add_merge(ulist, val, (uintptr_t)aux, &old64, gfp_mask);
    66  return ulist_add_merge(ulist, val, (u64)aux, (u64 *)old_aux, gfp_mask);
    57  ulist_add_merge_ptr(struct ulist *ulist, u64 val, void *aux, void **old_aux, gfp_t gfp_mask)
ulist.c
    92  * @gfp_mask: allocation flags to use for the base allocation
    96  struct ulist *ulist_alloc(gfp_t gfp_mask)
    98  struct ulist *ulist = kmalloc(sizeof(*ulist), gfp_mask);
    177 * @gfp_mask: flags to use for allocation
    192 int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask)
    194 return ulist_add_merge(ulist, val, aux, NULL, gfp_mask);
    198 u64 *old_aux, gfp_t gfp_mask)
    209 node = kmalloc(sizeof(*node), gfp_mask);
    197 ulist_add_merge(struct ulist *ulist, u64 val, u64 aux, u64 *old_aux, gfp_t gfp_mask)
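The matches above are btrfs' unique-value list ("ulist"). A minimal usage sketch, not taken from these files, assuming ulist_free() as the release counterpart of ulist_alloc():

```c
#include "ulist.h"

static int record_root(u64 root_id, u64 bytenr)
{
	struct ulist *roots;
	int ret;

	/* gfp_mask is used for the base allocation of the list itself */
	roots = ulist_alloc(GFP_NOFS);
	if (!roots)
		return -ENOMEM;

	/* a duplicate root_id is merged rather than added a second time */
	ret = ulist_add(roots, root_id, bytenr, GFP_NOFS);

	ulist_free(roots);
	return ret < 0 ? ret : 0;
}
```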
/linux-master/include/linux/

gfp.h
    185 * We get the zone list from the current node and the gfp_mask.
    245 static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask)
    247 gfp_t warn_gfp = gfp_mask & (__GFP_THISNODE|__GFP_NOWARN);
    255 pr_warn("%pGg allocation from offline node %d\n", &gfp_mask, this_node);
    264 __alloc_pages_node_noprof(int nid, gfp_t gfp_mask, unsigned int order)
    267 warn_if_node_offline(nid, gfp_mask);
    269 return __alloc_pages_noprof(gfp_mask, order, nid, NULL);
    290 static inline struct page *alloc_pages_node_noprof(int nid, gfp_t gfp_mask,
    296 return __alloc_pages_node_noprof(nid, gfp_mask, order);
    309 static inline struct page *alloc_pages_noprof(gfp_t gfp_mask, unsigne…
    372 page_frag_alloc_align(struct page_frag_cache *nc, unsigned int fragsz, gfp_t gfp_mask, unsigned int align)
    380 page_frag_alloc(struct page_frag_cache *nc, unsigned int fragsz, gfp_t gfp_mask)
    421 gfp_compaction_allowed(gfp_t gfp_mask)
    [all...]
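The gfp.h matches are the node-aware page-allocation entry points; the *_noprof names are the allocation-profiling backends behind the public alloc_pages()/alloc_pages_node() macros. A minimal sketch of the public interface, with an illustrative order and flag combination:

```c
#include <linux/gfp.h>

/* Allocate an order-2 (four page) block on a given node. __GFP_THISNODE is
 * the flag that makes warn_if_node_offline() above complain when the node is
 * offline and __GFP_NOWARN is not also set. */
static struct page *grab_node_buffer(int nid)
{
	return alloc_pages_node(nid, GFP_KERNEL | __GFP_THISNODE, 2);
}

static void release_node_buffer(struct page *page)
{
	__free_pages(page, 2);
}
```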
blk-crypto.h
    84  gfp_t gfp_mask);
    115 int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask);
    120 * @gfp_mask: memory allocation flags
    125 * @gfp_mask doesn't include %__GFP_DIRECT_RECLAIM.
    128 gfp_t gfp_mask)
    131 return __bio_crypt_clone(dst, src, gfp_mask);
    127 bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
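bio_crypt_clone(), the inline wrapper above, copies src's inline-encryption context to dst and, per the kernel-doc fragment at line 125, can only fail when gfp_mask does not allow direct reclaim. A minimal sketch, assuming dst was just allocated as a clone of src:

```c
#include <linux/bio.h>
#include <linux/blk-crypto.h>

static int clone_bio_crypto(struct bio *dst, struct bio *src)
{
	/* GFP_NOIO includes __GFP_DIRECT_RECLAIM, so this cannot return -ENOMEM */
	return bio_crypt_clone(dst, src, GFP_NOIO);
}
```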
mempool.h
    15  typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data);
    43  gfp_t gfp_mask, int node_id);
    55  gfp_t gfp_mask, int nid);
    66  extern void *mempool_alloc_noprof(mempool_t *pool, gfp_t gfp_mask) __malloc;
    78  void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data);
    90  void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data);
    100 void *mempool_kvmalloc(gfp_t gfp_mask, void *pool_data);
    117 void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data);
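These declarations cover mempool construction and allocation plus the stock backends (slab, kmalloc, kvmalloc, whole pages). A minimal sketch of a kmalloc-backed pool; mempool_kfree() is assumed to be the matching free backend, and the element size is passed through pool_data as mempool_kmalloc() expects:

```c
#include <linux/mempool.h>

static mempool_t *msg_pool;

static int msg_pool_setup(void)
{
	/* 16 pre-reserved elements of 256 bytes each */
	msg_pool = mempool_create(16, mempool_kmalloc, mempool_kfree,
				  (void *)(unsigned long)256);
	return msg_pool ? 0 : -ENOMEM;
}
```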
page_owner.h
    13  unsigned short order, gfp_t gfp_mask);
    29  unsigned short order, gfp_t gfp_mask)
    32  __set_page_owner(page, order, gfp_mask);
    61  unsigned short order, gfp_t gfp_mask)
    28  set_page_owner(struct page *page, unsigned short order, gfp_t gfp_mask)
    60  set_page_owner(struct page *page, unsigned short order, gfp_t gfp_mask)
cpuset.h
    85  extern bool cpuset_node_allowed(int node, gfp_t gfp_mask);
    87  static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
    89  return cpuset_node_allowed(zone_to_nid(z), gfp_mask);
    92  static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
    95  return __cpuset_zone_allowed(z, gfp_mask);
    221 static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
    226 static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
fault-inject.h
    94  bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order);
    97  bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order);
    99  static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
connector.h
    92  * @gfp_mask: GFP mask.
    102 u32 group, gfp_t gfp_mask,
    120 * @gfp_mask: GFP mask.
    127 int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 group, gfp_t gfp_mask);
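cn_netlink_send() broadcasts a connector message to a netlink multicast group, allocating the skb with the supplied gfp_mask. A minimal sketch; the cb_id values and group number are illustrative only, since a real user registers its own idx/val pair:

```c
#include <linux/connector.h>
#include <linux/slab.h>
#include <linux/string.h>

static int notify_group(u32 group, const void *payload, u16 len)
{
	struct cn_msg *m;
	int ret;

	m = kzalloc(sizeof(*m) + len, GFP_ATOMIC);
	if (!m)
		return -ENOMEM;

	m->id.idx = group;	/* illustrative cb_id, not a registered one */
	m->id.val = 1;
	m->len = len;
	memcpy(m->data, payload, len);

	/* GFP_ATOMIC so the broadcast works from non-sleeping context */
	ret = cn_netlink_send(m, 0, group, GFP_ATOMIC);
	kfree(m);
	return ret;
}
```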
/linux-master/mm/

fail_page_alloc.c
    24  bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
    30  if (gfp_mask & __GFP_NOFAIL)
    32  if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
    35  (gfp_mask & __GFP_DIRECT_RECLAIM))
    39  if (gfp_mask & __GFP_NOWARN)
mempool.c
    197 gfp_t gfp_mask, int node_id)
    207 gfp_mask, node_id);
    217 element = pool->alloc(gfp_mask, pool->pool_data);
    259 * @gfp_mask: memory allocation flags
    272 gfp_t gfp_mask, int node_id)
    276 pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
    281 gfp_mask, node_id)) {
    374 * @gfp_mask: the usual allocation bitmask.
    384 void *mempool_alloc_noprof(mempool_t *pool, gfp_t gfp_mask)
    391 VM_WARN_ON_ONCE(gfp_mask …
    195 mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, void *pool_data, gfp_t gfp_mask, int node_id)
    270 mempool_create_node_noprof(int min_nr, mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, void *pool_data, gfp_t gfp_mask, int node_id)
    555 mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
    574 mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
    587 mempool_kvmalloc(gfp_t gfp_mask, void *pool_data)
    604 mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
    [all...]
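mempool_alloc() (mempool_alloc_noprof() above is its backend) first calls pool->alloc() and only then falls back to the pre-reserved elements; with a gfp_mask that allows sleeping it waits for an element rather than failing. A minimal sketch, reusing the illustrative msg_pool from the mempool.h example earlier:

```c
static void handle_message(void)
{
	void *buf;

	/* GFP_NOIO may sleep, so this is expected not to return NULL */
	buf = mempool_alloc(msg_pool, GFP_NOIO);

	/* ... fill in and submit buf ... */

	mempool_free(buf, msg_pool);
}
```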
page_alloc.c
    3011 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
    3013 return __should_fail_alloc_page(gfp_mask, order);
    3135 unsigned int alloc_flags, gfp_t gfp_mask)
    3214 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
    3222 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
    3245 /* Must be called after current_gfp_context() which can change gfp_mask */
    3246 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
    3250 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
    3261 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
    3284 !__cpuset_zone_allowed(zone, gfp_mask)
    3133 zone_watermark_fast(struct zone *z, unsigned int order, unsigned long mark, int highest_zoneidx, unsigned int alloc_flags, gfp_t gfp_mask)
    3440 warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
    3459 warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
    3485 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac)
    3505 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, const struct alloc_context *ac, unsigned long *did_some_progress)
    3600 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac, enum compact_priority prio, enum compact_result *compact_result)
    3725 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac, enum compact_priority prio, enum compact_result *compact_result)
    3765 __need_reclaim(gfp_t gfp_mask)
    3791 fs_reclaim_acquire(gfp_t gfp_mask)
    3808 fs_reclaim_release(gfp_t gfp_mask)
    3846 __perform_reclaim(gfp_t gfp_mask, unsigned int order, const struct alloc_context *ac)
    3872 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac, unsigned long *did_some_progress)
    3905 wake_all_kswapds(unsigned int order, gfp_t gfp_mask, const struct alloc_context *ac)
    3925 gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
    3992 __gfp_pfmemalloc_flags(gfp_t gfp_mask)
    4010 gfp_pfmemalloc_allowed(gfp_t gfp_mask)
    4026 should_reclaim_retry(gfp_t gfp_mask, unsigned order, struct alloc_context *ac, int alloc_flags, bool did_some_progress, int *no_progress_loops)
    4131 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, struct alloc_context *ac)
    4409 prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid, nodemask_t *nodemask, struct alloc_context *ac, gfp_t *alloc_gfp, unsigned int *alloc_flags)
    4703 get_free_pages_noprof(gfp_t gfp_mask, unsigned int order)
    4714 get_zeroed_page_noprof(gfp_t gfp_mask)
    4777 __page_frag_cache_refill(struct page_frag_cache *nc, gfp_t gfp_mask)
    4817 __page_frag_alloc_align(struct page_frag_cache *nc, unsigned int fragsz, gfp_t gfp_mask, unsigned int align_mask)
    4938 alloc_pages_exact_noprof(size_t size, gfp_t gfp_mask)
    4963 alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mask)
    6409 alloc_contig_range_noprof(unsigned long start, unsigned long end, unsigned migratetype, gfp_t gfp_mask)
    6512 __alloc_contig_pages(unsigned long start_pfn, unsigned long nr_pages, gfp_t gfp_mask)
    6573 alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask, int nid, nodemask_t *nodemask)
    [all...]
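Most of these hits are the allocator slow path (direct reclaim, compaction, OOM handling); the *_noprof entries near the end back public helpers such as __get_free_pages(), get_zeroed_page() and alloc_pages_exact(). A minimal sketch of two of those public entry points:

```c
#include <linux/gfp.h>

static int scratch_buffers(void)
{
	unsigned long buf   = __get_free_pages(GFP_KERNEL, 1);	/* two contiguous pages */
	unsigned long zpage = get_zeroed_page(GFP_KERNEL);	/* one zero-filled page */

	if (!buf || !zpage)
		goto out;

	/* ... use the buffers ... */

out:
	if (buf)
		free_pages(buf, 1);
	if (zpage)
		free_page(zpage);
	return (buf && zpage) ? 0 : -ENOMEM;
}
```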
swap.h
    50  struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
    85  gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t ilx)
    90  static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
    129 gfp_t gfp_mask, void **shadowp)
    84  swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t ilx)
    128 add_to_swap_cache(struct folio *folio, swp_entry_t entry, gfp_t gfp_mask, void **shadowp)
/linux-master/block/

blk-lib.c
    39  sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask)
    47  bio = bio_alloc(bdev, 0, REQ_OP_DISCARD, gfp_mask);
    64  sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)
    69  gfp_mask)))
    80  * @gfp_mask: memory allocation flags (for bio_alloc)
    86  sector_t nr_sects, gfp_t gfp_mask)
    93  ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, &bio);
    107 sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
    125 bio = blk_next_bio(bio, bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);
    154 sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
    38  blk_alloc_discard_bio(struct block_device *bdev, sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask)
    63  __blkdev_issue_discard(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)
    85  blkdev_issue_discard(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask)
    106 __blkdev_issue_write_zeroes(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, unsigned flags)
    153 __blkdev_issue_zero_pages(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)
    203 __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, unsigned flags)
    237 blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
    [all...]
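blkdev_issue_discard() and blkdev_issue_zeroout() are the synchronous wrappers around the __blkdev_issue_* helpers above; gfp_mask is forwarded to the bio_alloc() calls. A minimal sketch that discards and then explicitly zeroes the same range (the range itself is illustrative):

```c
#include <linux/blkdev.h>

static int trim_then_zero(struct block_device *bdev)
{
	sector_t start = 0;
	sector_t nr = 2048;	/* 1 MiB in 512-byte sectors */
	int ret;

	ret = blkdev_issue_discard(bdev, start, nr, GFP_KERNEL);
	if (ret)
		return ret;

	/* BLKDEV_ZERO_NOUNMAP: write real zeroes rather than deallocating */
	return blkdev_issue_zeroout(bdev, start, nr, GFP_KERNEL,
				    BLKDEV_ZERO_NOUNMAP);
}
```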
blk-map.c
    22  gfp_t gfp_mask)
    29  bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
    132 struct iov_iter *iter, gfp_t gfp_mask)
    142 bmd = bio_alloc_map_data(iter, gfp_mask);
    157 bio = bio_kmalloc(nr_pages, gfp_mask);
    185 page = alloc_page(GFP_NOIO | gfp_mask);
    254 unsigned int nr_vecs, gfp_t gfp_mask)
    259 bio = bio_alloc_bioset(NULL, nr_vecs, rq->cmd_flags, gfp_mask,
    264 bio = bio_kmalloc(nr_vecs, gfp_mask);
    273 gfp_t gfp_mask)
    21  bio_alloc_map_data(struct iov_iter *data, gfp_t gfp_mask)
    131 bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data, struct iov_iter *iter, gfp_t gfp_mask)
    253 blk_rq_map_bio_alloc(struct request *rq, unsigned int nr_vecs, gfp_t gfp_mask)
    272 bio_map_user_iov(struct request *rq, struct iov_iter *iter, gfp_t gfp_mask)
    389 bio_map_kern(struct request_queue *q, void *data, unsigned int len, gfp_t gfp_mask)
    474 bio_copy_kern(struct request_queue *q, void *data, unsigned int len, gfp_t gfp_mask, int reading)
    632 blk_rq_map_user_iov(struct request_queue *q, struct request *rq, struct rq_map_data *map_data, const struct iov_iter *iter, gfp_t gfp_mask)
    687 blk_rq_map_user(struct request_queue *q, struct request *rq, struct rq_map_data *map_data, void __user *ubuf, unsigned long len, gfp_t gfp_mask)
    701 blk_rq_map_user_io(struct request *req, struct rq_map_data *map_data, void __user *ubuf, unsigned long buf_len, gfp_t gfp_mask, bool vec, int iov_count, bool check_iter_count, int rw)
    782 blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, unsigned int len, gfp_t gfp_mask)
    [all...]
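blk_rq_map_kern() and blk_rq_map_user() attach a data buffer to a passthrough request, either mapping it directly or bounce-copying it depending on alignment and queue limits, with gfp_mask covering the bio and bounce allocations. A minimal sketch of the kernel-buffer case, assuming the request came from blk_mq_alloc_request():

```c
#include <linux/blk-mq.h>

static int attach_kernel_buffer(struct request_queue *q, struct request *rq,
				void *buf, unsigned int len)
{
	/* GFP_NOIO: safe to call on the I/O submission path */
	return blk_rq_map_kern(q, rq, buf, len, GFP_NOIO);
}
```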
blk-crypto.c
    92  const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask)
    97  * The caller must use a gfp_mask that contains __GFP_DIRECT_RECLAIM so
    100 WARN_ON_ONCE(!(gfp_mask & __GFP_DIRECT_RECLAIM));
    102 bc = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
    116 int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
    118 dst->bi_crypt_context = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
    304 gfp_t gfp_mask)
    307 rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
    91  bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key, const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask)
    303 __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio, gfp_t gfp_mask)
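bio_crypt_set_ctx() attaches an inline-encryption context to a bio before submission; the WARN_ON_ONCE at line 100 enforces the comment that gfp_mask must include __GFP_DIRECT_RECLAIM so the mempool_alloc() cannot fail. A minimal sketch; key setup and the DUN value are the caller's business and only illustrative here:

```c
#include <linux/bio.h>
#include <linux/blk-crypto.h>

static void submit_encrypted_write(struct bio *bio,
				   const struct blk_crypto_key *key, u64 lblk)
{
	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { lblk };	/* illustrative DUN */

	/* GFP_NOIO allows direct reclaim, so the context allocation cannot fail */
	bio_crypt_set_ctx(bio, key, dun, GFP_NOIO);
	submit_bio(bio);
}
```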
/linux-master/drivers/infiniband/core/

sa.h
    55  unsigned long timeout_ms, gfp_t gfp_mask,
/linux-master/fs/nfs/blocklayout/

dev.c
    231 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask);
    236 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
    242 dev = bl_resolve_deviceid(server, v, gfp_mask);
    327 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
    385 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
    390 ret = bl_parse_deviceid(server, d, volumes, v->slice.volume, gfp_mask);
    401 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
    408 sizeof(struct pnfs_block_dev), gfp_mask);
    414 volumes, v->concat.volumes[i], gfp_mask);
    430 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
    235 bl_parse_simple(struct nfs_server *server, struct pnfs_block_dev *d, struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
    326 bl_parse_scsi(struct nfs_server *server, struct pnfs_block_dev *d, struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
    384 bl_parse_slice(struct nfs_server *server, struct pnfs_block_dev *d, struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
    400 bl_parse_concat(struct nfs_server *server, struct pnfs_block_dev *d, struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
    429 bl_parse_stripe(struct nfs_server *server, struct pnfs_block_dev *d, struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
    458 bl_parse_deviceid(struct nfs_server *server, struct pnfs_block_dev *d, struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
    479 bl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev, gfp_t gfp_mask)
    [all...]
/linux-master/drivers/net/wireless/ath/

main.c
    31  gfp_t gfp_mask)
    49  skb = __dev_alloc_skb(len + common->cachelsz - 1, gfp_mask);
    29  ath_rxbuf_alloc(struct ath_common *common, u32 len, gfp_t gfp_mask)
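ath_rxbuf_alloc() hands back an skb over-allocated by cachelsz - 1 bytes so the payload can be aligned to the cache line size stored in struct ath_common. A minimal sketch of an rx-refill call; the buffer length argument is illustrative:

```c
#include "ath.h"

static struct sk_buff *refill_rx_buffer(struct ath_common *common, u32 buf_len)
{
	/* GFP_ATOMIC: rx replenishment typically runs in softirq context */
	return ath_rxbuf_alloc(common, buf_len, GFP_ATOMIC);
}
```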
/linux-master/include/linux/sched/

mm.h
    289 extern void fs_reclaim_acquire(gfp_t gfp_mask);
    290 extern void fs_reclaim_release(gfp_t gfp_mask);
    294 static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
    295 static inline void fs_reclaim_release(gfp_t gfp_mask) { }
    326 * @gfp_mask: gfp_t flags that would be used to allocate
    332 static inline void might_alloc(gfp_t gfp_mask)
    334 fs_reclaim_acquire(gfp_mask);
    335 fs_reclaim_release(gfp_mask);
    337 might_sleep_if(gfpflags_allow_blocking(gfp_mask));
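might_alloc() lets a function that only sometimes allocates still run the fs_reclaim lockdep annotations and the might_sleep check on every call, so a bad calling context is caught even when the fast path skips the allocation. A minimal sketch; the cache type and lookup helper are hypothetical:

```c
#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *get_object(struct my_cache *cache, u64 key, gfp_t gfp)
{
	void *obj;

	might_alloc(gfp);	/* complain here, not only on the miss path */

	obj = my_cache_lookup(cache, key);	/* hypothetical fast path */
	if (obj)
		return obj;

	return kzalloc(64, gfp);	/* illustrative object size */
}
```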
/linux-master/lib/

generic-radix-tree.c
    80  static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask)
    82  return kzalloc(GENRADIX_NODE_SIZE, gfp_mask);
    95  gfp_t gfp_mask)
    112 new_node = genradix_alloc_node(gfp_mask);
    135 new_node = genradix_alloc_node(gfp_mask);
    277 gfp_t gfp_mask)
    282 if (!__genradix_ptr_alloc(radix, offset, gfp_mask))
    94  __genradix_ptr_alloc(struct __genradix *radix, size_t offset, gfp_t gfp_mask)
    276 __genradix_prealloc(struct __genradix *radix, size_t size, gfp_t gfp_mask)
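__genradix_ptr_alloc() is the worker behind the public genradix_ptr_alloc() macro: it grows the radix tree on demand, allocating intermediate nodes with the caller's gfp_mask. A minimal sketch of the public interface:

```c
#include <linux/generic-radix-tree.h>

struct record {
	u64 id;
};

static GENRADIX(struct record) records;	/* zero-initialised, empty tree */

static struct record *record_for(size_t idx)
{
	/* allocates any missing interior nodes and the leaf with GFP_KERNEL */
	return genradix_ptr_alloc(&records, idx, GFP_KERNEL);
}
```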
/linux-master/drivers/net/ethernet/mellanox/mlx4/

icm.c
    99  gfp_t gfp_mask, int node)
    103 page = alloc_pages_node(node, gfp_mask, order);
    105 page = alloc_pages(gfp_mask, order);
    115 int order, gfp_t gfp_mask)
    118 &buf->dma_addr, gfp_mask);
    133 gfp_t gfp_mask, int coherent)
    142 BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));
    145 gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN),
    149 gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
    162 gfp_mask …
    98  mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask, int node)
    114 mlx4_alloc_icm_coherent(struct device *dev, struct mlx4_icm_buf *buf, int order, gfp_t gfp_mask)
    132 mlx4_alloc_icm(struct mlx4_dev *dev, int npages, gfp_t gfp_mask, int coherent)
    [all...]
/linux-master/drivers/net/vmxnet3/

vmxnet3_xdp.h
    40  gfp_t gfp_mask);
/linux-master/fs/netfs/

misc.c
    17  gfp_t gfp_mask)
    28  if (!xas_nomem(&xas, gfp_mask))
    48  pgoff_t index, pgoff_t to, gfp_t gfp_mask)
    63  NETFS_FLAG_PUT_MARK, gfp_mask);
    15  netfs_xa_store_and_mark(struct xarray *xa, unsigned long index, struct folio *folio, unsigned int flags, gfp_t gfp_mask)
    46  netfs_add_folios_to_buffer(struct xarray *buffer, struct address_space *mapping, pgoff_t index, pgoff_t to, gfp_t gfp_mask)
/linux-master/net/sunrpc/auth_gss/

gss_krb5_keys.c
    152 const struct xdr_netobj *in_constant, gfp_t gfp_mask)
    174 inblockdata = kmalloc(blocksize, gfp_mask);
    178 outblockdata = kmalloc(blocksize, gfp_mask);
    260 * @gfp_mask: memory allocation control flags
    271 gfp_t gfp_mask)
    277 inblock.data = kmalloc(inblock.len, gfp_mask);
    281 ret = krb5_DK(gk5e, inkey, inblock.data, label, gfp_mask);
    349 * @gfp_mask: memory allocation control flags
    372 gfp_t gfp_mask)
    401 step.data = kzalloc(step.len, gfp_mask);
    150 krb5_DK(const struct gss_krb5_enctype *gk5e, const struct xdr_netobj *inkey, u8 *rawkey, const struct xdr_netobj *in_constant, gfp_t gfp_mask)
    267 krb5_derive_key_v2(const struct gss_krb5_enctype *gk5e, const struct xdr_netobj *inkey, struct xdr_netobj *outkey, const struct xdr_netobj *label, gfp_t gfp_mask)
    368 krb5_kdf_feedback_cmac(const struct gss_krb5_enctype *gk5e, const struct xdr_netobj *inkey, struct xdr_netobj *outkey, const struct xdr_netobj *constant, gfp_t gfp_mask)
    500 krb5_kdf_hmac_sha2(const struct gss_krb5_enctype *gk5e, const struct xdr_netobj *inkey, struct xdr_netobj *outkey, const struct xdr_netobj *label, gfp_t gfp_mask)
    [all...]