Searched refs:gfp_mask (Results 1 - 25 of 225) sorted by last modified time


/linux-master/net/core/
skbuff.c
274 static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp_mask) argument
625 * @gfp_mask: allocation mask
636 * Buffers may only be allocated from interrupts using a @gfp_mask of
639 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, argument
651 gfp_mask |= __GFP_MEMALLOC;
658 skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node);
668 data = kmalloc_reserve(&size, gfp_mask, node, &pfmemalloc);
707 * @gfp_mask: get_free_pages mask, passed to alloc_skb
717 gfp_t gfp_mask)
731 (gfp_mask
716 __netdev_alloc_skb(struct net_device *dev, unsigned int len, gfp_t gfp_mask) argument
790 __napi_alloc_skb(struct napi_struct *napi, unsigned int len, gfp_t gfp_mask) argument
1912 skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig, gfp_t gfp_mask) argument
1947 skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) argument
2044 skb_clone(struct sk_buff *skb, gfp_t gfp_mask) argument
2124 skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) argument
2169 __pskb_copy_fclone(struct sk_buff *skb, int headroom, gfp_t gfp_mask, bool fclone) argument
2233 pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask) argument
2457 skb_copy_expand(const struct sk_buff *skb, int newheadroom, int newtailroom, gfp_t gfp_mask) argument
6499 alloc_skb_with_frags(unsigned long header_len, unsigned long data_len, int order, int *errcode, gfp_t gfp_mask) argument
6555 pskb_carve_inside_header(struct sk_buff *skb, const u32 off, const int headlen, gfp_t gfp_mask) argument
6616 pskb_carve_frag_list(struct sk_buff *skb, struct skb_shared_info *shinfo, int eat, gfp_t gfp_mask) argument
6670 pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, int pos, gfp_t gfp_mask) argument
[all...]
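
The skbuff allocators above all take the caller's gfp_mask so the allocation can match the calling context. A minimal sketch of the common pattern (the helper name and the 128-byte length are made up for illustration):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Sketch: pick a gfp_mask that matches the calling context.
     * GFP_ATOMIC never sleeps (usable from IRQ/softirq context);
     * GFP_KERNEL may sleep and is for process context only. */
    static struct sk_buff *example_rx_alloc(struct net_device *dev,
                                            bool atomic_ctx)
    {
            gfp_t gfp_mask = atomic_ctx ? GFP_ATOMIC : GFP_KERNEL;

            /* __netdev_alloc_skb() (line 716 above) reserves
             * NET_SKB_PAD headroom and sets skb->dev. */
            return __netdev_alloc_skb(dev, 128, gfp_mask);
    }
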
/linux-master/lib/
scatterlist.c
151 static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask) argument
163 void *ptr = (void *) __get_free_page(gfp_mask);
164 kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
168 gfp_mask);
271 * @gfp_mask: GFP allocation mask
287 unsigned int nents_first_chunk, gfp_t gfp_mask,
321 sg = alloc_fn(alloc_size, gfp_mask);
367 * @gfp_mask: GFP allocation mask
374 int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask) argument
379 NULL, 0, gfp_mask, sg_kmalloc
285 __sg_alloc_table(struct sg_table *table, unsigned int nents, unsigned int max_ents, struct scatterlist *first_chunk, unsigned int nents_first_chunk, gfp_t gfp_mask, sg_alloc_fn *alloc_fn) argument
386 get_next_sg(struct sg_append_table *table, struct scatterlist *cur, unsigned long needed_sges, gfp_t gfp_mask) argument
454 sg_alloc_append_table_from_pages(struct sg_append_table *sgt_append, struct page **pages, unsigned int n_pages, unsigned int offset, unsigned long size, unsigned int max_segment, unsigned int left_pages, gfp_t gfp_mask) argument
578 sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages, unsigned int n_pages, unsigned int offset, unsigned long size, unsigned int max_segment, gfp_t gfp_mask) argument
[all...]
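
sg_alloc_table() (line 374 above) forwards gfp_mask into sg_kmalloc() for each page-sized chunk of entries. A minimal sketch of allocating and freeing a table; the 16-entry count is arbitrary:

    #include <linux/scatterlist.h>

    /* Sketch: allocate a table with a sleepable mask, release it
     * with sg_free_table(). */
    static int example_sg_table(void)
    {
            struct sg_table table;
            int ret;

            ret = sg_alloc_table(&table, 16, GFP_KERNEL);
            if (ret)
                    return ret;

            /* ... populate table.sgl with sg_set_page() here ... */

            sg_free_table(&table);
            return 0;
    }
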
/linux-master/drivers/gpu/drm/ttm/
ttm_tt.c
218 gfp_t gfp_mask; local
225 gfp_mask = mapping_gfp_mask(swap_space);
229 gfp_mask);
ttm_pool.c
767 struct shrink_control sc = { .gfp_mask = GFP_NOFS };
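
The ttm_pool.c hit builds a shrink_control by hand; its gfp_mask tells the shrinker what reclaim context it runs in. A sketch, assuming a shrinker whose scan_objects callback is populated; the batch size is arbitrary:

    #include <linux/shrinker.h>

    /* Sketch: invoke a shrinker's scan callback directly. GFP_NOFS
     * (as at ttm_pool.c:767 above) forbids the callback from
     * re-entering filesystem code during reclaim. */
    static void example_force_scan(struct shrinker *shrinker)
    {
            struct shrink_control sc = {
                    .gfp_mask   = GFP_NOFS,
                    .nr_to_scan = 128,   /* arbitrary batch size */
            };

            shrinker->scan_objects(shrinker, &sc);
    }
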
/linux-master/mm/
zswap.c
1331 if (!gfp_has_io_fs(sc->gfp_mask))
hugetlb.c
1349 static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask, argument
1358 zonelist = node_zonelist(nid, gfp_mask);
1362 for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
1365 if (!cpuset_zone_allowed(zone, gfp_mask))
1397 gfp_t gfp_mask; local
1413 gfp_mask = htlb_alloc_mask(h);
1414 nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1417 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
1425 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
1565 static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask, argument
1603 alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nodemask) argument
1611 alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nodemask) argument
2182 alloc_buddy_hugetlb_folio(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nmask, nodemask_t *node_alloc_noretry) argument
2245 __alloc_fresh_hugetlb_folio(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nmask, nodemask_t *node_alloc_noretry) argument
2279 only_alloc_fresh_hugetlb_folio(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nmask, nodemask_t *node_alloc_noretry) argument
2299 alloc_fresh_hugetlb_folio(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nmask, nodemask_t *node_alloc_noretry) argument
2341 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; local
2512 alloc_surplus_hugetlb_folio(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nmask) argument
2553 alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nmask) argument
2585 gfp_t gfp_mask = htlb_alloc_mask(h); local
2607 alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask, gfp_t gfp_mask) argument
3016 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; local
3479 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; local
4907 gfp_t gfp_mask = htlb_alloc_mask(h); local
6641 gfp_t gfp_mask; local
[all...]
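
dequeue_hugetlb_folio_nodemask() (hugetlb.c:1349 above) shows the standard way a gfp_mask drives zone selection. A sketch of that walk with the per-zone work elided:

    #include <linux/cpuset.h>
    #include <linux/gfp.h>
    #include <linux/mmzone.h>

    /* Sketch: gfp_zone() derives the highest usable zone from the
     * mask's zone bits, node_zonelist() gives the fallback order
     * starting from @nid, and cpuset limits are honoured per zone. */
    static void example_zonelist_walk(int nid, gfp_t gfp_mask,
                                      nodemask_t *nmask)
    {
            struct zonelist *zonelist = node_zonelist(nid, gfp_mask);
            struct zoneref *z;
            struct zone *zone;

            for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                            gfp_zone(gfp_mask), nmask) {
                    if (!cpuset_zone_allowed(zone, gfp_mask))
                            continue;
                    /* ... try to take a free folio from this zone ... */
            }
    }
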
page_owner.c
27 gfp_t gfp_mask; member in struct:page_owner
166 gfp_t gfp_mask)
171 /* Filter gfp_mask the same way stackdepot does, for consistency */
172 gfp_mask &= ~GFP_ZONEMASK;
173 gfp_mask &= (GFP_ATOMIC | GFP_KERNEL);
174 gfp_mask |= __GFP_NOWARN;
177 stack = kmalloc(sizeof(*stack), gfp_mask);
199 static void inc_stack_record_count(depot_stack_handle_t handle, gfp_t gfp_mask, argument
219 add_stack_record_to_list(stack_record, gfp_mask);
240 gfp_t gfp_mask,
165 add_stack_record_to_list(struct stack_record *stack_record, gfp_t gfp_mask) argument
237 __update_page_owner_handle(struct page_ext *page_ext, depot_stack_handle_t handle, unsigned short order, gfp_t gfp_mask, short last_migrate_reason, u64 ts_nsec, pid_t pid, pid_t tgid, char *comm) argument
318 __set_page_owner(struct page *page, unsigned short order, gfp_t gfp_mask) argument
603 gfp_t gfp_mask; local
[all...]
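
The filtering at page_owner.c:172-174 above normalizes a gfp_mask before reusing it for the stack-record allocation, matching what stackdepot does. Restated as a standalone helper:

    #include <linux/gfp.h>

    /* Sketch: drop the zone selector bits, keep only the reclaim
     * bits covered by GFP_ATOMIC/GFP_KERNEL, and never warn on
     * failure, since the caller tolerates a lost record. */
    static gfp_t example_filter_gfp(gfp_t gfp_mask)
    {
            gfp_mask &= ~GFP_ZONEMASK;
            gfp_mask &= (GFP_ATOMIC | GFP_KERNEL);
            gfp_mask |= __GFP_NOWARN;
            return gfp_mask;
    }
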
memory-failure.c
2680 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
internal.h
597 const gfp_t gfp_mask; /* gfp mask of a direct compactor */ member in struct:compact_control
1053 gfp_t gfp_mask; member in struct:migration_target_control
1328 unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
gup.c
2146 .gfp_mask = GFP_USER | __GFP_NOWARN,
/linux-master/include/net/
sock.h
999 static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask) argument
1001 return gfp_mask | (sk->sk_allocation & __GFP_MEMALLOC);
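
sk_gfp_mask() (sock.h:999 above) ORs the socket's __GFP_MEMALLOC bit into the caller's base mask, so SOCK_MEMALLOC sockets (e.g. swap over network storage) may dip into memory reserves. The typical call shape, sketched:

    #include <linux/skbuff.h>
    #include <net/sock.h>

    /* Sketch: the caller supplies the context-appropriate base mask;
     * sk_gfp_mask() widens it only for memalloc sockets. */
    static struct sk_buff *example_sock_skb(struct sock *sk,
                                            unsigned int size)
    {
            return alloc_skb(size, sk_gfp_mask(sk, GFP_ATOMIC));
    }
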
/linux-master/include/linux/
mm.h
515 * MM layer fills up gfp_mask for page allocations but fault handler might
523 gfp_t gfp_mask; /* gfp mask to be used for allocations */ member in struct:vm_fault::__anon82
3232 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);
blkdev.h
1062 sector_t nr_sects, gfp_t gfp_mask);
1064 sector_t nr_sects, gfp_t gfp_mask, struct bio **biop);
1072 sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
1075 sector_t nr_sects, gfp_t gfp_mask, unsigned flags);
1078 sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
1085 gfp_mask);
1088 sector_t nr_blocks, gfp_t gfp_mask)
1095 gfp_mask, 0);
1077 sb_issue_discard(struct super_block *sb, sector_t block, sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags) argument
1087 sb_issue_zeroout(struct super_block *sb, sector_t block, sector_t nr_blocks, gfp_t gfp_mask) argument
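
The blkdev.h declarations above show gfp_mask threading through the discard and zeroout helpers. A sketch of a filesystem discarding freed blocks:

    #include <linux/blkdev.h>

    /* Sketch: GFP_NOFS keeps the bio allocation from recursing into
     * filesystem reclaim while the fs holds its own locks. */
    static int example_discard(struct super_block *sb, sector_t block,
                               sector_t nr_blocks)
    {
            return sb_issue_discard(sb, block, nr_blocks, GFP_NOFS, 0);
    }
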
shmem_fs.h
109 pgoff_t index, gfp_t gfp_mask);
/linux-master/fs/ntfs3/
file.c
812 gfp_t gfp_mask = mapping_gfp_mask(mapping); local
820 page = find_or_create_page(mapping, index, gfp_mask);
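
The ntfs3 hit illustrates the idiom of respecting per-mapping allocation constraints rather than hard-coding a mask. Sketched standalone:

    #include <linux/pagemap.h>

    /* Sketch: mapping_gfp_mask() returns the constraints recorded on
     * the address_space, e.g. a mapping that must not use highmem or
     * must not recurse into its own filesystem. */
    static struct page *example_grab_page(struct address_space *mapping,
                                          pgoff_t index)
    {
            gfp_t gfp_mask = mapping_gfp_mask(mapping);

            return find_or_create_page(mapping, index, gfp_mask);
    }
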
/linux-master/drivers/net/ethernet/renesas/
ravb_main.c
118 gfp_t gfp_mask)
124 gfp_mask);
117 ravb_alloc_skb(struct net_device *ndev, const struct ravb_hw_info *info, gfp_t gfp_mask) argument
/linux-master/drivers/md/
dm.c
573 static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio, gfp_t gfp_mask) argument
579 clone = bio_alloc_clone(NULL, bio, gfp_mask, &md->mempools->io_bs);
615 unsigned int target_bio_nr, unsigned int *len, gfp_t gfp_mask)
627 clone = bio_alloc_clone(NULL, ci->bio, gfp_mask,
614 alloc_tio(struct clone_info *ci, struct dm_target *ti, unsigned int target_bio_nr, unsigned int *len, gfp_t gfp_mask) argument
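
alloc_io() and alloc_tio() above both forward gfp_mask into bio_alloc_clone(), backed by a per-device bio_set. A sketch; the bio_set argument stands in for whatever pool the caller owns:

    #include <linux/bio.h>

    /* Sketch: clone a bio from a dedicated bio_set. GFP_NOIO is the
     * usual mask on the I/O path: the mempool may sleep, but reclaim
     * must not issue new I/O and deadlock against us. */
    static struct bio *example_clone_bio(struct bio *src,
                                         struct bio_set *bs)
    {
            return bio_alloc_clone(NULL, src, GFP_NOIO, bs);
    }
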
/linux-master/fs/btrfs/
backref.c
392 struct share_check *sc, gfp_t gfp_mask)
399 ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
422 struct share_check *sc, gfp_t gfp_mask)
425 parent, wanted_disk_byte, count, sc, gfp_mask);
433 struct share_check *sc, gfp_t gfp_mask)
440 wanted_disk_byte, count, sc, gfp_mask);
388 add_prelim_ref(const struct btrfs_fs_info *fs_info, struct preftree *preftree, u64 root_id, const struct btrfs_key *key, int level, u64 parent, u64 wanted_disk_byte, int count, struct share_check *sc, gfp_t gfp_mask) argument
419 add_direct_ref(const struct btrfs_fs_info *fs_info, struct preftrees *preftrees, int level, u64 parent, u64 wanted_disk_byte, int count, struct share_check *sc, gfp_t gfp_mask) argument
429 add_indirect_ref(const struct btrfs_fs_info *fs_info, struct preftrees *preftrees, u64 root_id, const struct btrfs_key *key, int level, u64 wanted_disk_byte, int count, struct share_check *sc, gfp_t gfp_mask) argument
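
add_prelim_ref() (backref.c:388 above) allocates from a dedicated slab cache with the caller's gfp_mask. The pattern, sketched against a hypothetical cache:

    #include <linux/slab.h>

    /* Sketch: GFP_NOFS is the common choice inside a filesystem so
     * that reclaim triggered by this allocation cannot re-enter the
     * filesystem and deadlock. example_cache is hypothetical. */
    static void *example_ref_alloc(struct kmem_cache *example_cache)
    {
            return kmem_cache_alloc(example_cache, GFP_NOFS);
    }
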
/linux-master/kernel/dma/
swiotlb.c
430 int swiotlb_init_late(size_t size, gfp_t gfp_mask, argument
452 if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp_mask & __GFP_DMA))
454 else if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp_mask & __GFP_DMA32))
468 vstart = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
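
swiotlb_init_late() above reads the zone modifier bits out of gfp_mask before allocating the bounce buffer. The same selection, sketched; the two boolean parameters are hypothetical stand-ins for the device's addressing limits:

    #include <linux/gfp.h>

    /* Sketch: __GFP_DMA / __GFP_DMA32 steer __get_free_pages()
     * toward memory the device can address; __GFP_NOWARN because
     * the caller handles failure itself. */
    static void *example_bounce_pages(bool need_dma, bool need_dma32,
                                      unsigned int order)
    {
            gfp_t gfp_mask = GFP_KERNEL;

            if (IS_ENABLED(CONFIG_ZONE_DMA) && need_dma)
                    gfp_mask |= __GFP_DMA;
            else if (IS_ENABLED(CONFIG_ZONE_DMA32) && need_dma32)
                    gfp_mask |= __GFP_DMA32;

            return (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
                                            order);
    }
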
/linux-master/fs/bcachefs/
sysfs.c
511 sc.gfp_mask = GFP_KERNEL;
/linux-master/block/
blk-cgroup.c
301 * @gfp_mask: allocation mask to use
306 gfp_t gfp_mask)
312 blkg = kzalloc_node(sizeof(*blkg), gfp_mask, disk->queue->node);
315 if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask))
317 blkg->iostat_cpu = alloc_percpu_gfp(struct blkg_iostat_set, gfp_mask);
346 pd = pol->pd_alloc_fn(disk, blkcg, gfp_mask);
305 blkg_alloc(struct blkcg *blkcg, struct gendisk *disk, gfp_t gfp_mask) argument
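
blkg_alloc() above is a good example of gfp_mask propagation: the one mask is handed to kzalloc_node(), percpu_ref_init(), alloc_percpu_gfp() and the policy callbacks, so the caller decides once whether the whole operation may sleep. Sketched with a hypothetical per-CPU stats struct:

    #include <linux/percpu.h>
    #include <linux/slab.h>

    struct example_iostat { u64 bytes; };   /* hypothetical */

    /* Sketch: every allocator in the sequence gets the same mask. */
    static int example_group_alloc(gfp_t gfp_mask, int node)
    {
            struct example_iostat __percpu *stat;
            void *grp = kzalloc_node(128, gfp_mask, node);

            if (!grp)
                    return -ENOMEM;

            stat = alloc_percpu_gfp(struct example_iostat, gfp_mask);
            if (!stat) {
                    kfree(grp);
                    return -ENOMEM;
            }

            free_percpu(stat);
            kfree(grp);
            return 0;
    }
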
/linux-master/drivers/scsi/
sg.c
1877 gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN | __GFP_ZERO; local
1913 schp->pages[k] = alloc_pages(gfp_mask, order);
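
The mask at sg.c:1877 above combines four flags worth spelling out; restated as a sketch:

    #include <linux/gfp.h>

    /* Sketch: GFP_ATOMIC because the caller may not sleep;
     * __GFP_COMP so the multi-order block behaves as one compound
     * page; __GFP_NOWARN because failure is handled by falling back
     * to a lower order; __GFP_ZERO so no stale kernel data leaks
     * into the buffer. */
    static struct page *example_sg_pages(unsigned int order)
    {
            gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN |
                             __GFP_ZERO;

            return alloc_pages(gfp_mask, order);
    }
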
/linux-master/drivers/net/
virtio_net.c
448 static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask) argument
457 p = alloc_page(gfp_mask);
/linux-master/drivers/isdn/mISDN/
socket.c
35 _l2_alloc_skb(unsigned int len, gfp_t gfp_mask) argument
39 skb = alloc_skb(len + L2_HEADER_LEN, gfp_mask);
/linux-master/drivers/crypto/ccp/
sev-dev.c
427 static struct page *__snp_alloc_firmware_pages(gfp_t gfp_mask, int order) argument
436 page = alloc_pages(gfp_mask, order);
452 void *snp_alloc_firmware_page(gfp_t gfp_mask) argument
456 page = __snp_alloc_firmware_pages(gfp_mask, 0);

Completed in 350 milliseconds
