Searched refs:gfp_mask (Results 26 - 50 of 223) sorted by path

/linux-master/drivers/gpu/drm/ttm/
ttm_pool.c:767 struct shrink_control sc = { .gfp_mask = GFP_NOFS };
ttm_tt.c:219 gfp_t gfp_mask; (local)
226 gfp_mask = mapping_gfp_mask(swap_space);
230 gfp_mask);
/linux-master/drivers/greybus/
es2.c:308 static struct urb *next_free_urb(struct es2_ap_dev *es2, gfp_t gfp_mask) (argument)
335 return usb_alloc_urb(0, gfp_mask);
391 struct gb_message *message, gfp_t gfp_mask)
411 urb = next_free_urb(es2, gfp_mask);
433 retval = usb_submit_urb(urb, gfp_mask);
390 message_send(struct gb_host_device *hd, u16 cport_id, struct gb_message *message, gfp_t gfp_mask) (argument)
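
Aside: the es2.c hits above show a host-device send path threading one caller-supplied gfp_mask through both URB allocation and submission, so the same code serves sleeping and atomic callers. A minimal sketch of that pattern, assuming a plain bulk endpoint (the helper and its parameters are illustrative, not from the file):

#include <linux/usb.h>

/*
 * Hypothetical send helper: the caller picks gfp_mask (GFP_KERNEL or
 * GFP_ATOMIC) because only the call site knows whether it may sleep.
 */
static int example_send(struct usb_device *udev, unsigned int pipe,
                        void *buf, int len, usb_complete_t done, void *ctx,
                        gfp_t gfp_mask)
{
        struct urb *urb;
        int ret;

        urb = usb_alloc_urb(0, gfp_mask);       /* honors GFP_ATOMIC */
        if (!urb)
                return -ENOMEM;

        usb_fill_bulk_urb(urb, udev, pipe, buf, len, done, ctx);

        ret = usb_submit_urb(urb, gfp_mask);    /* same mask on submit */
        if (ret)
                usb_free_urb(urb);
        return ret;
}
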
/linux-master/drivers/infiniband/core/
mad.c:789 size_t mad_size, gfp_t gfp_mask)
803 seg = kmalloc(sizeof(*seg) + seg_size, gfp_mask);
836 int data_len, gfp_t gfp_mask,
867 buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
903 ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
788 alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr, size_t mad_size, gfp_t gfp_mask) (argument)
833 ib_create_send_mad(struct ib_mad_agent *mad_agent, u32 remote_qpn, u16 pkey_index, int rmpp_active, int hdr_len, int data_len, gfp_t gfp_mask, u8 base_version) (argument)
multicast.c:558 union ib_gid *mgid, gfp_t gfp_mask)
573 group = kzalloc(sizeof *group, gfp_mask);
610 ib_sa_comp_mask comp_mask, gfp_t gfp_mask,
624 member = kmalloc(sizeof *member, gfp_mask);
639 &rec->mgid, gfp_mask);
557 acquire_group(struct mcast_port *port, union ib_gid *mgid, gfp_t gfp_mask) (argument)
607 ib_sa_join_multicast(struct ib_sa_client *client, struct ib_device *device, u32 port_num, struct ib_sa_mcmember_rec *rec, ib_sa_comp_mask comp_mask, gfp_t gfp_mask, int (*callback)(int status, struct ib_sa_multicast *multicast), void *context) (argument)
sa.h:55 unsigned long timeout_ms, gfp_t gfp_mask,
sa_query.c:783 static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask) (argument)
803 skb = nlmsg_new(len, gfp_mask);
821 gfp_flag = ((gfp_mask & GFP_ATOMIC) == GFP_ATOMIC) ? GFP_ATOMIC :
1267 static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask) (argument)
1293 gfp_mask,
1335 gfp_t gfp_mask)
1342 ret = __xa_alloc(&queries, &id, query, xa_limit_32b, gfp_mask);
1360 if (!ib_nl_make_request(query, gfp_mask))
1499 * @gfp_mask: GFP mask to use for internal allocations
1520 unsigned long timeout_ms, gfp_t gfp_mask,
1334 send_mad(struct ib_sa_query *query, unsigned long timeout_ms, gfp_t gfp_mask) (argument)
1516 ib_sa_path_rec_get(struct ib_sa_client *client, struct ib_device *device, u32 port_num, struct sa_path_rec *rec, ib_sa_comp_mask comp_mask, unsigned long timeout_ms, gfp_t gfp_mask, void (*callback)(int status, struct sa_path_rec *resp, unsigned int num_paths, void *context), void *context, struct ib_sa_query **sa_query) (argument)
1642 ib_sa_mcmember_rec_query(struct ib_sa_client *client, struct ib_device *device, u32 port_num, u8 method, struct ib_sa_mcmember_rec *rec, ib_sa_comp_mask comp_mask, unsigned long timeout_ms, gfp_t gfp_mask, void (*callback)(int status, struct ib_sa_mcmember_rec *resp, void *context), void *context, struct ib_sa_query **sa_query) (argument)
1733 ib_sa_guid_info_rec_query(struct ib_sa_client *client, struct ib_device *device, u32 port_num, struct ib_sa_guidinfo_rec *rec, ib_sa_comp_mask comp_mask, u8 method, unsigned long timeout_ms, gfp_t gfp_mask, void (*callback)(int status, struct ib_sa_guidinfo_rec *resp, void *context), void *context, struct ib_sa_query **sa_query) (argument)
1887 gfp_t gfp_mask = GFP_KERNEL; (local)
[all...]
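
Aside: sa_query.c:821 and :1360 show the consumer side of a caller-supplied mask: internal allocations are downgraded to GFP_ATOMIC when the caller's mask requires it. A hedged sketch of the same decision using gfpflags_allow_blocking() from <linux/gfp.h> (the surrounding helper is illustrative):

#include <linux/gfp.h>
#include <linux/skbuff.h>

/* Illustrative: derive flags for a nested allocation from what the
 * original caller allowed. */
static struct sk_buff *example_nested_alloc(unsigned int len, gfp_t caller_mask)
{
        gfp_t gfp_flag = gfpflags_allow_blocking(caller_mask) ?
                         GFP_KERNEL : GFP_ATOMIC;

        return alloc_skb(len, gfp_flag);
}
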
uverbs_main.c:797 alloc_pages(vmf->gfp_mask | __GFP_ZERO, 0);
/linux-master/drivers/infiniband/hw/hns/
hns_roce_hem.c:253 gfp_t gfp_mask)
259 WARN_ON(gfp_mask & __GFP_HIGHMEM);
269 gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
274 &hem->dma, gfp_mask);
251 hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, unsigned long hem_alloc_size, gfp_t gfp_mask) (argument)
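
Aside: hns_roce_hem.c strips __GFP_HIGHMEM (and __GFP_NOWARN) before kmalloc'ing its bookkeeping at :269 (kmalloc memory must be kernel-mapped), while the DMA buffer at :274 still honors the caller's mask. A minimal sketch of the masking pattern (the chunk struct is hypothetical):

#include <linux/slab.h>
#include <linux/dma-mapping.h>

struct example_chunk {
        void *buf;
        dma_addr_t dma;
};

static struct example_chunk *example_alloc(struct device *dev, size_t size,
                                           gfp_t gfp_mask)
{
        struct example_chunk *chunk;

        /* kmalloc returns a directly mapped kernel address, so highmem
         * makes no sense here; strip it from the caller's mask. */
        chunk = kmalloc(sizeof(*chunk),
                        gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
        if (!chunk)
                return NULL;

        chunk->buf = dma_alloc_coherent(dev, size, &chunk->dma, gfp_mask);
        if (!chunk->buf) {
                kfree(chunk);
                return NULL;
        }
        return chunk;
}
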
/linux-master/drivers/infiniband/hw/mlx5/
umr.c:432 static void *mlx5r_umr_alloc_xlt(size_t *nents, size_t ent_size, gfp_t gfp_mask) (argument)
446 gfp_mask |= __GFP_ZERO | __GFP_NORETRY;
456 res = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
464 res = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
471 res = (void *)__get_free_page(gfp_mask);
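
Aside: the umr.c hits outline a fallback ladder: force __GFP_ZERO | __GFP_NORETRY, try large contiguous allocations quietly (__GFP_NOWARN), and settle for a single page if fragmentation wins. A rough sketch of the idea; the halving policy below is illustrative, not the file's exact logic:

#include <linux/gfp.h>

/* Illustrative: try progressively smaller contiguous buffers, reporting
 * back how many entries actually fit. */
static void *example_alloc_xlt(size_t *nents, size_t ent_size, gfp_t gfp_mask)
{
        int order = get_order(*nents * ent_size);
        void *res;

        gfp_mask |= __GFP_ZERO | __GFP_NORETRY;

        for (; order > 0; order--) {
                res = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN, order);
                if (res) {
                        *nents = (PAGE_SIZE << order) / ent_size;
                        return res;
                }
        }

        res = (void *)__get_free_page(gfp_mask); /* last resort: one page */
        if (res)
                *nents = PAGE_SIZE / ent_size;
        return res;
}
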
/linux-master/drivers/infiniband/hw/mthca/
mthca_cmd.c:610 gfp_t gfp_mask)
614 mailbox = kmalloc(sizeof *mailbox, gfp_mask);
618 mailbox->buf = dma_pool_alloc(dev->cmd.pool, gfp_mask, &mailbox->dma);
609 mthca_alloc_mailbox(struct mthca_dev *dev, gfp_t gfp_mask) (argument)
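
Aside: mthca_cmd.c pairs a kmalloc for the mailbox bookkeeping with a dma_pool_alloc for the DMA-able command buffer, both under the caller's mask. A minimal sketch, assuming the pool was created elsewhere with dma_pool_create():

#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/err.h>

struct example_mailbox {
        void *buf;
        dma_addr_t dma;
};

static struct example_mailbox *example_alloc_mailbox(struct dma_pool *pool,
                                                     gfp_t gfp_mask)
{
        struct example_mailbox *mailbox;

        mailbox = kmalloc(sizeof(*mailbox), gfp_mask);
        if (!mailbox)
                return ERR_PTR(-ENOMEM);

        mailbox->buf = dma_pool_alloc(pool, gfp_mask, &mailbox->dma);
        if (!mailbox->buf) {
                kfree(mailbox);
                return ERR_PTR(-ENOMEM);
        }
        return mailbox;
}
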
mthca_memfree.c:107 static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask) (argument)
115 page = alloc_pages(gfp_mask | __GFP_ZERO, order);
124 int order, gfp_t gfp_mask)
127 gfp_mask);
138 gfp_t gfp_mask, int coherent)
146 BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));
148 icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
160 gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
176 cur_order, gfp_mask);
179 cur_order, gfp_mask);
123 mthca_alloc_icm_coherent(struct device *dev, struct scatterlist *mem, int order, gfp_t gfp_mask) (argument)
137 mthca_alloc_icm(struct mthca_dev *dev, int npages, gfp_t gfp_mask, int coherent) (argument)
[all...]
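
Aside: mthca_memfree.c combines the two patterns above: highmem is stripped from the kmalloc'ed bookkeeping (:148, :160), and the bulk memory is gathered as chunks whose order shrinks when an allocation fails (:176, :179). A condensed sketch of the shrinking-order loop, with the scatterlist bookkeeping elided:

#include <linux/gfp.h>

/* Illustrative: allocate npages in total, preferring large contiguous
 * chunks but degrading gracefully under fragmentation. */
static int example_alloc_chunks(int npages, int max_order, gfp_t gfp_mask,
                                struct page **out, int *out_n)
{
        int cur_order = max_order;
        int n = 0;

        while (npages > 0) {
                struct page *page;

                /* never grab more than what is still needed */
                while (cur_order && (1 << cur_order) > npages)
                        cur_order--;

                page = alloc_pages(gfp_mask | __GFP_NOWARN, cur_order);
                if (!page) {
                        if (cur_order-- == 0)
                                return -ENOMEM; /* even order-0 failed */
                        continue;               /* retry at half the size */
                }
                out[n++] = page;
                npages -= 1 << cur_order;
        }
        *out_n = n;
        return 0;
}
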
mthca_memfree.h:83 gfp_t gfp_mask, int coherent);
/linux-master/drivers/infiniband/ulp/rtrs/
rtrs.c:21 struct rtrs_iu *rtrs_iu_alloc(u32 iu_num, size_t size, gfp_t gfp_mask, (argument)
29 ius = kcalloc(iu_num, sizeof(*ius), gfp_mask);
35 iu->buf = kzalloc(size, gfp_mask);
/linux-master/drivers/infiniband/ulp/srp/
ib_srp.c:222 gfp_t gfp_mask,
227 iu = kmalloc(sizeof *iu, gfp_mask);
231 iu->buf = kzalloc(size, gfp_mask);
221 srp_alloc_iu(struct srp_host *host, size_t size, gfp_t gfp_mask, enum dma_data_direction direction) (argument)
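
Aside: rtrs.c and ib_srp.c above share a two-level shape: a small control structure plus its data buffer, both allocated with the caller's mask, with the first allocation unwound if the second fails. A minimal sketch (names are illustrative):

#include <linux/slab.h>

struct example_iu {
        void *buf;
        size_t size;
};

static struct example_iu *example_alloc_iu(size_t size, gfp_t gfp_mask)
{
        struct example_iu *iu;

        iu = kmalloc(sizeof(*iu), gfp_mask);
        if (!iu)
                return NULL;

        iu->buf = kzalloc(size, gfp_mask); /* same mask at both levels */
        if (!iu->buf) {
                kfree(iu);                 /* unwind on partial failure */
                return NULL;
        }
        iu->size = size;
        return iu;
}
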
/linux-master/drivers/isdn/mISDN/
socket.c:35 _l2_alloc_skb(unsigned int len, gfp_t gfp_mask) (argument)
39 skb = alloc_skb(len + L2_HEADER_LEN, gfp_mask);
/linux-master/drivers/md/bcache/
btree.c:683 if (sc->gfp_mask & __GFP_IO)
sysfs.c:861 sc.gfp_mask = GFP_KERNEL;
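
Aside: the bcache hits show gfp_mask on the reclaim side: a shrinker's scan callback checks sc->gfp_mask before doing anything that needs I/O (btree.c:683), and sysfs.c builds a struct shrink_control with GFP_KERNEL to force a shrink by hand. A sketch of a scan callback honoring the mask (registration of the shrinker is assumed to happen elsewhere):

#include <linux/shrinker.h>

/* Illustrative scan callback: bail out when the reclaim context that
 * invoked us may not issue I/O. */
static unsigned long example_scan(struct shrinker *shrink,
                                  struct shrink_control *sc)
{
        if (!(sc->gfp_mask & __GFP_IO))
                return SHRINK_STOP; /* cannot write anything back now */

        /* ... free up to sc->nr_to_scan objects here ... */
        return 0;                   /* number actually freed */
}
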
util.c:261 * @gfp_mask: flags for allocation
268 int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp_mask) (argument)
278 bv->bv_page = alloc_page(gfp_mask);
util.h:559 int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp_mask);
/linux-master/drivers/md/
dm-bufio.c:1179 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask, (argument)
1184 return kmem_cache_alloc(c->slab_cache, gfp_mask);
1188 gfp_mask & __GFP_NORETRY) {
1190 return (void *)__get_free_pages(gfp_mask,
1196 return __vmalloc(c->block_size, gfp_mask);
1229 static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask) (argument)
1231 struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask);
1238 b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
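
Aside: dm-bufio.c:1184-1196 picks between three allocators, partly keyed off gfp_mask (__GFP_NORETRY marks the low-priority paths): slab for small fixed-size blocks, whole pages for mid-size ones, __vmalloc when contiguous memory is unlikely. A compressed sketch of the decision; the thresholds are illustrative:

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/gfp.h>

/* Illustrative: choose an allocator by block size, falling back to
 * vmalloc where physically contiguous pages are unlikely. */
static void *example_alloc_block(struct kmem_cache *cache, size_t block_size,
                                 gfp_t gfp_mask)
{
        if (cache) /* small, fixed-size blocks come from a slab cache */
                return kmem_cache_alloc(cache, gfp_mask);

        if (block_size <= KMALLOC_MAX_SIZE)
                return (void *)__get_free_pages(gfp_mask,
                                                get_order(block_size));

        return __vmalloc(block_size, gfp_mask); /* non-contiguous fallback */
}
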
dm-crypt.c:1682 gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM; (local)
1687 if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
1708 pages = alloc_pages(gfp_mask
1719 pages = mempool_alloc(&cc->page_pool, gfp_mask);
1723 gfp_mask |= __GFP_DIRECT_RECLAIM;
1741 if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
2731 static void *crypt_page_alloc(gfp_t gfp_mask, void *pool_data) (argument)
2742 likely(gfp_mask & __GFP_NORETRY))
2745 page = alloc_page(gfp_mask);
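
Aside: the dm-crypt.c hits trace a two-phase strategy: a first pass with GFP_NOWAIT so the fast path never sleeps, then, only if the pool runs dry, a serialized retry with __GFP_DIRECT_RECLAIM ORed in (:1723). A reduced sketch of the retry loop; the context struct and its fields are hypothetical stand-ins:

#include <linux/mempool.h>
#include <linux/mutex.h>

struct example_ctx {
        struct mutex alloc_lock;
        mempool_t page_pool;
};

/* Illustrative: opportunistic non-blocking allocation, escalating to a
 * serialized blocking retry only when the pool is exhausted. */
static struct page *example_alloc_page(struct example_ctx *ctx)
{
        gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
        struct page *page;

retry:
        if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
                mutex_lock(&ctx->alloc_lock); /* one blocking waiter at a time */

        page = mempool_alloc(&ctx->page_pool, gfp_mask);
        if (!page && !(gfp_mask & __GFP_DIRECT_RECLAIM)) {
                gfp_mask |= __GFP_DIRECT_RECLAIM; /* second pass may sleep */
                goto retry;
        }

        if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
                mutex_unlock(&ctx->alloc_lock);
        return page;
}
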
dm-rq.c:321 struct dm_rq_target_io *tio, gfp_t gfp_mask)
325 r = blk_rq_prep_clone(clone, rq, &tio->md->mempools->bs, gfp_mask,
320 setup_clone(struct request *clone, struct request *rq, struct dm_rq_target_io *tio, gfp_t gfp_mask) (argument)
dm-verity-fec.c:572 static void *fec_rs_alloc(gfp_t gfp_mask, void *pool_data) (argument)
576 return init_rs_gfp(8, 0x11d, 0, 1, v->fec->roots, gfp_mask);
/linux-master/drivers/md/dm-vdo/
flush.c:101 static void *allocate_flush(gfp_t gfp_mask, void *pool_data) (argument)
105 if ((gfp_mask & GFP_NOWAIT) == GFP_NOWAIT) {
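
Aside: several hits above (crypt_page_alloc, fec_rs_alloc, allocate_flush) are mempool backend callbacks: mempool_alloc invokes them with its current gfp_mask, and they can distinguish a must-not-sleep caller from a background refill by inspecting it; the (gfp_mask & GFP_NOWAIT) == GFP_NOWAIT test at flush.c:105 is exactly that. A minimal sketch of wiring such callbacks into a pool:

#include <linux/mempool.h>
#include <linux/slab.h>

struct example_obj {
        int payload;
};

/* mempool_alloc_t: invoked with the gfp_mask of the current request,
 * so the backend can adapt to atomic callers. */
static void *example_obj_alloc(gfp_t gfp_mask, void *pool_data)
{
        return kmalloc(sizeof(struct example_obj), gfp_mask);
}

static void example_obj_free(void *element, void *pool_data)
{
        kfree(element);
}

/* Usage: a pool that keeps at least four objects in reserve. */
static mempool_t *example_create_pool(void)
{
        return mempool_create(4, example_obj_alloc, example_obj_free, NULL);
}
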

Completed in 382 milliseconds
