Occurrences of "gfp" in linux-master, listed per file as the matching line number and source line; "(local)" marks a local-variable declaration and "(argument)" a function-parameter hit.

/linux-master/kernel/bpf/cpumap.c
   279  gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;   (local)
   334  gfp, nframes, skbs);
   395  gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;   (local)
   402  rcpu = bpf_map_kmalloc_node(map, sizeof(*rcpu), gfp | __GFP_ZERO, numa);
   408  sizeof(void *), gfp);
   418  rcpu->queue = bpf_map_kmalloc_node(map, sizeof(*rcpu->queue), gfp,
   423  err = ptr_ring_init(rcpu->queue, value->qsize, gfp);

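The cpumap.c hits show the usual split between an atomic mask for the datapath (line 279) and a sleepable GFP_KERNEL | __GFP_NOWARN mask for map setup (lines 395-423). Below is a minimal sketch of that setup-path pattern using the generic kmalloc_node() and ptr_ring_init() rather than the BPF-internal bpf_map_kmalloc_node() helper; struct my_cpu_entry and my_entry_alloc() are made-up names for illustration only.

#include <linux/slab.h>
#include <linux/ptr_ring.h>

struct my_cpu_entry {
	struct ptr_ring queue;
};

/* Sleepable setup path: GFP_KERNEL, zeroed allocation, no OOM splat on failure. */
static struct my_cpu_entry *my_entry_alloc(unsigned int qsize, int numa)
{
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
	struct my_cpu_entry *e;

	e = kmalloc_node(sizeof(*e), gfp | __GFP_ZERO, numa);
	if (!e)
		return NULL;

	if (ptr_ring_init(&e->queue, qsize, gfp)) {
		kfree(e);
		return NULL;
	}
	return e;
}
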
/linux-master/include/linux/memcontrol.h
   670  int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);
   676  * @gfp: Reclaim mode.
   679  * pages according to @gfp if necessary. If @mm is NULL, try to
   687  gfp_t gfp)
   691  return __mem_cgroup_charge(folio, mm, gfp);
   694  int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp,
   698  gfp_t gfp, swp_entry_t entry);
  1271  struct mm_struct *mm, gfp_t gfp)
  1277  gfp_t gfp, long nr_pages)
  1283  struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
   686  mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)   (argument)
  1270  mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)   (argument)
  1276  mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp, long nr_pages)   (argument)
  1282  mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)   (argument)
  1842  memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)   (argument)
  1888  memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)   (argument)
  1898  __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)   (argument)
   … (more matches not shown)

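Lines 686-691 correspond to the static-inline wrapper pattern used throughout memcontrol.h: the public helper short-circuits when memcg is disabled and otherwise forwards the gfp mask to the __-prefixed implementation. A rough reconstruction from the hits above (not copied verbatim; the real header may differ in detail):

static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
				    gfp_t gfp)
{
	/* No memcg configured/enabled: charging trivially succeeds. */
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_charge(folio, mm, gfp);
}
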
/linux-master/mm/shmem.c
   150  struct folio **foliop, enum sgp_type sgp, gfp_t gfp,
   763  pgoff_t index, void *expected, gfp_t gfp)
   777  gfp &= GFP_RECLAIM_MASK;
   778  folio_throttle_swaprate(folio, gfp);
   800  } while (xas_nomem(&xas, gfp));
  1568  static struct folio *shmem_swapin_cluster(swp_entry_t swap, gfp_t gfp,   (argument)
  1576  folio = swap_cluster_readahead(swap, gfp, mpol, ilx);
  1597  * Minimize the result gfp by taking the union with the deny flags,
  1606  static struct folio *shmem_alloc_hugefolio(gfp_t gfp,   (argument)
  1614  page = alloc_pages_mpol(gfp, HPAGE_PMD_ORDER…
   761  shmem_add_to_page_cache(struct folio *folio, struct address_space *mapping, pgoff_t index, void *expected, gfp_t gfp)   (argument)
  1620  shmem_alloc_folio(gfp_t gfp, struct shmem_inode_info *info, pgoff_t index)   (argument)
  1634  shmem_alloc_and_add_folio(gfp_t gfp, struct inode *inode, pgoff_t index, struct mm_struct *fault_mm, bool huge)   (argument)
  1743  shmem_should_replace_folio(struct folio *folio, gfp_t gfp)   (argument)
  1748  shmem_replace_folio(struct folio **foliop, gfp_t gfp, struct shmem_inode_info *info, pgoff_t index)   (argument)
  1848  shmem_swapin_folio(struct inode *inode, pgoff_t index, struct folio **foliop, enum sgp_type sgp, gfp_t gfp, struct mm_struct *fault_mm, vm_fault_t *fault_type)   (argument)
  1959  shmem_get_folio_gfp(struct inode *inode, pgoff_t index, struct folio **foliop, enum sgp_type sgp, gfp_t gfp, struct vm_fault *vmf, vm_fault_t *fault_type)   (argument)
  2239  gfp_t gfp = mapping_gfp_mask(inode->i_mapping);   (local)
  2615  gfp_t gfp = mapping_gfp_mask(mapping);   (local)
  4952  shmem_read_folio_gfp(struct address_space *mapping, pgoff_t index, gfp_t gfp)   (argument)
  4976  shmem_read_mapping_page_gfp(struct address_space *mapping, pgoff_t index, gfp_t gfp)   (argument)
   … (more matches not shown)

/linux-master/mm/readahead.c
   119  #include <linux/gfp.h>
   465  pgoff_t mark, unsigned int order, gfp_t gfp)
   468  struct folio *folio = filemap_alloc_folio(gfp, order);
   475  err = filemap_add_folio(ractl->mapping, folio, index, gfp);
   494  gfp_t gfp = readahead_gfp_mask(mapping);   (local)
   517  err = ra_alloc_folio(ractl, index, mark, order, gfp);
   464  ra_alloc_folio(struct readahead_control *ractl, pgoff_t index, pgoff_t mark, unsigned int order, gfp_t gfp)   (argument)

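The ra_alloc_folio() hits show the core readahead allocation pattern: derive a gfp mask from the mapping with readahead_gfp_mask(), allocate a folio of the requested order, and insert it into the page cache with the same mask. A simplified sketch of that sequence (the helper name add_readahead_folio is invented and error handling is reduced to the essentials):

#include <linux/pagemap.h>

static int add_readahead_folio(struct address_space *mapping,
			       pgoff_t index, unsigned int order)
{
	/* mapping's mask plus __GFP_NORETRY | __GFP_NOWARN for readahead. */
	gfp_t gfp = readahead_gfp_mask(mapping);
	struct folio *folio = filemap_alloc_folio(gfp, order);
	int err;

	if (!folio)
		return -ENOMEM;

	err = filemap_add_folio(mapping, folio, index, gfp);
	if (err) {
		folio_put(folio);
		return err;
	}
	return 0;
}
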
/linux-master/drivers/greybus/operation.c
   215  static int gb_message_send(struct gb_message *message, gfp_t gfp)   (argument)
   223  gfp);
   469  size_t response_size, gfp_t gfp)
   477  response = gb_operation_message_alloc(hd, type, response_size, gfp);
   578  gfp_t gfp)
   592  flags, gfp);
   604  gfp_t gfp)
   612  flags, gfp);
   708  * @gfp: the memory flags to use for any allocations
   723  gfp_t gfp)
   468  gb_operation_response_alloc(struct gb_operation *operation, size_t response_size, gfp_t gfp)   (argument)
   575  gb_operation_create_flags(struct gb_connection *connection, u8 type, size_t request_size, size_t response_size, unsigned long flags, gfp_t gfp)   (argument)
   601  gb_operation_create_core(struct gb_connection *connection, u8 type, size_t request_size, size_t response_size, unsigned long flags, gfp_t gfp)   (argument)
   720  gb_operation_request_send(struct gb_operation *operation, gb_operation_callback callback, unsigned int timeout, gfp_t gfp)   (argument)
   … (more matches not shown)

/linux-master/net/core/page_pool.c
   454  gfp_t gfp)
   458  gfp |= __GFP_COMP;
   459  page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
   481  gfp_t gfp)
   491  return __page_pool_alloc_page_order(pool, gfp);
   500  nr_pages = alloc_pages_bulk_array_node(gfp, pool->p.nid, bulk,
   539  struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)   (argument)
   549  page = __page_pool_alloc_pages_slow(pool, gfp);
   827  unsigned int size, gfp_t gfp)
   847  page = page_pool_alloc_pages(pool, gfp);
   453  __page_pool_alloc_page_order(struct page_pool *pool, gfp_t gfp)   (argument)
   480  __page_pool_alloc_pages_slow(struct page_pool *pool, gfp_t gfp)   (argument)
   825  page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset, unsigned int size, gfp_t gfp)   (argument)
   … (more matches not shown)

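Lines 458-459 show the page_pool slow path allocating one high-order page straight from the buddy allocator: __GFP_COMP is OR-ed in so the 2^order pages are treated as a single compound page, allocated on the pool's configured NUMA node. A stripped-down sketch of just that step (alloc_order_page is an illustrative name, not a page_pool API; free with __free_pages(page, order)):

#include <linux/gfp.h>

static struct page *alloc_order_page(int nid, unsigned int order, gfp_t gfp)
{
	/* Request one compound page of 2^order base pages on the given node. */
	gfp |= __GFP_COMP;
	return alloc_pages_node(nid, gfp, order);
}
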
/linux-master/net/sctp/stream_interleave.c
    27  int len, __u8 flags, gfp_t gfp)
    38  retval = sctp_make_idata(asoc, flags, sizeof(dp) + len, gfp);
   820  struct sctp_chunk *chunk, gfp_t gfp)
   826  event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
   926  static void sctp_intl_start_pd(struct sctp_ulpq *ulpq, gfp_t gfp)   (argument)
   955  gfp_t gfp)
   974  if (freed >= needed && sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0)
   975  sctp_intl_start_pd(ulpq, gfp);
   979  __u32 mid, __u16 flags, gfp_t gfp)
   989  sid, mid, flags, gfp);
    24  sctp_make_idatafrag_empty(const struct sctp_association *asoc, const struct sctp_sndrcvinfo *sinfo, int len, __u8 flags, gfp_t gfp)   (argument)
   819  sctp_ulpevent_idata(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, gfp_t gfp)   (argument)
   954  sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, gfp_t gfp)   (argument)
   978  sctp_intl_stream_abort_pd(struct sctp_ulpq *ulpq, __u16 sid, __u32 mid, __u16 flags, gfp_t gfp)   (argument)
  1053  sctp_intl_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)   (argument)
   … (more matches not shown)

/linux-master/net/sctp/sm_make_chunk.c
    51  gfp_t gfp);
    53  __u8 flags, int paylen, gfp_t gfp);
    56  gfp_t gfp);
    66  gfp_t gfp);
   208  gfp_t gfp, int vparam_len)
   233  addrs = sctp_bind_addrs_to_raw(bp, &addrs_len, gfp);
   319  retval = sctp_make_control(asoc, SCTP_CID_INIT, 0, chunksize, gfp);
   381  gfp_t gfp, int unkparam_len)
   400  addrs = sctp_bind_addrs_to_raw(&asoc->base.bind_addr, &addrs_len, gfp);
   472  retval = sctp_make_control(asoc, SCTP_CID_INIT_ACK, 0, chunksize, gfp);
   206  sctp_make_init(const struct sctp_association *asoc, const struct sctp_bind_addr *bp, gfp_t gfp, int vparam_len)   (argument)
   379  sctp_make_init_ack(const struct sctp_association *asoc, const struct sctp_chunk *chunk, gfp_t gfp, int unkparam_len)   (argument)
   722  sctp_make_datafrag_empty(const struct sctp_association *asoc, const struct sctp_sndrcvinfo *sinfo, int len, __u8 flags, gfp_t gfp)   (argument)
  1375  sctp_chunkify(struct sk_buff *skb, const struct sctp_association *asoc, struct sock *sk, gfp_t gfp)   (argument)
  1428  _sctp_make_chunk(const struct sctp_association *asoc, __u8 type, __u8 flags, int paylen, gfp_t gfp)   (argument)
  1472  sctp_make_data(const struct sctp_association *asoc, __u8 flags, int paylen, gfp_t gfp)   (argument)
  1478  sctp_make_idata(const struct sctp_association *asoc, __u8 flags, int paylen, gfp_t gfp)   (argument)
  1484  sctp_make_control(const struct sctp_association *asoc, __u8 type, __u8 flags, int paylen, gfp_t gfp)   (argument)
  1628  sctp_make_temp_asoc(const struct sctp_endpoint *ep, struct sctp_chunk *chunk, gfp_t gfp)   (argument)
  1740  sctp_unpack_cookie(const struct sctp_endpoint *ep, const struct sctp_association *asoc, struct sctp_chunk *chunk, gfp_t gfp, int *error, struct sctp_chunk **errp)   (argument)
  2355  sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk, const union sctp_addr *peer_addr, struct sctp_init_chunk *peer_init, gfp_t gfp)   (argument)
  2540  sctp_process_param(struct sctp_association *asoc, union sctp_params param, const union sctp_addr *peer_addr, gfp_t gfp)   (argument)
   … (more matches not shown)

/linux-master/drivers/gpu/drm/i915/i915_gpu_error.c
   196  static int pool_refill(struct folio_batch *fbatch, gfp_t gfp)   (argument)
   201  folio = folio_alloc(gfp, 0);
   211  static int pool_init(struct folio_batch *fbatch, gfp_t gfp)   (argument)
   217  err = pool_refill(fbatch, gfp);
   224  static void *pool_alloc(struct folio_batch *fbatch, gfp_t gfp)   (argument)
   228  folio = folio_alloc(gfp, 0);
  1444  gfp_t gfp, const char *name)
  1451  c = kmalloc(sizeof(*c), gfp);
  1471  gfp_t gfp)
  1484  next = capture_vma_snapshot(next, vma->resource, gfp, name…
  1442  capture_vma_snapshot(struct intel_engine_capture_vma *next, struct i915_vma_resource *vma_res, gfp_t gfp, const char *name)   (argument)
  1468  capture_vma(struct intel_engine_capture_vma *next, struct i915_vma *vma, const char *name, gfp_t gfp)   (argument)
  1490  capture_user(struct intel_engine_capture_vma *capture, const struct i915_request *rq, gfp_t gfp)   (argument)
  1543  intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp, u32 dump_flags)   (argument)
  1562  engine_coredump_add_context(struct intel_engine_coredump *ee, struct intel_context *ce, gfp_t gfp)   (argument)
  1584  intel_engine_coredump_add_request(struct intel_engine_coredump *ee, struct i915_request *rq, gfp_t gfp)   (argument)
  2009  i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp)   (argument)
  2036  intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp, u32 dump_flags)   (argument)
   … (more matches not shown)

/linux-master/drivers/gpu/drm/i915/i915_request.c
   503  gfp_t gfp)
   510  cb = kmem_cache_alloc(slab_execute_cbs, gfp);
   836  gfp_t gfp)
   841  if (!gfpflags_allow_blocking(gfp)) {
   857  gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
   869  return kmem_cache_alloc(slab_requests, gfp);
   894  __i915_request_create(struct intel_context *ce, gfp_t gfp)   (argument)
   901  might_alloc(gfp);
   936  gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
   938  rq = request_alloc_slow(tl, &ce->engine->request_pool, gfp);
   501  __await_execution(struct i915_request *rq, struct i915_request *signal, gfp_t gfp)   (argument)
   834  request_alloc_slow(struct intel_timeline *tl, struct i915_request **rsvd, gfp_t gfp)   (argument)
  1200  emit_semaphore_wait(struct i915_request *to, struct i915_request *from, gfp_t gfp)   (argument)
   … (more matches not shown)

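Lines 841-869 and 936-938 illustrate a common two-stage allocation: first try the slab cache with gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN so failure is quiet and does not invoke the OOM killer, then fall back to a plain (possibly reclaiming) attempt only when gfpflags_allow_blocking() says the caller may sleep. A generic sketch of that shape; my_cache and alloc_fast_then_slow are placeholders, not the i915 code:

#include <linux/slab.h>
#include <linux/gfp.h>

static void *alloc_fast_then_slow(struct kmem_cache *my_cache, gfp_t gfp)
{
	void *obj;

	/* Opportunistic attempt: quiet failure, no OOM kill. */
	obj = kmem_cache_alloc(my_cache, gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (obj)
		return obj;

	/* Atomic callers cannot wait for reclaim; give up here. */
	if (!gfpflags_allow_blocking(gfp))
		return NULL;

	/* Sleepable fallback: let the allocator reclaim and retry as needed. */
	return kmem_cache_alloc(my_cache, gfp);
}
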
/linux-master/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c
   171  struct otx_cpt_req_info *req, gfp_t gfp)
   196  info = kzalloc(total_mem_len, gfp);
   310  gfp_t gfp;   (local)
   312  gfp = (req->areq->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL :
   314  ret = setup_sgio_list(pdev, &info, req, gfp);
   345  if (gfp == GFP_KERNEL &&
   169  setup_sgio_list(struct pci_dev *pdev, struct otx_cpt_info_buffer **pinfo, struct otx_cpt_req_info *req, gfp_t gfp)   (argument)

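Line 312 (and the near-identical hits in ccp-crypto-main.c and atmel-ecc.c further down) shows the standard crypto-driver idiom: use GFP_KERNEL when the request allows sleeping and GFP_ATOMIC otherwise. A minimal sketch assuming only a struct crypto_async_request; struct my_work and my_work_alloc() are invented for illustration:

#include <linux/slab.h>
#include <linux/crypto.h>

struct my_work {
	int placeholder;	/* driver-specific state would live here */
};

static struct my_work *my_work_alloc(struct crypto_async_request *req)
{
	/* Sleep only if the submitter said the request may sleep. */
	gfp_t gfp = (req->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		    GFP_KERNEL : GFP_ATOMIC;

	return kzalloc(sizeof(struct my_work), gfp);
}
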
/linux-master/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
    85  ctx->gfp = GFP_KERNEL;
   148  * @gfp: A bitmask of GFP flags. These flags are passed to dma_alloc_coherent()
   158  void hwrm_req_alloc_flags(struct bnxt *bp, void *req, gfp_t gfp)   (argument)
   163  ctx->gfp = gfp;
   211  ctx->gfp = GFP_KERNEL;
   809  addr = dma_alloc_coherent(&bp->pdev->dev, size, dma_handle, ctx->gfp);

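Line 809 shows the request DMA buffer being allocated with whatever mask the caller stored in ctx->gfp: dma_alloc_coherent() takes a gfp argument, so atomic contexts can pass GFP_ATOMIC. A tiny sketch of just that call (alloc_dma_buf is an illustrative wrapper name; free the buffer with dma_free_coherent()):

#include <linux/dma-mapping.h>

static void *alloc_dma_buf(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t gfp)
{
	/* Returns a CPU pointer and fills *dma_handle with the device address. */
	return dma_alloc_coherent(dev, size, dma_handle, gfp);
}
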
/linux-master/lib/test_objpool.c
   289  gfp_t gfp = GFP_KERNEL;   (local)
   296  gfp = GFP_ATOMIC;
   299  gfp, sop, ot_init_node, NULL)) {
   459  gfp_t gfp = GFP_KERNEL;   (local)
   466  gfp = GFP_ATOMIC;
   468  if (objpool_init(&sop->pool, max, test->objsz, gfp, sop,

/linux-master/drivers/crypto/ccp/ccp-crypto-main.c
   271  gfp_t gfp;   (local)
   273  gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
   275  crypto_cmd = kzalloc(sizeof(*crypto_cmd), gfp);

/linux-master/drivers/crypto/atmel-ecc.c
   170  gfp_t gfp;   (local)
   182  gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL :
   185  work_data = kmalloc(sizeof(*work_data), gfp);

/linux-master/arch/sh/boards/mach-hp6xx/pm.c
    12  #include <linux/gfp.h>

/linux-master/net/ceph/string_table.c
     3  #include <linux/gfp.h>

/linux-master/net/rose/rose_out.c
    15  #include <linux/gfp.h>

/linux-master/arch/x86/power/hibernate_64.c
    10  #include <linux/gfp.h>

/linux-master/net/ipv6/netfilter/ip6t_REJECT.c
    17  #include <linux/gfp.h>

/linux-master/sound/usb/caiaq/midi.c
     8  #include <linux/gfp.h>

/linux-master/drivers/acpi/event.c
    17  #include <linux/gfp.h>

/linux-master/drivers/net/wan/hdlc_raw_eth.c
    11  #include <linux/gfp.h>

/linux-master/arch/mips/jazz/jazzdma.c
    18  #include <linux/gfp.h>
   492  dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
   498  gfp |= __GFP_NOWARN;
   501  page = alloc_pages(gfp, get_order(size));
   491  jazz_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)   (argument)

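Lines 498-501 show the DMA-alloc hook suppressing allocation-failure warnings and converting a byte size into a page order before calling the buddy allocator. A stripped-down sketch of just that step (alloc_dma_pages is an invented name; the VDMA-specific mapping done by the real driver afterwards is omitted):

#include <linux/gfp.h>
#include <linux/mm.h>

static void *alloc_dma_pages(size_t size, gfp_t gfp)
{
	struct page *page;

	gfp |= __GFP_NOWARN;			/* caller handles failure quietly */
	page = alloc_pages(gfp, get_order(size));	/* 2^order pages >= size */
	return page ? page_address(page) : NULL;
}
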
/linux-master/drivers/infiniband/core/iwpm_util.h
   100  * @gfp: Indicates how the memory for the request should be allocated
   106  u8 nl_client, gfp_t gfp);