Lines matching refs: gfp

459 static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *pol,
1218 gfp_t gfp;
1228 gfp = htlb_alloc_mask(h);
1229 nodemask = policy_nodemask(gfp, pol, ilx, &nid);
1230 return alloc_hugetlb_folio_nodemask(h, nid, nodemask, gfp);
1234 gfp = GFP_TRANSHUGE;
1236 gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL | __GFP_COMP;
1238 page = alloc_pages_mpol(gfp, order, pol, ilx, nid);
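
The block at 1218-1238 chooses GFP flags from the folio type before resolving the policy: hugetlb goes through htlb_alloc_mask() and alloc_hugetlb_folio_nodemask(), large folios get GFP_TRANSHUGE, and everything else gets GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL | __GFP_COMP before landing in alloc_pages_mpol(). A hedged reconstruction of that flow is below; the two branch conditions are inferred from the flag choices and are not part of the matches above.

    if (folio_test_hugetlb(src)) {                   /* assumed condition */
            h = folio_hstate(src);
            gfp = htlb_alloc_mask(h);
            nodemask = policy_nodemask(gfp, pol, ilx, &nid);
            return alloc_hugetlb_folio_nodemask(h, nid, nodemask, gfp);
    }

    if (folio_test_large(src))                       /* assumed condition */
            gfp = GFP_TRANSHUGE;
    else
            gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL | __GFP_COMP;

    page = alloc_pages_mpol(gfp, order, pol, ilx, nid);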
1864 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
2044 static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *pol,
2061 if (apply_policy_zone(pol, gfp_zone(gfp)) &&
2071 WARN_ON_ONCE(gfp & __GFP_THISNODE);
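
policy_nodemask() at 2044 is where a mempolicy plus the caller's GFP flags become a (preferred node, optional nodemask) pair: 2061 only applies a bind-style nodemask when gfp_zone(gfp) permits it, and 2071 warns because __GFP_THISNODE contradicts a multi-node preference. The calling contract, as it can be read off the uses at 1229, 2226 and 2545 (a sketch, not a documented API), is:

    int nid = numa_node_id();        /* input hint, may be updated */
    nodemask_t *nodemask;

    nodemask = policy_nodemask(gfp, pol, ilx, &nid);
    page = __alloc_pages(gfp, order, nid, nodemask); /* as at 2264 */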
2189 static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
2201 preferred_gfp = gfp | __GFP_NOWARN;
2205 page = __alloc_pages(gfp, order, nid, NULL);
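
alloc_pages_preferred_many() at 2189 implements MPOL_PREFERRED_MANY as two passes: a soft first pass restricted to the preferred nodes (2201 adds __GFP_NOWARN; the kernel also drops flags such as __GFP_DIRECT_RECLAIM there so this pass fails fast, which is not visible in the matches), then an unconstrained retry with the original gfp (2205). A sketch of that shape:

    preferred_gfp = gfp | __GFP_NOWARN;                        /* 2201 */
    /* (the kernel also clears reclaim/nofail bits here -- assumed) */
    page = __alloc_pages(preferred_gfp, order, nid, nodemask); /* assumed */
    if (!page)
            page = __alloc_pages(gfp, order, nid, NULL);       /* 2205 */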
2212 * @gfp: GFP flags.
2220 struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order,
2226 nodemask = policy_nodemask(gfp, pol, ilx, &nid);
2229 return alloc_pages_preferred_many(gfp, order, nid, nodemask);
2252 gfp | __GFP_THISNODE | __GFP_NORETRY, order);
2253 if (page || !(gfp & __GFP_DIRECT_RECLAIM))
2264 page = __alloc_pages(gfp, order, nid, nodemask);
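
alloc_pages_mpol() (2220) is the common policy-aware allocator: it resolves the policy once (2226), hands MPOL_PREFERRED_MANY to the helper above (2229), and otherwise ends in a single __alloc_pages(gfp, order, nid, nodemask) at 2264. The lines at 2252-2253 are the THP special case: one cheap attempt pinned to the chosen node, with fallback allowed only when the caller accepted direct reclaim. A sketch of that fast-path-then-fallback step; the callee of the truncated call at 2252 is assumed to be __alloc_pages_node():

    page = __alloc_pages_node(nid,                             /* assumed */
                    gfp | __GFP_THISNODE | __GFP_NORETRY, order);
    if (page || !(gfp & __GFP_DIRECT_RECLAIM))
            return page;
    page = __alloc_pages(gfp, order, nid, nodemask);           /* 2264 */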
2281 * @gfp: GFP flags.
2295 struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
2303 page = alloc_pages_mpol(gfp | __GFP_COMP, order,
2312 * @gfp: GFP flags.
2324 struct page *alloc_pages(gfp_t gfp, unsigned int order)
2332 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2335 return alloc_pages_mpol(gfp, order,
2340 struct folio *folio_alloc(gfp_t gfp, unsigned int order)
2342 return page_rmappable_folio(alloc_pages(gfp | __GFP_COMP, order));
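
alloc_pages() (2324) consults the current task's policy only when it is safe and meaningful to do so: not in interrupt context and not when the caller pinned the node with __GFP_THISNODE (2332). folio_alloc() (2340) is the same call with __GFP_COMP forced so the result can be handled as a folio (2342). A minimal caller-side example using these interfaces:

    struct page *p = alloc_pages(GFP_KERNEL, 0);   /* policy-aware page */
    struct folio *f = folio_alloc(GFP_KERNEL, 0);  /* same, as a folio */

    if (p)
            __free_pages(p, 0);
    if (f)
            folio_put(f);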
2346 static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
2363 nr_allocated = __alloc_pages_bulk(gfp,
2369 nr_allocated = __alloc_pages_bulk(gfp,
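
alloc_pages_bulk_array_interleave() (2346) spreads nr_pages over the interleave nodes: an even share per node, with the remainder handed out one extra page at a time, which is why there are two __alloc_pages_bulk() calls (2363 with the +1 share, 2369 without). A standalone sketch of just that split (plain C, no kernel types):

    #include <stdio.h>

    int main(void)
    {
            unsigned long nr_pages = 10, nodes = 3;
            unsigned long per_node = nr_pages / nodes;          /* 3 */
            unsigned long delta = nr_pages - nodes * per_node;  /* 1 */

            for (unsigned long i = 0; i < nodes; i++)
                    printf("node %lu: %lu pages\n", i,
                           per_node + (i < delta ? 1 : 0));
            return 0;
    }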
2381 static unsigned long alloc_pages_bulk_array_weighted_interleave(gfp_t gfp,
2419 nr_allocated = __alloc_pages_bulk(gfp, node, NULL, node_pages,
2482 nr_allocated = __alloc_pages_bulk(gfp, node, NULL, node_pages,
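
The weighted-interleave variant (2381) spreads nr_pages in proportion to per-node weights rather than evenly; the two __alloc_pages_bulk() calls at 2419 and 2482 each hand one node's share to the page allocator. The sketch below models only the proportional split; the kernel additionally resumes from the task's current interleave position and remaining weight, which is not shown here.

    #include <stdio.h>

    int main(void)
    {
            unsigned int weight[] = { 3, 1, 2 };               /* per node */
            unsigned long nr_pages = 13, total_weight = 6;
            unsigned long rounds = nr_pages / total_weight;    /* 2 */
            unsigned long rem = nr_pages % total_weight;       /* 1 */

            for (int i = 0; i < 3; i++) {
                    unsigned long extra = rem > weight[i] ? weight[i] : rem;

                    rem -= extra;
                    printf("node %d: %lu pages\n", i,
                           rounds * weight[i] + extra);
            }
            return 0;
    }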
2496 static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
2503 preferred_gfp = gfp | __GFP_NOWARN;
2510 nr_allocated += __alloc_pages_bulk(gfp, numa_node_id(), NULL,
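
The MPOL_PREFERRED_MANY bulk path (2496) repeats the two-pass idea from 2189: a soft pass over the preferred nodes (2503), then a top-up from the local node (2510) for whatever is still missing. A hedged sketch of the two passes; only 2503 and the start of 2510 are in the matches, the remaining arguments follow __alloc_pages_bulk()'s (gfp, nid, nodemask, nr_pages, page_list, page_array) signature:

    preferred_gfp = gfp | __GFP_NOWARN;              /* soft pass, 2503 */
    nr_allocated = __alloc_pages_bulk(preferred_gfp, nid, &pol->nodes,
                                      nr_pages, NULL, page_array);
    if (nr_allocated < nr_pages)                     /* top up, 2510 */
            nr_allocated += __alloc_pages_bulk(gfp, numa_node_id(), NULL,
                                               nr_pages - nr_allocated, NULL,
                                               page_array + nr_allocated);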
2522 unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
2529 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2533 return alloc_pages_bulk_array_interleave(gfp, pol,
2538 gfp, pol, nr_pages, page_array);
2541 return alloc_pages_bulk_array_preferred_many(gfp,
2545 nodemask = policy_nodemask(gfp, pol, NO_INTERLEAVE_INDEX, &nid);
2546 return __alloc_pages_bulk(gfp, nid, nodemask,
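
alloc_pages_bulk_array_mempolicy() (2522) is the bulk entry point: it takes the task policy under the same guard as alloc_pages() (2529), dispatches interleave, weighted interleave and preferred-many to the helpers above (2533-2541), and lets every other mode collapse into one policy_nodemask() plus __alloc_pages_bulk() call (2545-2546). Since the dispatch itself is already visible in the matches, here is a caller-side sketch instead, bulk-allocating order-0 pages under the current policy and freeing them:

    struct page *pages[16] = { NULL };  /* NULL slots get filled */
    unsigned long got, i;

    got = alloc_pages_bulk_array_mempolicy(GFP_KERNEL, ARRAY_SIZE(pages), pages);
    pr_info("allocated %lu of %zu pages\n", got, ARRAY_SIZE(pages));
    for (i = 0; i < ARRAY_SIZE(pages); i++)
            if (pages[i])
                    __free_pages(pages[i], 0);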