Lines matching defs:folio in mm/memcontrol.c

365 * mem_cgroup_css_from_folio - css of the memcg associated with a folio
366 * @folio: folio of interest
369 * with @folio is returned. The returned css remains associated with @folio
375 struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio)
377 struct mem_cgroup *memcg = folio_memcg(folio);
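
A minimal caller sketch (hypothetical helper name; assumes CONFIG_MEMCG and
the cgroup2 default hierarchy, where the comment at line 369 says the css
stays associated with the folio, so no reference is taken here):

    #include <linux/memcontrol.h>
    #include <linux/cgroup.h>

    /* Hypothetical: report the inode number of the cgroup that owns
     * @folio, e.g. for tracing. */
    static ino_t folio_memcg_ino(struct folio *folio)
    {
            struct cgroup_subsys_state *css;

            css = mem_cgroup_css_from_folio(folio);
            return cgroup_ino(css->cgroup);
    }
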
900 void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
904 pg_data_t *pgdat = folio_pgdat(folio);
908 memcg = folio_memcg(folio);
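
__lruvec_stat_mod_folio() adjusts a node_stat_item for both the folio's node
and its memcg.  A sketch using what I believe is the irq-safe wrapper,
lruvec_stat_mod_folio():

    /* Sketch: account @folio as dirty pagecache against its node
     * and its memcg in one call. */
    static void account_folio_dirtied(struct folio *folio)
    {
            lruvec_stat_mod_folio(folio, NR_FILE_DIRTY,
                                  folio_nr_pages(folio));
    }
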
1343 void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
1350 memcg = folio_memcg(folio);
1353 VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio);
1355 VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
1360 * folio_lruvec_lock - Lock the lruvec for a folio.
1361 * @folio: Pointer to the folio.
1364 * - folio locked
1367 * - folio frozen (refcount of 0)
1369 * Return: The lruvec this folio is on with its lock held.
1371 struct lruvec *folio_lruvec_lock(struct folio *folio)
1373 struct lruvec *lruvec = folio_lruvec(folio);
1376 lruvec_memcg_debug(lruvec, folio);
1382 * folio_lruvec_lock_irq - Lock the lruvec for a folio.
1383 * @folio: Pointer to the folio.
1386 * - folio locked
1389 * - folio frozen (refcount of 0)
1391 * Return: The lruvec this folio is on with its lock held and interrupts
1394 struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
1396 struct lruvec *lruvec = folio_lruvec(folio);
1399 lruvec_memcg_debug(lruvec, folio);
1405 * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
1406 * @folio: Pointer to the folio.
1410 * - folio locked
1413 * - folio frozen (refcount of 0)
1415 * Return: The lruvec this folio is on with its lock held and interrupts
1418 struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
1421 struct lruvec *lruvec = folio_lruvec(folio);
1424 lruvec_memcg_debug(lruvec, folio);
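
All three lock functions share the contract spelled out in the comments
above: the caller must pin the folio to its lruvec via one of the listed
conditions before taking the lock.  A sketch of the irqsave flavour
(hypothetical helper; the plain and _irq variants pair with
unlock_page_lruvec() and unlock_page_lruvec_irq() respectively):

    /* Sketch: operate on @folio's LRU lists with lru_lock held and
     * interrupts saved.  The caller must already hold the folio
     * lock, have frozen the refcount, or have isolated the folio
     * from the LRU. */
    static void touch_folio_lru(struct folio *folio)
    {
            unsigned long flags;
            struct lruvec *lruvec;

            lruvec = folio_lruvec_lock_irqsave(folio, &flags);
            /* ... LRU list manipulation goes here ... */
            unlock_page_lruvec_irqrestore(lruvec, flags);
    }
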
2176 * folio_memcg_lock - Bind a folio to its memcg.
2177 * @folio: The folio.
2183 * for the lifetime of the folio.
2185 void folio_memcg_lock(struct folio *folio)
2200 memcg = folio_memcg(folio);
2214 if (memcg != folio_memcg(folio)) {
2244 * folio_memcg_unlock - Release the binding between a folio and its memcg.
2245 * @folio: The folio.
2248 * not change the accounting of this folio to its memcg, but it does
2251 void folio_memcg_unlock(struct folio *folio)
2253 __folio_memcg_unlock(folio_memcg(folio));
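
The pair brackets updates that must not race with the folio moving to
another memcg; the recheck at line 2214 is how the lock side catches a move
that slipped in.  A usage sketch (hypothetical helper):

    /* Sketch: obtain a memcg binding for @folio that is stable
     * until folio_memcg_unlock().  folio_memcg() may be NULL for
     * uncharged folios. */
    static void update_folio_memcg_state(struct folio *folio)
    {
            struct mem_cgroup *memcg;

            folio_memcg_lock(folio);
            memcg = folio_memcg(folio);
            if (memcg) {
                    /* ... update memcg-side state here ... */
            }
            folio_memcg_unlock(folio);
    }
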
2949 static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2951 VM_BUG_ON_FOLIO(folio_memcg(folio), folio);
2961 folio->memcg_data = (unsigned long)memcg;
2966 * @folio: folio to commit the charge to.
2969 void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2972 commit_charge(folio, memcg);
2975 mem_cgroup_charge_statistics(memcg, folio_nr_pages(folio));
2976 memcg_check_events(memcg, folio_nid(folio));
3043 struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
3050 if (folio_test_slab(folio)) {
3055 slab = folio_slab(folio);
3069 * a folio where the slab flag has been cleared already, but
3074 return folio_memcg_check(folio);
3093 struct folio *folio;
3099 folio = page_folio(vmalloc_to_page(p));
3101 folio = virt_to_folio(p);
3103 return mem_cgroup_from_obj_folio(folio, p);
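
mem_cgroup_from_obj() accepts any kernel pointer (slab object, vmalloc
address, or page) and resolves it through the folio paths above.  A sketch;
note that only __GFP_ACCOUNT allocations are charged in the first place:

    #include <linux/slab.h>

    static void inspect_obj_memcg(void)
    {
            void *obj = kmalloc(64, GFP_KERNEL | __GFP_ACCOUNT);
            struct mem_cgroup *memcg;

            if (!obj)
                    return;
            rcu_read_lock();
            memcg = mem_cgroup_from_obj(obj);       /* may be NULL */
            if (memcg) {
                    /* ... inspect memcg under RCU ... */
            }
            rcu_read_unlock();
            kfree(obj);
    }
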
3232 struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
3239 if (folio_memcg_kmem(folio)) {
3240 objcg = __folio_objcg(folio);
3246 memcg = __folio_memcg(folio);
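
get_obj_cgroup_from_folio() returns a referenced objcg (the folio's own
objcg for kmem folios, otherwise one derived from its memcg), so it pairs
with obj_cgroup_put().  Sketch:

    static void with_folio_objcg(struct folio *folio)
    {
            struct obj_cgroup *objcg = get_obj_cgroup_from_folio(folio);

            if (!objcg)
                    return;         /* kmem accounting off, or no memcg */
            /* ... e.g. charge or uncharge bytes against objcg ... */
            obj_cgroup_put(objcg);
    }
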
3346 struct folio *folio = page_folio(page);
3350 if (!folio_memcg_kmem(folio))
3353 objcg = __folio_objcg(folio);
3355 folio->memcg_data = 0;
3612 struct folio *folio = page_folio(head);
3613 struct mem_cgroup *memcg = folio_memcg(folio);
3622 folio_page(folio, i)->memcg_data = folio->memcg_data;
3624 if (folio_memcg_kmem(folio))
3625 obj_cgroup_get_many(__folio_objcg(folio), old_nr / new_nr - 1);
4868 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
4871 struct mem_cgroup *memcg = folio_memcg(folio);
4878 trace_track_foreign_dirty(folio, wb);
5879 struct folio *folio;
5954 struct folio *folio;
5961 /* folio is moved even if it's not RSS of this task (page-faulted). */
5964 folio = filemap_get_incore_folio(vma->vm_file->f_mapping, index);
5965 if (IS_ERR(folio))
5967 return folio_file_page(folio, index);
5971 * mem_cgroup_move_account - move account of the folio
5972 * @folio: The folio.
5974 * @from: mem_cgroup which the folio is moved from.
5975 * @to: mem_cgroup which the folio is moved to. @from != @to.
5977 * The folio must be locked and not on the LRU.
5982 static int mem_cgroup_move_account(struct folio *folio,
5989 unsigned int nr_pages = compound ? folio_nr_pages(folio) : 1;
5993 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
5994 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
5995 VM_BUG_ON(compound && !folio_test_large(folio));
5998 if (folio_memcg(folio) != from)
6001 pgdat = folio_pgdat(folio);
6005 folio_memcg_lock(folio);
6007 if (folio_test_anon(folio)) {
6008 if (folio_mapped(folio)) {
6011 if (folio_test_pmd_mappable(folio)) {
6022 if (folio_test_swapbacked(folio)) {
6027 if (folio_mapped(folio)) {
6032 if (folio_test_dirty(folio)) {
6033 struct address_space *mapping = folio_mapping(folio);
6045 if (folio_test_swapcache(folio)) {
6050 if (folio_test_writeback(folio)) {
6073 folio->memcg_data = (unsigned long)to;
6078 nid = folio_nid(folio);
6101 * move charge. If @target is not NULL, the folio is stored in target->folio
6115 struct folio *folio;
6131 folio = page_folio(page);
6133 if (!folio_trylock(folio)) {
6134 folio_put(folio);
6149 folio_unlock(folio);
6150 folio_put(folio);
6163 if (folio_memcg(folio) == mc.from) {
6165 if (folio_is_device_private(folio) ||
6166 folio_is_device_coherent(folio))
6169 target->folio = folio;
6173 folio_unlock(folio);
6174 folio_put(folio);
6200 struct folio *folio;
6210 folio = page_folio(page);
6213 if (folio_memcg(folio) == mc.from) {
6216 folio_get(folio);
6217 if (!folio_trylock(folio)) {
6218 folio_put(folio);
6221 target->folio = folio;
6441 struct folio *folio;
6451 folio = target.folio;
6452 if (folio_isolate_lru(folio)) {
6453 if (!mem_cgroup_move_account(folio, true,
6458 folio_putback_lru(folio);
6460 folio_unlock(folio);
6461 folio_put(folio);
6463 folio = target.folio;
6464 if (!mem_cgroup_move_account(folio, true,
6469 folio_unlock(folio);
6470 folio_put(folio);
6493 folio = target.folio;
6500 if (folio_test_large(folio))
6502 if (!device && !folio_isolate_lru(folio))
6504 if (!mem_cgroup_move_account(folio, false,
6511 folio_putback_lru(folio);
6513 folio_unlock(folio);
6514 folio_put(folio);
7279 static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
7284 ret = try_charge(memcg, gfp, folio_nr_pages(folio));
7288 mem_cgroup_commit_charge(folio, memcg);
7293 int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
7299 ret = charge_memcg(folio, memcg, gfp);
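
__mem_cgroup_charge() sits behind the mem_cgroup_charge() inline, which
returns 0 immediately when memcg is disabled.  A typical caller sketch
(hypothetical helper):

    /* Sketch: charge a freshly allocated folio against @mm's memcg
     * before publishing it, e.g. into the page cache. */
    static int charge_new_folio(struct folio *folio, struct mm_struct *mm)
    {
            int err = mem_cgroup_charge(folio, mm, GFP_KERNEL);

            if (err)
                    return err;     /* over limit and reclaim failed */
            /* ... install the folio; the charge is dropped when the
             * folio is later uncharged and freed ... */
            return 0;
    }
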
7306 * mem_cgroup_hugetlb_try_charge - try to charge the memcg for a hugetlb folio
7311 * This function is called when allocating a huge page folio to determine if
7313 * as the hugetlb folio itself has not been obtained from the hugetlb pool.
7315 * Once we have obtained the hugetlb folio, we can call
7317 * folio, we should instead call mem_cgroup_cancel_charge() to undo the effect
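
The documented protocol in sketch form (assumes get_mem_cgroup_from_current()
as used by the hugetlb allocation path, and glosses over the -EOPNOTSUPP
return that real callers see when hugetlb accounting is disabled):

    static struct folio *alloc_charged_hugetlb_folio(long nr_pages)
    {
            struct mem_cgroup *memcg = get_mem_cgroup_from_current();
            struct folio *folio = NULL;

            if (mem_cgroup_hugetlb_try_charge(memcg, GFP_KERNEL, nr_pages))
                    goto out;
            /* ... obtain a folio from the hugetlb pool here ... */
            if (!folio) {
                    mem_cgroup_cancel_charge(memcg, nr_pages);
                    goto out;
            }
            mem_cgroup_commit_charge(folio, memcg); /* lines 2966-2976 */
    out:
            mem_cgroup_put(memcg);
            return folio;
    }
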
7341 * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
7342 * @folio: folio to charge.
7345 * @entry: swap entry for which the folio is allocated
7347 * This function charges a folio allocated for swapin. Please call this before
7348 * adding the folio to the swapcache.
7352 int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
7369 ret = charge_memcg(folio, memcg, gfp);
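
The ordering requirement ("before adding the folio to the swapcache") in
sketch form:

    #include <linux/swap.h>

    /* Sketch: charge first, then add to the swapcache.  The memcg is
     * looked up from the swap entry's recorded owner, falling back
     * to @mm. */
    static int prepare_swapin_folio(struct folio *folio,
                                    struct mm_struct *mm, swp_entry_t entry)
    {
            int err = mem_cgroup_swapin_charge_folio(folio, mm,
                                                     GFP_KERNEL, entry);
            if (err)
                    return err;
            /* ... only now add @folio to the swapcache ... */
            return 0;
    }
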
7444 static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
7450 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
7454 * folio memcg or objcg at this point, we have fully
7455 * exclusive access to the folio.
7457 if (folio_memcg_kmem(folio)) {
7458 objcg = __folio_objcg(folio);
7465 memcg = __folio_memcg(folio);
7477 ug->nid = folio_nid(folio);
7483 nr_pages = folio_nr_pages(folio);
7485 if (folio_memcg_kmem(folio)) {
7489 folio->memcg_data = 0;
7497 folio->memcg_data = 0;
7503 void __mem_cgroup_uncharge(struct folio *folio)
7507 /* Don't touch folio->lru of any random page, pre-check: */
7508 if (!folio_memcg(folio))
7512 uncharge_folio(folio, &ug);
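
The pre-check at line 7508 mirrors the gate in the inline wrapper, which (as
best I recall from include/linux/memcontrol.h) reads:

    static inline void mem_cgroup_uncharge(struct folio *folio)
    {
            if (mem_cgroup_disabled())
                    return;
            __mem_cgroup_uncharge(folio);
    }

Most code never calls this directly; the charge is dropped when the last
folio reference is put.
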
7529 * mem_cgroup_replace_folio - Charge a folio's replacement.
7530 * @old: Currently circulating folio.
7531 * @new: Replacement folio.
7533 * Charge @new as a replacement folio for @old. @old will
7539 void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
7553 /* Page cache replacement: new folio already charged? */
7579 * mem_cgroup_migrate - Transfer the memcg data from the old to the new folio.
7580 * @old: Currently circulating folio.
7581 * @new: Replacement folio.
7583 * Transfer the memcg data from the old folio to the new folio for migration.
7584 * The old folio's data info will be cleared. Note that the memory counters
7589 void mem_cgroup_migrate(struct folio *old, struct folio *new)
7603 * Note that it is normal to see !memcg for a hugetlb folio.
7614 * If the old folio is a large folio and is in the split queue, it needs
7616 * split queue in destroy_large_folio() after the memcg of the old folio
7619 * In addition, the old folio is about to be freed after migration, so
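
The two replacement APIs above differ in who pays.  A sketch contrasting them
(hypothetical call sites; both assume the caller holds references to @old
and @new):

    /* mem_cgroup_replace_folio(): pagecache replacement.  @old is
     * uncharged and @new is charged, so the counters move. */
    static void replace_cache_folio(struct folio *old, struct folio *new)
    {
            mem_cgroup_replace_folio(old, new);
    }

    /* mem_cgroup_migrate(): page migration.  memcg_data is handed
     * over and the counters are left alone, since @old is about to
     * be freed anyway (see lines 7614-7619). */
    static void migrate_folio_memcg(struct folio *old, struct folio *new)
    {
            mem_cgroup_migrate(old, new);
    }
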
7792 * @folio: folio whose memsw charge to transfer
7795 * Transfer the memsw charge of @folio to @entry.
7797 void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
7803 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
7804 VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
7812 memcg = folio_memcg(folio);
7814 VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
7824 nr_entries = folio_nr_pages(folio);
7830 VM_BUG_ON_FOLIO(oldid, folio);
7833 folio->memcg_data = 0;
7853 memcg_check_events(memcg, folio_nid(folio));
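
A reclaim-side sketch honouring the asserts at lines 7803-7804 (folio off
the LRU with a frozen refcount; assumes a tree recent enough that swapcache
folios carry their entry in folio->swap):

    /* Sketch: hand @folio's memsw charge over to its swap entry
     * during swapout. */
    static void transfer_charge_to_swap(struct folio *folio)
    {
            swp_entry_t entry = folio->swap;

            mem_cgroup_swapout(folio, entry);
    }
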
7859 * __mem_cgroup_try_charge_swap - try charging swap space for a folio
7860 * @folio: folio being added to swap
7863 * Try to charge @folio's memcg for the swap space at @entry.
7867 int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
7869 unsigned int nr_pages = folio_nr_pages(folio);
7877 memcg = folio_memcg(folio);
7879 VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
7902 VM_BUG_ON_FOLIO(oldid, folio);
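
Callers go through the inline wrapper, which (as best I recall from
include/linux/swap.h) short-circuits when memcg is disabled:

    static inline int mem_cgroup_try_charge_swap(struct folio *folio,
                                                 swp_entry_t entry)
    {
            if (mem_cgroup_disabled())
                    return 0;
            return __mem_cgroup_try_charge_swap(folio, entry);
    }
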
7947 bool mem_cgroup_swap_full(struct folio *folio)
7951 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
7958 memcg = folio_memcg(folio);
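
mem_cgroup_swap_full() feeds swapcache-freeing decisions.  A sketch in the
style of its reclaim/swapin callers (hypothetical helper; the folio must be
locked, per the assert at line 7951):

    static bool swap_slot_worth_freeing(struct folio *folio)
    {
            return mem_cgroup_swap_full(folio) || vm_swap_full();
    }
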