Lines Matching defs:memcg

218  * completely broken with the legacy memcg and direct stalling in
279 * single memcg. For example, a memcg-aware shrinker can free one object
280 * charged to the target memcg, causing an entire page to be freed.
281 * If we count the entire page as reclaimed from the memcg, we end up
285 * from the target memcg; preventing unnecessary retries during memcg
289 * charged to the target memcg, we end up underestimating the reclaimed
296 * memcg reclaim, to make reporting more accurate and reduce
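
The comment excerpted above (source lines 279-296) explains why pages freed by shrinkers are credited to the reclaim counters only during global reclaim. Below is a minimal sketch of how that policy can be applied, modeled on the kernel's flush_reclaim_state() helper; the exact guard predicate and the field name (reclaimed, called reclaimed_slab in older kernels) vary between versions, so treat this as an approximation rather than the file's own code:

    static void flush_reclaim_state_sketch(struct scan_control *sc)
    {
            struct reclaim_state *rs = current->reclaim_state;

            /*
             * Only credit shrinker-freed pages for kswapd or non-cgroup
             * reclaim; for memcg reclaim the freed pages may not be charged
             * to the target memcg, as the comment above describes.
             */
            if (rs && (current_is_kswapd() || !cgroup_reclaim(sc))) {
                    sc->nr_reclaimed += rs->reclaimed;
                    rs->reclaimed = 0;
            }
    }
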
317 static inline bool can_reclaim_anon_pages(struct mem_cgroup *memcg,
321 if (memcg == NULL) {
323 * For non-memcg reclaim, is there
329 /* Is the memcg below its swap limit? */
330 if (mem_cgroup_get_nr_swap_pages(memcg) > 0)
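
Source lines 317-330 belong to can_reclaim_anon_pages(), which decides whether the anon LRUs are worth scanning at all. A rough reconstruction, assuming the usual shape in this kernel range (the demotion fallback via can_demote() is an assumption):

    static inline bool can_reclaim_anon_pages(struct mem_cgroup *memcg,
                                              int nid, struct scan_control *sc)
    {
            if (memcg == NULL) {
                    /* For non-memcg reclaim, is there space in any swap device? */
                    if (get_nr_swap_pages() > 0)
                            return true;
            } else {
                    /* Is the memcg below its swap limit? */
                    if (mem_cgroup_get_nr_swap_pages(memcg) > 0)
                            return true;
            }

            /*
             * Anon pages that cannot be swapped may still be reclaimable
             * by demotion to a lower memory tier, if the node allows it.
             */
            return can_demote(nid, sc);
    }
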
389 struct mem_cgroup *memcg = NULL;
391 memcg = mem_cgroup_iter(NULL, NULL, NULL);
393 freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
394 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
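
Source lines 389-394 are the full-hierarchy slab shrink used when slab caches are dropped: every memcg is visited with mem_cgroup_iter() and shrink_slab() runs once per memcg for the node, since memcg-aware shrinkers track their objects per memcg. A sketch of the loop, assuming it sits in a drop_slab()-style helper (helper name is illustrative):

    static unsigned long drop_slab_node_sketch(int nid)
    {
            unsigned long freed = 0;
            struct mem_cgroup *memcg = NULL;

            /* A NULL root walks the entire memcg hierarchy. */
            memcg = mem_cgroup_iter(NULL, NULL, NULL);
            do {
                    freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
            } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);

            return freed;
    }
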
1100 * 2) Global or new memcg reclaim encounters a folio that is
1113 * 3) Legacy memcg encounters a folio that already has the
1114 * reclaim flag set. memcg does not have any dirty folio
1149 * memcg reclaim reaches the tests above,
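
The numbered cases quoted from around source lines 1100-1149 describe how shrink_folio_list() treats folios found under writeback, and tie back to the "completely broken with the legacy memcg" remark at line 218 (the comment above writeback_throttling_sane()). A heavily simplified sketch of the branch structure only; the real code also checks may_enter_fs(), unlocks before waiting, and retries the folio in case 3:

    if (folio_test_writeback(folio)) {
            if (current_is_kswapd() && folio_test_reclaim(folio) &&
                test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
                    /* 1) kswapd met a folio it already flagged: count it as
                     *    immediate reclaim and keep it on the active list. */
                    stat->nr_immediate++;
            } else if (writeback_throttling_sane(sc) ||
                       !folio_test_reclaim(folio)) {
                    /* 2) global or cgroup v2 reclaim: tag the folio so the
                     *    flusher prioritizes it, count it, and move on. */
                    folio_set_reclaim(folio);
                    stat->nr_writeback++;
            } else {
                    /* 3) legacy memcg has no dirty-folio throttling, so the
                     *    only way to make progress is to wait here. */
                    folio_wait_writeback(folio);
            }
    }
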
1864 * inhibits memcg migration).
2247 * Flush the memory cgroup stats, so that we read accurate per-memcg
2354 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
2356 int swappiness = mem_cgroup_swappiness(memcg);
2364 if (!sc->may_swap || !can_reclaim_anon_pages(memcg, pgdat->node_id, sc)) {
2371 * swappiness, but memcg users want to use this knob to
2446 mem_cgroup_protection(sc->target_mem_cgroup, memcg,
2479 unsigned long cgroup_size = mem_cgroup_size(memcg);
2512 if (!scan && !mem_cgroup_online(memcg))
2527 scan = mem_cgroup_online(memcg) ?
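
The fragments from source lines 2354-2527 are from get_scan_count(): swapping is skipped entirely when !sc->may_swap or can_reclaim_anon_pages() fails (line 2364), and each memcg's memory.min/memory.low protection scales how much of its LRUs gets scanned. A condensed sketch of the proportional-protection step, assuming the usual formula; locals such as lru belong to the surrounding loop and the protection selection is simplified:

    unsigned long lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
    unsigned long min, low, scan;

    mem_cgroup_protection(sc->target_mem_cgroup, memcg, &min, &low);

    if (min || low) {
            unsigned long protection = sc->memcg_low_reclaim ? min : low;
            unsigned long cgroup_size = mem_cgroup_size(memcg);

            /*
             * Scan the part of the LRU not covered by protection, in
             * proportion to how far usage exceeds the protected amount.
             */
            cgroup_size = max(cgroup_size, protection);
            scan = lruvec_size - lruvec_size * protection / (cgroup_size + 1);
    } else {
            scan = lruvec_size;
    }

    scan >>= sc->priority;

    /* Offlined memcgs still get a minimum scan so their memory can go. */
    if (!scan && !mem_cgroup_online(memcg))
            scan = min(lruvec_size, SWAP_CLUSTER_MAX);
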
2605 static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid)
2610 if (memcg) {
2611 struct lruvec *lruvec = &memcg->nodeinfo[nid]->lruvec;
2627 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
2634 mem_cgroup_get_nr_swap_pages(memcg) < MIN_LRU_BATCH)
2637 return mem_cgroup_swappiness(memcg);
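
Source lines 2605-2637 are the MGLRU helpers get_lruvec() and get_swappiness(). The latter is small enough to reconstruct; a sketch assuming the usual form, where MIN_LRU_BATCH keeps anon scanning off when swap space is nearly exhausted and demotion is not possible:

    static int get_swappiness(struct lruvec *lruvec, struct scan_control *sc)
    {
            struct mem_cgroup *memcg = lruvec_memcg(lruvec);
            struct pglist_data *pgdat = lruvec_pgdat(lruvec);

            if (!sc->may_swap)
                    return 0;

            /* Not worth aging/evicting anon without demotion or at least
             * one batch worth of free swap space for this memcg. */
            if (!can_demote(pgdat->node_id, sc) &&
                mem_cgroup_get_nr_swap_pages(memcg) < MIN_LRU_BATCH)
                    return 0;

            return mem_cgroup_swappiness(memcg);
    }
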
2753 static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg)
2761 if (memcg)
2762 return &memcg->mm_list;
2795 struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);
2796 struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
2800 VM_WARN_ON_ONCE(mm->lru_gen.memcg);
2801 mm->lru_gen.memcg = memcg;
2806 struct lruvec *lruvec = get_lruvec(memcg, nid);
2823 struct mem_cgroup *memcg = NULL;
2829 memcg = mm->lru_gen.memcg;
2831 mm_list = get_mm_list(memcg);
2836 struct lruvec *lruvec = get_lruvec(memcg, nid);
2853 mem_cgroup_put(mm->lru_gen.memcg);
2854 mm->lru_gen.memcg = NULL;
2861 struct mem_cgroup *memcg;
2872 if (!mm->lru_gen.memcg)
2876 memcg = mem_cgroup_from_task(task);
2878 if (memcg == mm->lru_gen.memcg)
2890 static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg)
2938 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
2939 struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
2999 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
3000 struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
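
Source lines 2753-3000 deal with the per-memcg lru_gen mm_list: each memcg keeps a FIFO of mm_structs whose page tables the MGLRU walkers scan, an mm is attached to the memcg of its owner (the reference comes from get_mem_cgroup_from_mm() and is dropped with mem_cgroup_put()), and lru_gen_migrate_mm() re-homes it when the task moves between cgroups. A sketch of the lookup helper, assuming the CONFIG_MEMCG layout (the second definition at source line 2890 covers the other configuration):

    static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg)
    {
            static struct lru_gen_mm_list mm_list = {
                    .fifo = LIST_HEAD_INIT(mm_list.fifo),
                    .lock = __SPIN_LOCK_UNLOCKED(mm_list.lock),
            };

    #ifdef CONFIG_MEMCG
            /* Each memcg has its own list of mm_structs to walk. */
            if (memcg)
                    return &memcg->mm_list;
    #endif
            /* Global fallback, used when memcg is disabled or not built in. */
            return &mm_list;
    }
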
3315 static struct folio *get_pfn_folio(unsigned long pfn, struct mem_cgroup *memcg,
3328 if (folio_memcg_rcu(folio) != memcg)
3356 struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec);
3388 folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap);
3424 struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec);
3469 folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap);
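
get_pfn_folio() (source line 3315) is the filter the page-table walkers at 3356-3469 apply to every PTE/PMD they visit: a PFN is only counted if its folio sits on the node being walked, is charged to the memcg being aged, and is of a type the walk can actually reclaim. A sketch of the usual checks; the exact ordering and any early PFN-range test are approximations:

    static struct folio *get_pfn_folio(unsigned long pfn, struct mem_cgroup *memcg,
                                       struct pglist_data *pgdat, bool can_swap)
    {
            struct folio *folio = pfn_folio(pfn);

            if (folio_nid(folio) != pgdat->node_id)
                    return NULL;

            /* The walkers hold rcu_read_lock(), so folio_memcg_rcu() is safe. */
            if (folio_memcg_rcu(folio) != memcg)
                    return NULL;

            /* File VMAs can contain anon pages from COW. */
            if (!folio_is_file_lru(folio) && !can_swap)
                    return NULL;

            return folio;
    }
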
3630 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
3644 if (!mem_cgroup_trylock_pages(memcg))
3909 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
3925 return mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
3933 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
3946 mem_cgroup_calculate_protection(NULL, memcg);
3948 return !mem_cgroup_below_min(NULL, memcg);
3956 struct mem_cgroup *memcg;
3965 memcg = mem_cgroup_iter(NULL, NULL, NULL);
3967 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
3970 mem_cgroup_iter_break(NULL, memcg);
3975 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
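
Source lines 3956-3975 show the MGLRU aging path walking every memcg that has pages on the node. Worth noting is mem_cgroup_iter_break() on the early exit, which drops the reference the iterator holds on the current memcg. The generic pattern, with the per-lruvec work folded into a hypothetical helper:

    struct mem_cgroup *memcg;

    memcg = mem_cgroup_iter(NULL, NULL, NULL);
    do {
            struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);

            if (!age_lruvec_sketch(lruvec, sc)) {
                    /* Breaking out early must go through iter_break() so
                     * the reference on 'memcg' is released. */
                    mem_cgroup_iter_break(NULL, memcg);
                    return;
            }

            cond_resched();
    } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
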
4016 struct mem_cgroup *memcg = folio_memcg(folio);
4018 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
4051 if (!mem_cgroup_trylock_pages(memcg))
4069 folio = get_pfn_folio(pfn, memcg, pgdat, can_swap);
4107 * memcg LRU
4167 void lru_gen_online_memcg(struct mem_cgroup *memcg)
4175 struct lruvec *lruvec = get_lruvec(memcg, nid);
4192 void lru_gen_offline_memcg(struct mem_cgroup *memcg)
4197 struct lruvec *lruvec = get_lruvec(memcg, nid);
4203 void lru_gen_release_memcg(struct mem_cgroup *memcg)
4210 struct lruvec *lruvec = get_lruvec(memcg, nid);
4229 void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid)
4231 struct lruvec *lruvec = get_lruvec(memcg, nid);
4359 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4414 __count_memcg_events(memcg, item, isolated);
4415 __count_memcg_events(memcg, PGREFILL, sorted);
4536 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4601 __count_memcg_events(memcg, item, reclaimed);
4678 * 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg
4685 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4688 if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg))
4693 /* try to scrape all its memory if this memcg was deleted */
4694 if (nr_to_scan && !mem_cgroup_online(memcg))
4710 /* don't abort memcg reclaim to ensure fairness */
4772 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
4775 mem_cgroup_calculate_protection(NULL, memcg);
4777 if (mem_cgroup_below_min(NULL, memcg))
4780 if (mem_cgroup_below_low(NULL, memcg)) {
4785 memcg_memory_event(memcg, MEMCG_LOW);
4790 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority);
4793 vmpressure(sc->gfp_mask, memcg, false, sc->nr_scanned - scanned,
4798 if (success && mem_cgroup_online(memcg))
4817 struct mem_cgroup *memcg;
4824 memcg = NULL;
4834 mem_cgroup_put(memcg);
4835 memcg = NULL;
4841 memcg = lruvec_memcg(lruvec);
4843 if (!mem_cgroup_tryget(memcg)) {
4844 lru_gen_release_memcg(memcg);
4845 memcg = NULL;
4864 mem_cgroup_put(memcg);
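
Lines 4817-4864 are from the memcg-LRU reclaim loop: the next lruvec is picked under RCU, so its memcg must be pinned with mem_cgroup_tryget() before reclaiming from it, and a memcg that is already being freed is dropped from the memcg LRU instead of being reclaimed. A condensed sketch of that pattern (the generation/bin bookkeeping around it is omitted):

    rcu_read_lock();
    memcg = lruvec_memcg(lruvec);

    if (!mem_cgroup_tryget(memcg)) {
            /* The memcg is on its way out: remove it from the memcg LRU
             * and let the caller pick another candidate. */
            lru_gen_release_memcg(memcg);
            memcg = NULL;
    }
    rcu_read_unlock();

    if (!memcg)
            return false;   /* caller retries with another lruvec */

    /* ... shrink this lruvec (shrink_one()) with the reference held ... */

    mem_cgroup_put(memcg);
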
5055 struct mem_cgroup *memcg;
5070 memcg = mem_cgroup_iter(NULL, NULL, NULL);
5075 struct lruvec *lruvec = get_lruvec(memcg, nid);
5094 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
5190 struct mem_cgroup *memcg;
5197 memcg = mem_cgroup_iter(NULL, NULL, NULL);
5203 return get_lruvec(memcg, nid);
5205 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
5222 struct mem_cgroup *memcg = lruvec_memcg(v);
5228 memcg = mem_cgroup_iter(NULL, memcg, NULL);
5229 if (!memcg)
5235 return get_lruvec(memcg, nid);
5301 struct mem_cgroup *memcg = lruvec_memcg(lruvec);
5306 const char *path = memcg ? m->private : "";
5309 if (memcg)
5310 cgroup_path(memcg->css.cgroup, m->private, PATH_MAX);
5312 seq_printf(m, "memcg %5hu %s\n", mem_cgroup_id(memcg), path);
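
The seq_printf() at source line 5312 emits the per-memcg header of the lru_gen debugfs interface (/sys/kernel/debug/lru_gen), with an empty path when there is no memcg. Per Documentation/admin-guide/mm/multigen_lru.rst, the output read back from that file is laid out roughly as:

    memcg  memcg_id  memcg_path
       node  node_id
           min_gen_nr  age_in_ms  nr_anon_pages  nr_file_pages
           ...
           max_gen_nr  age_in_ms  nr_anon_pages  nr_file_pages
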
5410 struct mem_cgroup *memcg = NULL;
5418 memcg = mem_cgroup_from_id(memcg_id);
5419 if (!mem_cgroup_tryget(memcg))
5420 memcg = NULL;
5424 if (!memcg)
5428 if (memcg_id != mem_cgroup_id(memcg))
5431 lruvec = get_lruvec(memcg, nid);
5447 mem_cgroup_put(memcg);
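
Lines 5410-5447 resolve a memcg ID written to the debugfs interface into a pinned memcg. The pattern is the standard one: look the ID up under RCU, take a reference with mem_cgroup_tryget(), then compare against mem_cgroup_id() so a mismatched or stale ID is rejected rather than acted on. A sketch, with the command dispatch elided:

    struct mem_cgroup *memcg = NULL;
    struct lruvec *lruvec;
    int err = -EINVAL;

    rcu_read_lock();
    memcg = mem_cgroup_from_id(memcg_id);
    if (!mem_cgroup_tryget(memcg))
            memcg = NULL;
    rcu_read_unlock();

    if (!memcg)
            return -EINVAL;

    if (memcg_id != mem_cgroup_id(memcg))
            goto done;

    lruvec = get_lruvec(memcg, nid);
    /* ... run the requested aging/eviction command on lruvec ... */
    done:
    mem_cgroup_put(memcg);
    return err;
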
5583 void lru_gen_init_memcg(struct mem_cgroup *memcg)
5585 struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
5594 void lru_gen_exit_memcg(struct mem_cgroup *memcg)
5598 struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
5603 struct lruvec *lruvec = get_lruvec(memcg, nid);
5685 * do a batch of work at once. For memcg reclaim one check is made to
5714 * For kswapd and memcg, reclaim at least the number of pages
5848 struct mem_cgroup *memcg;
5850 memcg = mem_cgroup_iter(target_memcg, NULL, NULL);
5852 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
5864 mem_cgroup_calculate_protection(target_memcg, memcg);
5866 if (mem_cgroup_below_min(target_memcg, memcg)) {
5872 } else if (mem_cgroup_below_low(target_memcg, memcg)) {
5883 memcg_memory_event(memcg, MEMCG_LOW);
5891 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
5896 vmpressure(sc->gfp_mask, memcg, false,
5900 } while ((memcg = mem_cgroup_iter(target_memcg, memcg, NULL)));
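
Lines 5848-5900 are the classic (non-MGLRU) per-node loop, shrink_node_memcgs(): every memcg under the reclaim target is visited, memory.min/memory.low protection is evaluated against the target, and both the LRUs and the slab caches charged to the memcg are shrunk. An abbreviated sketch; statistics, flushing and cond_resched() are trimmed:

    static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
    {
            struct mem_cgroup *target_memcg = sc->target_mem_cgroup;
            struct mem_cgroup *memcg;

            memcg = mem_cgroup_iter(target_memcg, NULL, NULL);
            do {
                    struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
                    unsigned long reclaimed = sc->nr_reclaimed;
                    unsigned long scanned = sc->nr_scanned;

                    mem_cgroup_calculate_protection(target_memcg, memcg);

                    if (mem_cgroup_below_min(target_memcg, memcg)) {
                            /* Hard protection: skip entirely. */
                            continue;
                    } else if (mem_cgroup_below_low(target_memcg, memcg)) {
                            /* Soft protection: honor it unless reclaim would
                             * otherwise fail, then emit the LOW event. */
                            if (!sc->memcg_low_reclaim) {
                                    sc->memcg_low_skipped = 1;
                                    continue;
                            }
                            memcg_memory_event(memcg, MEMCG_LOW);
                    }

                    shrink_lruvec(lruvec, sc);
                    shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority);

                    /* Record this group's reclaim efficiency. */
                    vmpressure(sc->gfp_mask, memcg, false,
                               sc->nr_scanned - scanned,
                               sc->nr_reclaimed - reclaimed);
            } while ((memcg = mem_cgroup_iter(target_memcg, memcg, NULL)));
    }
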
5975 * Tag a node/memcg as congested if all the dirty pages were marked
5978 * Legacy memcg will stall in page writeback so avoid forcibly
6150 * and balancing, not for a memcg's limit.
6476 unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
6481 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
6484 .target_mem_cgroup = memcg,
6515 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
6527 .target_mem_cgroup = memcg,
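
try_to_free_mem_cgroup_pages() at source line 6515 is the entry point used for memcg limit reclaim and memory.reclaim; the decisive part is setting .target_mem_cgroup, which makes everything downstream (shrink_node_memcgs(), the protection checks, vmpressure()) cgroup-scoped. A pared-down sketch of the scan_control it builds; only a subset of fields is shown, and their exact set varies by version:

    struct scan_control sc = {
            .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
            .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
                        (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
            .reclaim_idx = MAX_NR_ZONES - 1,
            .target_mem_cgroup = memcg,     /* scope reclaim to this memcg */
            .priority = DEF_PRIORITY,
            .may_unmap = 1,
            .may_swap = !!(reclaim_options & MEMCG_RECLAIM_MAY_SWAP),
    };
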
6557 struct mem_cgroup *memcg;
6572 memcg = mem_cgroup_iter(NULL, NULL, NULL);
6574 lruvec = mem_cgroup_lruvec(memcg, pgdat);
6577 memcg = mem_cgroup_iter(NULL, memcg, NULL);
6578 } while (memcg);
7528 /* block memcg migration while the folio moves between lrus */