Lines Matching defs:memcg

716 	struct mem_cgroup *memcg;
721 * concurrent memcg offlining. Thanks to the memcg->kmemcg_id indirection
724 * 1. list_lru_add() is called before memcg->kmemcg_id is updated. The
725 * new entry will be reparented to memcg's parent's list_lru.
726 * 2. list_lru_add() is called after memcg->kmemcg_id is updated. The
727 * new entry will be added directly to memcg's parent's list_lru.
732 memcg = mem_cgroup_from_entry(entry);
734 list_lru_add(list_lru, &entry->lru, nid, memcg);
737 lru_size = list_lru_count_one(list_lru, nid, memcg);
738 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
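
Lines 716-738 all fall inside one function, zswap_lru_add(); the matches line up with mm/zswap.c (circa v6.9). A condensed sketch of how they fit together — entry_to_nid() and the nr_zswap_protected bookkeeping come from the surrounding mainline context rather than the matched lines, and the decay arithmetic is elided:

static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
{
        int nid = entry_to_nid(entry);  /* helper assumed from surrounding file */
        struct mem_cgroup *memcg;
        struct lruvec *lruvec;
        unsigned long lru_size;

        /*
         * RCU is sufficient against concurrent memcg offlining: thanks to
         * the memcg->kmemcg_id indirection, an add racing with offline
         * either lands in memcg's own list_lru and is reparented afterwards,
         * or resolves directly to the parent's list_lru.
         */
        rcu_read_lock();
        memcg = mem_cgroup_from_entry(entry);
        /* cannot fail: the per-memcg lru structures were allocated at store time */
        list_lru_add(list_lru, &entry->lru, nid, memcg);

        /* refresh the protection window for this (memcg, node) lruvec */
        lru_size = list_lru_count_one(list_lru, nid, memcg);
        lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
        /* ... bump and decay lruvec's nr_zswap_protected (elided) ... */
        rcu_read_unlock();
}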
754 struct mem_cgroup *memcg;
757 memcg = mem_cgroup_from_entry(entry);
759 list_lru_del(list_lru, &entry->lru, nid, memcg);
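
The removal path at lines 754-759 is the symmetric counterpart; a minimal sketch under the same assumptions:

static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
{
        int nid = entry_to_nid(entry);  /* helper assumed from surrounding file */
        struct mem_cgroup *memcg;

        rcu_read_lock();
        memcg = mem_cgroup_from_entry(entry);
        list_lru_del(list_lru, &entry->lru, nid, memcg);
        rcu_read_unlock();
}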
778 void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
780 /* lock out zswap shrinker walking memcg tree */
782 if (zswap_next_shrink == memcg)
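
zswap_memcg_offline_cleanup() (line 778) only needs to make sure the shrink worker's round-robin cursor stops pinning the dying memcg. A sketch, assuming the cursor zswap_next_shrink is guarded by a spinlock (named zswap_shrink_lock in mainline):

void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
{
        /* lock out zswap shrinker walking memcg tree */
        spin_lock(&zswap_shrink_lock);  /* lock name assumed per mainline */
        /* advance past the dying memcg, dropping the iterator's reference */
        if (zswap_next_shrink == memcg)
                zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
        spin_unlock(&zswap_shrink_lock);
}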
1175 struct lruvec *lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
1180 !mem_cgroup_zswap_writeback_enabled(sc->memcg)) {
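
Lines 1175-1180 open the shrinker's scan callback: resolve the (memcg, node) lruvec, then bail out unless both the global zswap shrinker and this memcg's zswap writeback are enabled. A sketch with the protection checks elided; shrink_memcg_cb and the swapcache flag follow mainline:

static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
                                         struct shrink_control *sc)
{
        struct lruvec *lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
        bool encountered_page_in_swapcache = false;
        unsigned long shrink_ret;

        if (!zswap_shrinker_enabled ||
            !mem_cgroup_zswap_writeback_enabled(sc->memcg)) {
                sc->nr_scanned = 0;
                return SHRINK_STOP;
        }

        /* ... abort if the scan would eat into lruvec's protected region ... */

        /* write entries back to swap, one LRU walk per (memcg, node) pair */
        shrink_ret = list_lru_shrink_walk(&zswap_list_lru, sc, &shrink_memcg_cb,
                                          &encountered_page_in_swapcache);
        if (encountered_page_in_swapcache)
                return SHRINK_STOP;
        return shrink_ret ? shrink_ret : SHRINK_STOP;
}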
1216 struct mem_cgroup *memcg = sc->memcg;
1217 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
1220 if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
1232 * For memcg, use the cgroup-wide ZSWAP stats since we don't
1233 * have them per-node and thus per-lruvec. Careful if memcg is
1234 * runtime-disabled: we can get sc->memcg == NULL, which is ok
1237 * Without memcg, use the zswap pool-wide metrics.
1240 mem_cgroup_flush_stats(memcg);
1241 nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
1242 nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
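
The count callback (lines 1216-1242) sizes the freeable set, and the comment at 1232-1237 decides which statistics feed it: per-cgroup zswap counters normally, pool-wide counters when memcg is runtime-disabled (sc->memcg may then be NULL). A sketch; the pool-wide counters (zswap_pool_total_size, zswap_nr_stored) and the final compression-ratio scaling are reconstructed from mainline of that era, not from the matched lines:

static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
                                          struct shrink_control *sc)
{
        struct mem_cgroup *memcg = sc->memcg;
        struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
        unsigned long nr_backing, nr_stored, nr_freeable;

        if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
                return 0;

        if (!mem_cgroup_disabled()) {
                /* cgroup-wide stats: no per-node/per-lruvec breakdown exists */
                mem_cgroup_flush_stats(memcg);
                nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
                nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
        } else {
                /* memcg runtime-disabled: fall back to pool-wide metrics */
                nr_backing = zswap_pool_total_size >> PAGE_SHIFT;  /* assumed */
                nr_stored = atomic_read(&zswap_nr_stored);         /* assumed */
        }

        if (!nr_stored)
                return 0;

        nr_freeable = list_lru_shrink_count(&zswap_list_lru, sc);
        /* ... subtract lruvec's protected estimate (elided) ... */

        /* scale by the saving factor: the better zswap compresses,
           the less writeback IO is worth the freed memory */
        return mult_frac(nr_freeable, nr_backing, nr_stored);
}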
1290 static int shrink_memcg(struct mem_cgroup *memcg)
1294 if (!mem_cgroup_zswap_writeback_enabled(memcg))
1299 * reclaiming from the parent instead of the dead memcg.
1301 if (memcg && !mem_cgroup_online(memcg))
1307 shrunk += list_lru_walk_one(&zswap_list_lru, nid, memcg,
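
shrink_memcg() (lines 1290-1307) is the synchronous, targeted flavor: write back a little from one memcg's LRU on every node. Sketch; the error codes, the nr_to_walk = 1 batch size, and shrink_memcg_cb follow mainline:

static int shrink_memcg(struct mem_cgroup *memcg)
{
        int nid, shrunk = 0;

        if (!mem_cgroup_zswap_writeback_enabled(memcg))
                return -EINVAL;

        /*
         * Skip zombies: their list_lrus are reparented, so walking them
         * would reclaim from the parent instead of the dead memcg.
         */
        if (memcg && !mem_cgroup_online(memcg))
                return -ENOENT;

        for_each_node_state(nid, N_NORMAL_MEMORY) {
                unsigned long nr_to_walk = 1;

                /* write back at most one entry per node */
                shrunk += list_lru_walk_one(&zswap_list_lru, nid, memcg,
                                            &shrink_memcg_cb, NULL, &nr_to_walk);
        }
        return shrunk ? 0 : -EAGAIN;
}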
1315 struct mem_cgroup *memcg;
1326 memcg = zswap_next_shrink;
1330 * got an offline memcg (or else we risk undoing the effect of the
1331 * zswap memcg offlining cleanup callback). This is not catastrophic
1332 * per se, but it will keep the now offlined memcg hostage for a while.
1334 * Note that if we got an online memcg, we will keep the extra
1336 * is dropped by the zswap memcg offlining callback, ensuring that the
1337 * memcg is not killed when we are reclaiming.
1339 if (!memcg) {
1347 if (!mem_cgroup_tryget_online(memcg)) {
1349 mem_cgroup_iter_break(NULL, memcg);
1360 ret = shrink_memcg(memcg);
1362 mem_cgroup_put(memcg);
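
Lines 1315-1362 come from shrink_worker(), the global-limit reclaim path: a round-robin walk of the memcg tree with the reference dance the comment at 1330-1337 describes. Taking the extra reference only via mem_cgroup_tryget_online() is what keeps the worker from re-pinning a memcg the offline callback just released. A condensed sketch (lock name and MAX_RECLAIM_RETRIES cap per mainline):

static void shrink_worker(struct work_struct *w)
{
        struct mem_cgroup *memcg;
        int ret, failures = 0;

        /* global reclaim selects cgroups in round-robin fashion */
        do {
                spin_lock(&zswap_shrink_lock);  /* same lock as offline cleanup */
                zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
                memcg = zswap_next_shrink;

                if (!memcg) {
                        /* full round trip through the tree */
                        spin_unlock(&zswap_shrink_lock);
                        if (++failures == MAX_RECLAIM_RETRIES)
                                break;
                        goto resched;
                }

                if (!mem_cgroup_tryget_online(memcg)) {
                        /* offline: drop the iterator reference, don't re-pin */
                        mem_cgroup_iter_break(NULL, memcg);
                        zswap_next_shrink = NULL;
                        spin_unlock(&zswap_shrink_lock);
                        if (++failures == MAX_RECLAIM_RETRIES)
                                break;
                        goto resched;
                }
                spin_unlock(&zswap_shrink_lock);

                ret = shrink_memcg(memcg);
                /* drop the extra reference taken above */
                mem_cgroup_put(memcg);

                if (ret == -EINVAL)
                        break;
                if (ret && ++failures == MAX_RECLAIM_RETRIES)
                        break;
resched:
                cond_resched();
        } while (!zswap_can_accept());
}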
1419 struct mem_cgroup *memcg = NULL;
1435 memcg = get_mem_cgroup_from_objcg(objcg);
1436 if (shrink_memcg(memcg)) {
1437 mem_cgroup_put(memcg);
1440 mem_cgroup_put(memcg);
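
The matches around line 1419 sit in zswap_store(): when obj_cgroup_may_zswap() reports the cgroup's zswap limit is hit, the store attempts one round of synchronous reclaim against that memcg before rejecting. A fragment sketch (the reject label and the rest of the store path are elided):

        struct mem_cgroup *memcg = NULL;
        ...
        objcg = get_obj_cgroup_from_folio(folio);
        if (objcg && !obj_cgroup_may_zswap(objcg)) {
                memcg = get_mem_cgroup_from_objcg(objcg);
                if (shrink_memcg(memcg)) {
                        /* nothing could be written back: refuse the store */
                        mem_cgroup_put(memcg);
                        goto reject;
                }
                mem_cgroup_put(memcg);
        }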
1466 memcg = get_mem_cgroup_from_objcg(objcg);
1467 if (memcg_list_lru_alloc(memcg, &zswap_list_lru, GFP_KERNEL)) {
1468 mem_cgroup_put(memcg);
1471 mem_cgroup_put(memcg);
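
Finally, lines 1466-1471 (still in zswap_store()) pre-allocate the per-memcg list_lru structures with GFP_KERNEL while sleeping is still allowed; this is what lets the later zswap_lru_add(), which runs under RCU, assume list_lru_add() cannot fail. Fragment sketch:

        if (objcg) {
                memcg = get_mem_cgroup_from_objcg(objcg);
                /* sleepable allocation now so the RCU-side add never fails */
                if (memcg_list_lru_alloc(memcg, &zswap_list_lru, GFP_KERNEL)) {
                        mem_cgroup_put(memcg);
                        goto put_pool;
                }
                mem_cgroup_put(memcg);
        }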