Lines Matching defs:memcg

89 struct mem_cgroup *memcg)
96 l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
100 set_shrinker_bit(memcg, nid, lru_shrinker_id(lru));
113 struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
116 return list_lru_add(lru, item, nid, memcg);
121 struct mem_cgroup *memcg)
128 l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
143 struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
146 return list_lru_del(lru, item, nid, memcg);
166 int nid, struct mem_cgroup *memcg)
172 l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
258 list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
266 ret = __list_lru_walk_one(lru, nid, memcg_kmem_id(memcg), isolate,
274 list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
282 ret = __list_lru_walk_one(lru, nid, memcg_kmem_id(memcg), isolate,
420 void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent)
424 int src_idx = memcg->kmemcg_id;
440 css_for_each_descendant_pre(css, &memcg->css) {
454 static inline bool memcg_list_lru_allocated(struct mem_cgroup *memcg,
457 int idx = memcg->kmemcg_id;
462 int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
469 struct mem_cgroup *memcg;
473 if (!list_lru_memcg_aware(lru) || memcg_list_lru_allocated(memcg, lru))
477 table = kmalloc_array(memcg->css.cgroup->level, sizeof(*table), gfp);
486 for (i = 0; memcg; memcg = parent_mem_cgroup(memcg), i++) {
487 if (memcg_list_lru_allocated(memcg, lru))
490 table[i].memcg = memcg;
502 int index = READ_ONCE(table[i].memcg->kmemcg_id);
517 * The xas lock has been released, this memcg
519 * memcg id. More details see the comments
522 index = READ_ONCE(table[i].memcg->kmemcg_id);
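The matches above appear to come from mm/list_lru.c in the Linux kernel and cover the memcg-aware side of the list_lru API: the per-memcg list lookup and shrinker-bit marking in the add/del paths (lines 96-146), the per-node, per-memcg count and walk helpers (lines 166-282), and the lazy per-memcg allocation and reparenting machinery (lines 420-522). What follows is a minimal sketch of how a subsystem might consume that API; it is not taken from the file above. The demo_* names are invented for illustration, and the calls assume the newer signatures shown in the listing (roughly v6.8+), where list_lru_add()/list_lru_del() take an explicit nid/memcg pair and the *_obj() wrappers derive both from the object (lines 113-116 and 143-146).

// SPDX-License-Identifier: GPL-2.0
/*
 * Hypothetical consumer of the memcg-aware list_lru API referenced by the
 * matches above.  All demo_* names are invented for illustration only.
 */
#include <linux/module.h>
#include <linux/list_lru.h>
#include <linux/memcontrol.h>

struct demo_obj {
	struct list_head lru;		/* links into the per-node, per-memcg list */
	/* ... subsystem-specific fields ... */
};

static struct list_lru demo_lru;

/* Park an idle object on the LRU so memcg-targeted reclaim can find it. */
static __maybe_unused void demo_park(struct demo_obj *obj)
{
	/* Derives nid and memcg from the object, then calls list_lru_add(). */
	list_lru_add_obj(&demo_lru, &obj->lru);
}

/* Take an object back off the LRU before reusing or freeing it. */
static __maybe_unused void demo_unpark(struct demo_obj *obj)
{
	list_lru_del_obj(&demo_lru, &obj->lru);
}

/* Per-node, per-memcg occupancy, as a shrinker's ->count_objects would use. */
static __maybe_unused unsigned long demo_count(int nid, struct mem_cgroup *memcg)
{
	return list_lru_count_one(&demo_lru, nid, memcg);
}

static int __init demo_init(void)
{
	/*
	 * memcg-aware init: per-memcg lists are allocated lazily (see
	 * memcg_list_lru_alloc() at line 462 above).  A real user would pass
	 * its registered shrinker instead of NULL so that adding the first
	 * item can flag it via set_shrinker_bit() (line 100 above).
	 */
	return list_lru_init_memcg(&demo_lru, NULL);
}

static void __exit demo_exit(void)
{
	list_lru_destroy(&demo_lru);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");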