Lines matching defs:memcg — definition/use sites in the kernel shrinker code (mm/shrinker.c as of v6.7, after the lockless-shrinker rework)

62 void free_shrinker_info(struct mem_cgroup *memcg)
69 pn = memcg->nodeinfo[nid];
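
Lines 62 and 69 belong to the teardown path. The sketches interleaved through this listing reconstruct the surrounding functions from the fragments plus general knowledge of this code; they are illustrative, not verbatim upstream source. Here, shrinker_info_free() is an assumed helper that releases the units and the array itself:

void free_shrinker_info(struct mem_cgroup *memcg)
{
        struct mem_cgroup_per_node *pn;
        struct shrinker_info *info;
        int nid;

        for_each_node(nid) {
                pn = memcg->nodeinfo[nid];
                /* Callers guarantee exclusive access, hence the bare "true". */
                info = rcu_dereference_protected(pn->shrinker_info, true);
                shrinker_info_free(info);               /* assumed helper */
                rcu_assign_pointer(pn->shrinker_info, NULL);
        }
}
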
77 int alloc_shrinker_info(struct mem_cgroup *memcg)
92 rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info);
100 free_shrinker_info(memcg);
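
Lines 77-100 are the matching allocation path: one shrinker_info per NUMA node, published with rcu_assign_pointer() (line 92), with free_shrinker_info() (line 100) unwinding a partial allocation on failure. A sketch, with shrinker_info_size() standing in for however the tree computes the allocation size:

int alloc_shrinker_info(struct mem_cgroup *memcg)
{
        int nid, ret = 0;

        mutex_lock(&shrinker_mutex);
        for_each_node(nid) {
                struct shrinker_info *info;

                /* Sized for the current shrinker_nr_max; grown later on demand. */
                info = kvzalloc_node(shrinker_info_size(shrinker_nr_max), /* assumed helper */
                                     GFP_KERNEL, nid);
                if (!info) {
                        ret = -ENOMEM;
                        break;
                }
                rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info);
        }
        mutex_unlock(&shrinker_mutex);

        if (ret)
                free_shrinker_info(memcg);      /* unwind the nodes done so far */
        return ret;
}
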
104 static struct shrinker_info *shrinker_info_protected(struct mem_cgroup *memcg,
107 return rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
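
shrinker_info_protected() (lines 104-107) is the non-RCU accessor: it is only legal while shrinker_mutex is held, which is exactly what the lockdep condition asserts. Reconstructed, this is likely the whole function:

static struct shrinker_info *shrinker_info_protected(struct mem_cgroup *memcg,
                                                     int nid)
{
        return rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
                                         lockdep_is_held(&shrinker_mutex));
}
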
111 static int expand_one_shrinker_info(struct mem_cgroup *memcg, int new_size,
119 pn = memcg->nodeinfo[nid];
120 old = shrinker_info_protected(memcg, nid);
121 /* Not yet online memcg */
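
expand_one_shrinker_info() (lines 111-121) grows one memcg's per-node arrays when a new shrinker id exceeds the current capacity. A sketch, assuming a shrinker_unit_alloc() helper that populates units for the newly added tail; the early return on line 121's "not yet online" case is safe because an offline memcg allocates a correctly sized info when it comes online:

static int expand_one_shrinker_info(struct mem_cgroup *memcg, int new_size,
                                    int old_size, int new_nr_max)
{
        struct shrinker_info *new, *old;
        struct mem_cgroup_per_node *pn;
        int nid;

        for_each_node(nid) {
                pn = memcg->nodeinfo[nid];
                old = shrinker_info_protected(memcg, nid);
                /* Not yet online memcg */
                if (!old)
                        return 0;

                new = kvzalloc_node(sizeof(*new) + new_size, GFP_KERNEL, nid);
                if (!new)
                        return -ENOMEM;

                new->map_nr_max = new_nr_max;

                /* Keep the old unit pointers, allocate units for the new tail. */
                memcpy(new->unit, old->unit, old_size);
                if (shrinker_unit_alloc(new, old, nid)) {       /* assumed helper */
                        kvfree(new);
                        return -ENOMEM;
                }

                rcu_assign_pointer(pn->shrinker_info, new);
                kvfree_rcu(old, rcu);
        }
        return 0;
}
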
153 struct mem_cgroup *memcg;
163 memcg = mem_cgroup_iter(NULL, NULL, NULL);
165 ret = expand_one_shrinker_info(memcg, new_size, old_size,
168 mem_cgroup_iter_break(NULL, memcg);
171 } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
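
Lines 153-171 show that expansion being applied to every cgroup in the system. This is the canonical full-tree walk: mem_cgroup_iter() takes and drops css references as it advances, so an early exit must go through mem_cgroup_iter_break() (line 168) to drop the reference held on the current position. The pattern, sketched:

        memcg = mem_cgroup_iter(NULL, NULL, NULL);
        do {
                ret = expand_one_shrinker_info(memcg, new_size, old_size,
                                               new_nr_max);
                if (ret) {
                        mem_cgroup_iter_break(NULL, memcg);
                        break;
                }
        } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
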
194 void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
196 if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
201 info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info);
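
set_shrinker_bit() (lines 194-201) marks a shrinker as having work for this memcg/node pair; it is called, e.g., when an object is added to a memcg-aware list_lru, so the next memcg reclaim pass visits that shrinker. A sketch, assuming the unit layout of recent kernels where each shrinker_info_unit covers SHRINKER_UNIT_BITS ids:

void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
{
        if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
                struct shrinker_info *info;
                struct shrinker_info_unit *unit;

                rcu_read_lock();
                info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info);
                if (!WARN_ON_ONCE(shrinker_id >= info->map_nr_max)) {
                        unit = info->unit[shrinker_id / SHRINKER_UNIT_BITS];
                        /* Pairs with the barrier on the shrink side. */
                        smp_mb__before_atomic();
                        set_bit(shrinker_id % SHRINKER_UNIT_BITS, unit->map);
                }
                rcu_read_unlock();
        }
}
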
251 struct mem_cgroup *memcg)
258 info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info);
267 struct mem_cgroup *memcg)
274 info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info);
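
Lines 251-274 are the per-memcg deferred-work counters, stored in the same shrinker_info_unit as the bitmap. The xchg variant claims the entire backlog for the current reclaim pass; add_nr_deferred_memcg() is the mirror image that banks unscanned work back, using atomic_long_add_return() instead of the xchg. A sketch of the former:

static long xchg_nr_deferred_memcg(int nid, struct shrinker *shrinker,
                                   struct mem_cgroup *memcg)
{
        struct shrinker_info *info;
        struct shrinker_info_unit *unit;
        long nr;

        rcu_read_lock();
        info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info);
        unit = info->unit[shrinker->id / SHRINKER_UNIT_BITS];
        /* Claim the whole deferred count; unscanned work is added back later. */
        nr = atomic_long_xchg(
                &unit->nr_deferred[shrinker->id % SHRINKER_UNIT_BITS], 0);
        rcu_read_unlock();

        return nr;
}
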
283 void reparent_shrinker_deferred(struct mem_cgroup *memcg)
291 parent = parent_mem_cgroup(memcg);
298 child_info = shrinker_info_protected(memcg, nid);
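
reparent_shrinker_deferred() (lines 283-298) runs when a memcg goes offline: its banked deferred counts would otherwise be stranded, so they are folded into the parent (the root memcg if there is no parent). shrinker_mutex is held so that neither info can be reallocated by a concurrent expansion. Sketched:

void reparent_shrinker_deferred(struct mem_cgroup *memcg)
{
        int nid, index, offset;
        struct mem_cgroup *parent;
        struct shrinker_info *child_info, *parent_info;

        parent = parent_mem_cgroup(memcg);
        if (!parent)
                parent = root_mem_cgroup;

        mutex_lock(&shrinker_mutex);
        for_each_node(nid) {
                child_info = shrinker_info_protected(memcg, nid);
                parent_info = shrinker_info_protected(parent, nid);
                for (index = 0; index < child_info->map_nr_max / SHRINKER_UNIT_BITS; index++) {
                        for (offset = 0; offset < SHRINKER_UNIT_BITS; offset++) {
                                long nr = atomic_long_read(
                                        &child_info->unit[index]->nr_deferred[offset]);

                                atomic_long_add(nr,
                                        &parent_info->unit[index]->nr_deferred[offset]);
                        }
                }
        }
        mutex_unlock(&shrinker_mutex);
}
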
322 struct mem_cgroup *memcg)
328 struct mem_cgroup *memcg)
342 if (sc->memcg &&
345 sc->memcg);
359 if (sc->memcg &&
362 sc->memcg);
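
Lines 322 and 328 are the !CONFIG_MEMCG stubs of the two functions above (they simply return 0), while lines 342-362 are the common dispatchers that pick between the per-memcg counter and the shrinker's own per-node array. The xchg side likely looks like this, with add_nr_deferred mirroring it via atomic_long_add_return():

static long xchg_nr_deferred(struct shrinker *shrinker,
                             struct shrink_control *sc)
{
        int nid = sc->nid;

        if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
                nid = 0;

        /* Per-memcg counter for memcg-aware shrinkers, global otherwise. */
        if (sc->memcg &&
            (shrinker->flags & SHRINKER_MEMCG_AWARE))
                return xchg_nr_deferred_memcg(nid, shrinker,
                                              sc->memcg);

        return atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
}
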
468 struct mem_cgroup *memcg, int priority)
474 if (!mem_cgroup_online(memcg))
478 * lockless algorithm of memcg shrink.
484 * The shrinker_info_unit is never freed unless its corresponding memcg
485 * is destroyed. Here we already hold the refcount of memcg, so the
486 * memcg will not be destroyed, and of course shrinker_info_unit will
489 * So in the memcg shrink:
514 info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info);
529 .memcg = memcg,
554 * the memcg shrinker map, a new object might have been
571 set_shrinker_bit(memcg, nid, shrinker_id);
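
Lines 468-571 are shrink_slab_memcg(), whose comment block (lines 478-489) spells out the lockless scheme: RCU guarantees the shrinker_info itself, while the units survive on the memcg refcount, so RCU can be dropped before the (sleeping) scan. A heavily abbreviated sketch; shrinker_for_id() is a hypothetical stand-in for the real id lookup, which also pins the shrinker with try-get/put refcounting that is elided here:

static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
                                       struct mem_cgroup *memcg, int priority)
{
        struct shrinker_info *info;
        unsigned long freed = 0;
        int offset, index = 0;

        if (!mem_cgroup_online(memcg))
                return 0;

again:
        /* Step 1: RCU keeps the shrinker_info itself alive. */
        rcu_read_lock();
        info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info);
        if (!info)
                goto unlock;

        if (index < info->map_nr_max / SHRINKER_UNIT_BITS) {
                struct shrinker_info_unit *unit = info->unit[index];

                /* Step 2: the unit lives on the memcg refcount; drop RCU. */
                rcu_read_unlock();

                /* Step 3: run every shrinker whose bit is set in this unit. */
                for_each_set_bit(offset, unit->map, SHRINKER_UNIT_BITS) {
                        struct shrink_control sc = {
                                .gfp_mask = gfp_mask,
                                .nid = nid,
                                .memcg = memcg,
                        };
                        int shrinker_id = index * SHRINKER_UNIT_BITS + offset;
                        unsigned long ret;

                        clear_bit(offset, unit->map);
                        ret = do_shrink_slab(&sc, shrinker_for_id(shrinker_id), /* hypothetical */
                                             priority);
                        if (ret == SHRINK_EMPTY)
                                /*
                                 * A new object may have appeared since the
                                 * count; re-arm the bit (the race described
                                 * around line 554).
                                 */
                                set_shrinker_bit(memcg, nid, shrinker_id);
                        else
                                freed += ret;
                }

                /* Step 4: re-take RCU, notice any new info, and continue. */
                index++;
                goto again;
        }
unlock:
        rcu_read_unlock();
        return freed;
}
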
586 struct mem_cgroup *memcg, int priority)
596 * @memcg: memory cgroup whose slab caches to target
604 * @memcg specifies the memory cgroup to target. Unaware shrinkers
612 unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
619 * The root memcg might be allocated even though memcg is disabled
621 * mem_cgroup_is_root() return false, then just run memcg slab
625 if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
626 return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
654 .memcg = memcg,
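
shrink_slab() (lines 586-654) is the entry point that splits the two worlds. The comment at lines 619-621 explains the subtle check: with "cgroup_disable=memory" the root memcg may still exist, so testing mem_cgroup_is_root() alone would route everything down the memcg path and global shrink would never run. A sketch, with the locking/refcounting of the global shrinker-list walk elided:

unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
                          int priority)
{
        unsigned long ret, freed = 0;
        struct shrinker *shrinker;

        if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
                return shrink_slab_memcg(gfp_mask, nid, memcg, priority);

        /* Global path: walk every registered shrinker. */
        list_for_each_entry(shrinker, &shrinker_list, list) {
                struct shrink_control sc = {
                        .gfp_mask = gfp_mask,
                        .nid = nid,
                        .memcg = memcg,
                };

                ret = do_shrink_slab(&sc, shrinker, priority);
                if (ret == SHRINK_EMPTY)
                        ret = 0;
                freed += ret;
        }

        return freed;
}
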
699 /* Memcg is not supported, fallback to non-memcg-aware shrinker. */
712 * The nr_deferred is available on per memcg level for memcg aware
714 * - non-memcg-aware shrinkers
716 * - memcg is disabled by kernel command line
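
The last two groups sit in the shrinker allocation path. Line 699's fallback fires when the per-memcg machinery reports -ENOSYS (one of the cases listed at lines 714-716), and the comment at lines 712-716 then justifies allocating a plain per-node nr_deferred array only for shrinkers that will never use the per-memcg counters. A fragment-level sketch; shrinker_memcg_alloc() is assumed to be the helper behind the fallback:

        if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
                err = shrinker_memcg_alloc(shrinker);   /* assumed helper */
                if (err != -ENOSYS)
                        return err ? NULL : shrinker;   /* memcg path handled */
                /* Memcg is not supported, fallback to non-memcg-aware shrinker. */
                shrinker->flags &= ~SHRINKER_MEMCG_AWARE;
        }

        /* Only reached when the per-memcg nr_deferred will never be used. */
        size = sizeof(*shrinker->nr_deferred);
        if (shrinker->flags & SHRINKER_NUMA_AWARE)
                size *= nr_node_ids;

        shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
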