
Lines Matching defs:memcg (only in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/mm/)

70  * Per memcg event counter is incremented at every pagein/pageout. This counter
72 * than using jiffies etc. to handle periodic memcg event.
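The comment fragments above (lines 70-72) describe a per-memcg event counter that replaces jiffies-based timing of periodic work. Below is a minimal user-space sketch of that idea; the counter target and all names are assumptions, not taken from this file:

    /* illustrative model only: names and EVENT_TARGET are assumed */
    #include <stdbool.h>

    #define EVENT_TARGET 1024            /* events between periodic checks (assumed) */

    struct memcg_model {
        unsigned long events;            /* bumped at every pagein/pageout */
    };

    static bool memcg_check_events(struct memcg_model *m)
    {
        /* fire periodic work every EVENT_TARGET events instead of timing with jiffies */
        return ++m->events % EVENT_TARGET == 0;
    }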
705 * Operations are called by routine of global LRU independently from memcg.
866 static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages)
873 inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_ANON);
874 active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_ANON);
890 int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
897 inactive_ratio = calc_inactive_ratio(memcg, present_pages);
908 int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
913 inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_FILE);
914 active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_FILE);
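Lines 866-914 sample the helpers that judge the inactive/active LRU balance. A minimal user-space model of such a check is sketched below, assuming the ratio grows roughly with the square root of the LRU size in gigabytes and that the anon list counts as "low" when inactive * ratio < active; the constants and names are assumptions modelled on the global-LRU heuristic, not copied from this file:

    /* illustrative model; constants and formula are assumptions */
    #include <math.h>
    #include <stdbool.h>

    #define PAGE_SHIFT 12                /* assumed 4 KiB pages */

    static unsigned long calc_inactive_ratio_model(unsigned long inactive,
                                                   unsigned long active)
    {
        /* size of the anon LRU in whole gigabytes */
        unsigned long gb = (inactive + active) >> (30 - PAGE_SHIFT);

        /* larger groups tolerate a proportionally smaller inactive list */
        return gb ? (unsigned long)sqrt(10.0 * (double)gb) : 1;
    }

    static bool inactive_anon_is_low_model(unsigned long inactive,
                                           unsigned long active)
    {
        return inactive * calc_inactive_ratio_model(inactive, active) < active;
    }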
919 unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
925 struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
930 struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
935 struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
1039 static unsigned int get_swappiness(struct mem_cgroup *memcg)
1041 struct cgroup *cgrp = memcg->css.cgroup;
1048 spin_lock(&memcg->reclaim_param_lock);
1049 swappiness = memcg->swappiness;
1050 spin_unlock(&memcg->reclaim_param_lock);
1105 * @memcg: The memory cgroup that went over limit
1108 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1111 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1123 if (!memcg || !p)
1129 mem_cgrp = memcg->css.cgroup;
1160 res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
1161 res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
1162 res_counter_read_u64(&memcg->res, RES_FAILCNT));
1165 res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
1166 res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
1167 res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
1171 * This function returns the number of memcg under hierarchy tree. Returns
1182 * Return the memory (and swap, if configured) limit for a memcg.
1184 u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
1189 limit = res_counter_read_u64(&memcg->res, RES_LIMIT) +
1191 memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1194 * to this memcg, return that limit.
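mem_cgroup_get_limit() (line 1184) combines the memory and memory+swap limits, per the comment at line 1194. A sketch of the arithmetic, assuming the effective limit is the memory limit plus available swap, capped by the memsw limit when that is tighter; all names are stand-ins:

    /* illustrative model only; parameter names are stand-ins */
    #include <stdint.h>

    static uint64_t memcg_effective_limit(uint64_t mem_limit,
                                          uint64_t memsw_limit,
                                          uint64_t total_swap_bytes)
    {
        /* memory limit plus all the swap the group could still use ... */
        uint64_t limit = mem_limit + total_swap_bytes;

        /* ... capped by the memory+swap limit when that is tighter */
        return memsw_limit < limit ? memsw_limit : limit;
    }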
1332 * a memcg is already locked. But considering unlock ops and
1333 * creation/removal of memcg, scan-all is a simple operation.
1696 gfp_t gfp_mask, struct mem_cgroup **memcg, bool oom)
1718 if (!*memcg && !mm)
1721 if (*memcg) { /* css should be a valid one */
1722 mem = *memcg;
1751 * It seems dangerous to access memcg without css_get().
1754 * from this memcg are cached on this cpu. So, we
1815 *memcg = mem;
1818 *memcg = NULL;
1821 *memcg = NULL;
1849 * memcg.)
1901 /* try_charge() can return NULL to *memcg, taking care of it. */
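The try_charge() fragments (lines 1696-1901) show its in/out **memcg parameter: a NULL *memcg with a valid mm means the group is derived from the mm, and on failure *memcg is reset to NULL so the caller never sees a stale pointer. A hedged user-space model of that convention, with stand-in types and no real charging:

    /* illustrative model; types and lookup are stand-ins */
    #include <stddef.h>
    #include <errno.h>

    struct memcg_model { int id; };
    struct mm_model    { struct memcg_model *owner; };

    static int try_charge_model(struct mm_model *mm, struct memcg_model **memcg)
    {
        struct memcg_model *mem;

        if (!*memcg && !mm)
            return -EINVAL;              /* nothing to derive the group from */

        if (*memcg)
            mem = *memcg;                /* caller already pinned a group */
        else
            mem = mm->owner;             /* otherwise take the mm's group */

        /* the actual charge would happen here; on failure the caller
         * would see *memcg = NULL instead of a stale pointer */

        *memcg = mem;                    /* hand the charged group back */
        return 0;
    }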
2169 * And when try_charge() successfully returns, one refcnt to memcg without
2231 struct mem_cgroup *memcg;
2235 memcg = mem_cgroup_lookup(id);
2236 if (memcg) {
2238 * This recorded memcg can be obsolete one. So, avoid
2241 if (!mem_cgroup_is_root(memcg))
2242 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
2243 mem_cgroup_swap_statistics(memcg, false);
2244 mem_cgroup_put(memcg);
2282 * Usually, we do css_get() when we remember memcg pointer.
2284 * uncharges. Then, it's ok to ignore memcg's refcnt.
2286 if (!batch->memcg)
2287 batch->memcg = mem;
2300 * In the typical case, batch->memcg == mem. This means we can
2304 if (batch->memcg != mem)
2315 if (unlikely(batch->memcg != mem))
2379 * even after unlock, we have mem->res.usage here and this memcg
2417 * are in the same memcg. All these calls itself limits the number of
2427 current->memcg_batch.memcg = NULL;
2444 if (!batch->memcg)
2447 * This "batch->memcg" is valid without any css_get/put etc...
2451 res_counter_uncharge(&batch->memcg->res, batch->bytes);
2453 res_counter_uncharge(&batch->memcg->memsw, batch->memsw_bytes);
2454 memcg_oom_recover(batch->memcg);
2456 batch->memcg = NULL;
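Lines 2282-2456 outline the batched-uncharge path: per-task state remembers a single memcg and accumulates bytes, a page belonging to a different memcg bypasses the batch, and the flush drops the accumulated charge in one res_counter operation per counter. A small user-space model of that pattern; the types and helpers are stand-ins, not the kernel API:

    /* illustrative model; struct layout and helpers are stand-ins */
    #include <stdint.h>

    struct memcg_model {
        uint64_t res_usage;              /* charged memory, in bytes */
        uint64_t memsw_usage;            /* charged memory+swap, in bytes */
    };

    struct uncharge_batch {
        struct memcg_model *memcg;       /* NULL until the first batched page */
        uint64_t bytes;
        uint64_t memsw_bytes;
    };

    static void direct_uncharge(struct memcg_model *mc, uint64_t bytes,
                                uint64_t memsw_bytes)
    {
        mc->res_usage -= bytes;
        mc->memsw_usage -= memsw_bytes;
    }

    static void batch_uncharge_page(struct uncharge_batch *b,
                                    struct memcg_model *mc,
                                    uint64_t page_size, int charged_memsw)
    {
        if (!b->memcg)
            b->memcg = mc;               /* first page decides the batch owner */

        if (b->memcg != mc) {            /* page from another group: don't batch */
            direct_uncharge(mc, page_size, charged_memsw ? page_size : 0);
            return;
        }

        b->bytes += page_size;
        if (charged_memsw)
            b->memsw_bytes += page_size;
    }

    static void batch_flush(struct uncharge_batch *b)
    {
        if (!b->memcg)
            return;
        direct_uncharge(b->memcg, b->bytes, b->memsw_bytes);
        b->memcg = NULL;
        b->bytes = b->memsw_bytes = 0;
    }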
2462 * memcg information is recorded to swap_cgroup of "ent"
2467 struct mem_cgroup *memcg;
2473 memcg = __mem_cgroup_uncharge_common(page, ctype);
2476 * record memcg information, if swapout && memcg != NULL,
2479 if (do_swap_account && swapout && memcg)
2480 swap_cgroup_record(ent, css_id(&memcg->css));
2491 struct mem_cgroup *memcg;
2499 memcg = mem_cgroup_lookup(id);
2500 if (memcg) {
2503 * This memcg can be obsolete one. We avoid calling css_tryget
2505 if (!mem_cgroup_is_root(memcg))
2506 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
2507 mem_cgroup_swap_statistics(memcg, false);
2508 mem_cgroup_put(memcg);
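The two swap-uncharge ranges above (2462-2480 and 2491-2508) describe a record/lookup handshake: the owning memcg's css id is stored via swap_cgroup_record() at swapout and looked up again when the swap entry is freed, so the memsw charge can finally be dropped. A toy user-space model of that handshake, with a fixed-size id table standing in for swap_cgroup:

    /* illustrative model; the id table is a stand-in for swap_cgroup */
    #include <stddef.h>

    #define MAX_SWAP_ENTRIES 1024

    /* one owner id per swap slot; 0 means "no memcg recorded" */
    static unsigned short swap_owner[MAX_SWAP_ENTRIES];

    /* swapout path: remember which group the page was charged to */
    static void record_swap_owner(size_t entry, unsigned short css_id)
    {
        swap_owner[entry] = css_id;
    }

    /* swap-entry free path: recover the id so the memsw charge can be dropped */
    static unsigned short take_swap_owner(size_t entry)
    {
        unsigned short id = swap_owner[entry];

        swap_owner[entry] = 0;
        return id;
    }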
2718 * not from the memcg which this page would be charged to.
2740 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
2746 int children = mem_cgroup_count_children(memcg);
2757 oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
2771 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
2778 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
2782 ret = res_counter_set_limit(&memcg->res, val);
2785 memcg->memsw_is_minimum = true;
2787 memcg->memsw_is_minimum = false;
2794 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
2796 curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
2804 memcg_oom_recover(memcg);
2809 static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
2814 int children = mem_cgroup_count_children(memcg);
2820 oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
2832 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
2838 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
2841 ret = res_counter_set_limit(&memcg->memsw, val);
2844 memcg->memsw_is_minimum = true;
2846 memcg->memsw_is_minimum = false;
2853 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
2856 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
2864 memcg_oom_recover(memcg);
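Both resize paths (lines 2740-2804 for memory and 2809-2864 for memory+swap) share a shrink-and-retry shape: try to install the new limit, and if usage is still above it, reclaim and try again a bounded number of times. A user-space sketch of that loop with the counter and reclaim primitives stubbed out; the retry count and reclaim step are placeholders:

    /* illustrative model; counter and reclaim behaviour are stubs */
    #include <stdint.h>
    #include <errno.h>

    struct counter_model {
        uint64_t usage;                  /* current charge, bytes */
        uint64_t limit;
    };

    /* stand-in for res_counter_set_limit(): refuse a limit below current usage */
    static int set_limit_model(struct counter_model *c, uint64_t val)
    {
        if (val < c->usage)
            return -EBUSY;
        c->limit = val;
        return 0;
    }

    /* stand-in for hierarchical reclaim: pretend one page was freed */
    static void reclaim_model(struct counter_model *c)
    {
        if (c->usage >= 4096)
            c->usage -= 4096;
    }

    static int resize_limit_model(struct counter_model *c, uint64_t val, int retries)
    {
        while (retries-- > 0) {
            if (set_limit_model(c, val) == 0)
                return 0;                /* new limit installed */
            reclaim_model(c);            /* shrink usage and try again */
        }
        return -EBUSY;
    }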
2924 } else /* next_mz == NULL or other memcg */
3224 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
3233 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3242 ret = mem_cgroup_resize_limit(memcg, val);
3244 ret = mem_cgroup_resize_memsw_limit(memcg, val);
3256 ret = res_counter_set_soft_limit(&memcg->res, val);
3267 static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
3273 min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3274 min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3275 cgroup = memcg->css.cgroup;
3276 if (!memcg->use_hierarchy)
3281 memcg = mem_cgroup_from_cont(cgroup);
3282 if (!memcg->use_hierarchy)
3284 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
3286 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
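memcg_get_hierarchical_limit() (lines 3267-3286) walks toward the root while use_hierarchy is set and keeps the smallest memory and memsw limits it sees, since an ancestor's limit also constrains this group. A minimal stand-in model of that walk:

    /* illustrative model; node layout is a stand-in for the cgroup tree */
    #include <stdint.h>
    #include <stdbool.h>

    struct memcg_node {
        struct memcg_node *parent;       /* NULL at the root */
        bool use_hierarchy;
        uint64_t mem_limit;
        uint64_t memsw_limit;
    };

    static void hierarchical_limits(const struct memcg_node *m,
                                    uint64_t *min_mem, uint64_t *min_memsw)
    {
        *min_mem = m->mem_limit;
        *min_memsw = m->memsw_limit;

        /* an ancestor's limit also applies, so keep the minimum seen */
        while (m->use_hierarchy && m->parent) {
            m = m->parent;
            if (m->mem_limit < *min_mem)
                *min_mem = m->mem_limit;
            if (m->memsw_limit < *min_memsw)
                *min_memsw = m->memsw_limit;
        }
    }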
3501 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3503 return get_swappiness(memcg);
3509 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3524 (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
3529 spin_lock(&memcg->reclaim_param_lock);
3530 memcg->swappiness = val;
3531 spin_unlock(&memcg->reclaim_param_lock);
3538 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3546 t = rcu_dereference(memcg->thresholds.primary);
3548 t = rcu_dereference(memcg->memsw_thresholds.primary);
3553 usage = mem_cgroup_usage(memcg, swap);
3589 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3591 while (memcg) {
3592 __mem_cgroup_threshold(memcg, false);
3594 __mem_cgroup_threshold(memcg, true);
3596 memcg = parent_mem_cgroup(memcg);
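mem_cgroup_threshold() (lines 3589-3596) re-runs the threshold check for the group and each hierarchical parent, for both memory and memory+swap usage. The sketch below models only the crossing logic, assuming thresholds are kept sorted and that every threshold passed since the last check fires in either direction; printf stands in for the kernel's eventfd signalling:

    /* illustrative model; structure names and firing details are assumed */
    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    struct threshold_set {
        const uint64_t *thresholds;      /* sorted ascending */
        size_t count;
        size_t below;                    /* thresholds at or below the last usage */
    };

    static void check_thresholds(struct threshold_set *t, uint64_t usage)
    {
        /* usage grew: report every threshold it has just passed */
        while (t->below < t->count && t->thresholds[t->below] <= usage) {
            printf("crossed %llu upward\n",
                   (unsigned long long)t->thresholds[t->below]);
            t->below++;
        }
        /* usage shrank: thresholds above the new usage fire as well */
        while (t->below > 0 && t->thresholds[t->below - 1] > usage) {
            t->below--;
            printf("crossed %llu downward\n",
                   (unsigned long long)t->thresholds[t->below]);
        }
    }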
3625 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3636 mutex_lock(&memcg->thresholds_lock);
3639 thresholds = &memcg->thresholds;
3641 thresholds = &memcg->memsw_thresholds;
3645 usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
3649 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3699 mutex_unlock(&memcg->thresholds_lock);
3707 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3714 mutex_lock(&memcg->thresholds_lock);
3716 thresholds = &memcg->thresholds;
3718 thresholds = &memcg->memsw_thresholds;
3728 usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
3731 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3777 mutex_unlock(&memcg->thresholds_lock);
3783 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3795 list_add(&event->list, &memcg->oom_notify);
3798 if (atomic_read(&memcg->oom_lock))