Lines Matching defs:memcg

73 * Per memcg event counter is incremented at every pagein/pageout. With THP,
76 * than using jiffies etc. to handle periodic memcg events.
114 struct mem_cgroup *memcg; /* Back pointer, we cannot */
156 int memcg_id; /* memcg->css.id of foreign inode */
169 struct mem_cgroup *memcg;
186 /* Private memcg ID. Used to ID objects that outlive the cgroup */
208 * Prevent pages from this memcg from being written back from zswap to
283 * memcg->objcg is wiped out as a part of the objcg reparenting
284 * process. memcg->orig_objcg preserves a pointer (and a reference)
285 * to the original objcg until the end of the memcg's life.
318 /* per-memcg mm_struct list */
365 * After initialization, objcg->memcg always points at
366 * a valid memcg, but it can be atomically swapped to the parent memcg.
368 * The caller must ensure that the returned memcg won't be released:
373 return READ_ONCE(objcg->memcg);
427 * For a non-kmem folio any of the following ensures folio and memcg binding
436 * For a kmem folio a caller should hold an rcu read lock to protect memcg
489 * For a non-kmem folio any of the following ensures folio and memcg binding
498 * For a kmem folio a caller should hold an rcu read lock to protect memcg
531 struct mem_cgroup *memcg;
535 memcg = obj_cgroup_memcg(objcg);
536 if (unlikely(!css_tryget(&memcg->css)))
540 return memcg;
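
The retry-under-RCU body above (lines 531-540) is how a caller turns an objcg into a pinned memcg: obj_cgroup_memcg() can be atomically redirected to the parent during reparenting, so css_tryget() must be retried until it sticks. A minimal hedged sketch of the consumer side, assuming the surrounding helper is get_mem_cgroup_from_objcg() as in mainline; use_memcg() is a hypothetical stand-in for the caller's real work:

    struct mem_cgroup *memcg;

    memcg = get_mem_cgroup_from_objcg(objcg);   /* takes a css reference */
    use_memcg(memcg);                           /* hypothetical caller work */
    mem_cgroup_put(memcg);                      /* drops the reference */
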
573 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
575 return (memcg == root_mem_cgroup);
584 struct mem_cgroup *memcg,
626 if (root == memcg)
629 *min = READ_ONCE(memcg->memory.emin);
630 *low = READ_ONCE(memcg->memory.elow);
634 struct mem_cgroup *memcg);
637 struct mem_cgroup *memcg)
640 * The root memcg doesn't account charges, and doesn't support
641 * protection. The target memcg's protection is ignored, see
644 return mem_cgroup_disabled() || mem_cgroup_is_root(memcg) ||
645 memcg == target;
649 struct mem_cgroup *memcg)
651 if (mem_cgroup_unprotected(target, memcg))
654 return READ_ONCE(memcg->memory.elow) >=
655 page_counter_read(&memcg->memory);
659 struct mem_cgroup *memcg)
661 if (mem_cgroup_unprotected(target, memcg))
664 return READ_ONCE(memcg->memory.emin) >=
665 page_counter_read(&memcg->memory);
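
mem_cgroup_unprotected(), mem_cgroup_below_low() and mem_cgroup_below_min() above implement the memory.min/memory.low reclaim protection checks. A hedged sketch of the consumer side, modeled on the per-memcg loop in mm/vmscan.c (shrink_node_memcgs()); `target` is the memcg reclaim was invoked against, and the fragment sits inside that loop:

    mem_cgroup_calculate_protection(target, memcg);

    if (mem_cgroup_below_min(target, memcg)) {
            /* memory.min: hard protection, skip this memcg */
            continue;
    } else if (mem_cgroup_below_low(target, memcg)) {
            /* memory.low: best effort; record the breach if
             * it gets reclaimed anyway */
            memcg_memory_event(memcg, MEMCG_LOW);
    }
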
668 void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg);
678 * Try to charge @folio to the memcg that @mm belongs to, reclaiming
680 * charge to the active memcg.
694 int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp,
724 void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages);
729 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
730 * @memcg: memcg of the wanted lruvec
733 * Returns the lru list vector holding pages for a given @memcg &
737 static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
748 if (!memcg)
749 memcg = root_mem_cgroup;
751 mz = memcg->nodeinfo[pgdat->node_id];
772 struct mem_cgroup *memcg = folio_memcg(folio);
774 VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
775 return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
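
mem_cgroup_lruvec() (note the NULL-to-root fallback on lines 748-749) and its folio_lruvec() wrapper are the lookup points for per-memcg LRU state. A hedged sketch, assuming `nid` and a binding-stable `folio` are in scope:

    /* root-level lruvec for a node: NULL falls back to root_mem_cgroup */
    struct lruvec *node_lruvec = mem_cgroup_lruvec(NULL, NODE_DATA(nid));

    /* per-folio lruvec, valid while the folio<->memcg binding is stable */
    struct lruvec *lruvec = folio_lruvec(folio);
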
825 static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
827 return !memcg || css_tryget(&memcg->css);
830 static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
832 return !memcg || css_tryget_online(&memcg->css);
835 static inline void mem_cgroup_put(struct mem_cgroup *memcg)
837 if (memcg)
838 css_put(&memcg->css);
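
mem_cgroup_tryget() and mem_cgroup_put() both accept NULL, which keeps callers branch-free. A hedged sketch of the usual pin-under-RCU pattern, reusing the mm->owner lookup matched further down and assuming an `mm` in scope:

    struct mem_cgroup *memcg;

    rcu_read_lock();
    memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
    if (!mem_cgroup_tryget(memcg))      /* NULL counts as success */
            memcg = NULL;
    rcu_read_unlock();

    /* ... memcg, if non-NULL, stays valid here ... */
    mem_cgroup_put(memcg);              /* no-op for NULL */
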
848 void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
851 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
856 return memcg->id.id;
861 static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
863 return memcg ? cgroup_ino(memcg->css.cgroup) : 0;
882 return mz->memcg;
886 * parent_mem_cgroup - find the accounting parent of a memcg
887 * @memcg: memcg whose parent to find
889 * Returns the parent memcg, or NULL if this is the root.
891 static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
893 return mem_cgroup_from_css(memcg->css.parent);
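
parent_mem_cgroup() is the building block for hierarchy walks; the memcg_memory_event() do/while matched below (lines 1102-1103) is the canonical shape. A hedged sketch, assuming `memcg` is in scope and note_level() is a hypothetical per-level hook:

    for (; memcg && !mem_cgroup_is_root(memcg);
         memcg = parent_mem_cgroup(memcg))
            note_level(memcg);          /* hypothetical */
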
896 static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
899 if (root == memcg)
901 return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
905 struct mem_cgroup *memcg)
913 match = mem_cgroup_is_descendant(task_memcg, memcg);
921 static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
925 return !!(memcg->css.flags & CSS_ONLINE);
943 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
945 unsigned long mem_cgroup_size(struct mem_cgroup *memcg);
947 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
950 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);
972 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
977 void __mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
980 /* try to stabilize folio_memcg() for all the pages in a memcg */
981 static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
985 if (mem_cgroup_disabled() || !atomic_read(&memcg->moving_account))
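
mem_cgroup_trylock_pages() succeeds (staying in an RCU section) unless a charge move is in flight for this memcg; on failure it drops RCU and returns false. Its pair in this header is mem_cgroup_unlock_pages(). A hedged sketch:

    if (mem_cgroup_trylock_pages(memcg)) {
            /* folio_memcg() results are stable in here */
            mem_cgroup_unlock_pages();
    }
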
998 static inline void mod_memcg_state(struct mem_cgroup *memcg,
1004 __mod_memcg_state(memcg, idx, val);
1011 struct mem_cgroup *memcg;
1017 memcg = page_memcg(page);
1018 if (memcg)
1019 mod_memcg_state(memcg, idx, val);
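
The body above belongs to the page-based wrapper (mod_memcg_page_state() in mainline), which tolerates pages with no memcg bound. A hedged one-line usage, modeled on vmalloc accounting in mm/vmalloc.c:

    mod_memcg_page_state(page, MEMCG_VMALLOC, 1);
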
1023 unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx);
1028 void mem_cgroup_flush_stats(struct mem_cgroup *memcg);
1029 void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg);
1043 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
1046 static inline void count_memcg_events(struct mem_cgroup *memcg,
1053 __count_memcg_events(memcg, idx, count);
1060 struct mem_cgroup *memcg = folio_memcg(folio);
1062 if (memcg)
1063 count_memcg_events(memcg, idx, nr);
1069 struct mem_cgroup *memcg;
1075 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1076 if (likely(memcg))
1077 count_memcg_events(memcg, idx, 1);
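
The fragment above is count_memcg_event_mm(): resolve the mm's owning memcg under RCU, then bump a vm_event counter by one. A hedged usage sketch, matching how the page fault path has accounted PGFAULT:

    count_memcg_event_mm(vma->vm_mm, PGFAULT);
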
1081 static inline void memcg_memory_event(struct mem_cgroup *memcg,
1087 atomic_long_inc(&memcg->memory_events_local[event]);
1089 cgroup_file_notify(&memcg->events_local_file);
1092 atomic_long_inc(&memcg->memory_events[event]);
1094 cgroup_file_notify(&memcg->swap_events_file);
1096 cgroup_file_notify(&memcg->events_file);
1102 } while ((memcg = parent_mem_cgroup(memcg)) &&
1103 !mem_cgroup_is_root(memcg));
1109 struct mem_cgroup *memcg;
1115 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1116 if (likely(memcg))
1117 memcg_memory_event(memcg, event);
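
memcg_memory_event_mm() is the mm-based convenience over memcg_memory_event() above. A hedged sketch; the call site and event choice are illustrative only:

    memcg_memory_event_mm(current->mm, MEMCG_OOM);
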
1172 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
1182 static inline void memcg_memory_event(struct mem_cgroup *memcg,
1193 struct mem_cgroup *memcg,
1201 struct mem_cgroup *memcg)
1206 struct mem_cgroup *memcg)
1211 struct mem_cgroup *memcg)
1217 struct mem_cgroup *memcg)
1223 struct mem_cgroup *memcg)
1233 static inline int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg,
1257 static inline void mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
1271 static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
1288 static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
1294 struct mem_cgroup *memcg)
1319 static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
1324 static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
1329 static inline void mem_cgroup_put(struct mem_cgroup *memcg)
1371 static inline void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1376 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
1389 static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
1410 static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
1422 static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1427 static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1433 mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1438 mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1450 static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
1490 static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
1494 static inline void __mod_memcg_state(struct mem_cgroup *memcg,
1500 static inline void mod_memcg_state(struct mem_cgroup *memcg,
1511 static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
1528 static inline void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
1532 static inline void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
1552 static inline void count_memcg_events(struct mem_cgroup *memcg,
1558 static inline void __count_memcg_events(struct mem_cgroup *memcg,
1612 struct mem_cgroup *memcg;
1614 memcg = lruvec_memcg(lruvec);
1615 if (!memcg)
1617 memcg = parent_mem_cgroup(memcg);
1618 if (!memcg)
1620 return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
1639 /* Test requires a stable page->memcg binding, see page_memcg() */
1688 struct mem_cgroup *memcg;
1693 memcg = folio_memcg(folio);
1694 if (unlikely(memcg && &memcg->css != wb->memcg_css))
1727 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
1729 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
1735 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
1738 return !!memcg->tcpmem_pressure;
1740 if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
1742 } while ((memcg = parent_mem_cgroup(memcg)));
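
The hierarchical pressure walk above is consumed by networking. A hedged sketch, modeled on sk_under_memory_pressure() in include/net/sock.h:

    if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
        mem_cgroup_under_socket_pressure(sk->sk_memcg))
            return true;
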
1746 int alloc_shrinker_info(struct mem_cgroup *memcg);
1747 void free_shrinker_info(struct mem_cgroup *memcg);
1748 void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
1749 void reparent_shrinker_deferred(struct mem_cgroup *memcg);
1754 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
1759 static inline void set_shrinker_bit(struct mem_cgroup *memcg,
1822 * A helper for accessing memcg's kmem_id, used for getting
1825 static inline int memcg_kmem_id(struct mem_cgroup *memcg)
1827 return memcg ? memcg->kmemcg_id : -1;
1836 struct mem_cgroup *memcg;
1842 memcg = obj_cgroup_memcg(objcg);
1843 count_memcg_events(memcg, idx, 1);
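
The body above resolves an objcg to its memcg and forwards a single event (count_objcg_event() in mainline). A hedged usage, modeled on zswap's event accounting:

    count_objcg_event(objcg, ZSWPOUT);
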
1888 static inline int memcg_kmem_id(struct mem_cgroup *memcg)
1914 bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg);
1928 static inline bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
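
mem_cgroup_zswap_writeback_enabled() gates zswap-to-swap writeback per memcg (the memory.zswap.writeback knob; see also the comment at line 208). A hedged sketch, modeled on the shrinker checks in mm/zswap.c; the return value here is illustrative:

    if (!mem_cgroup_zswap_writeback_enabled(memcg))
            return 0;       /* nothing writable back for this memcg */
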