Lines matching refs:memcg

73 * Per memcg event counter is incremented at every pagein/pageout. With THP,
76 * than using jiffies etc. to handle periodic memcg event.
131 struct mem_cgroup *memcg; /* Back pointer, we cannot */
173 int memcg_id; /* memcg->css.id of foreign inode */
186 struct mem_cgroup *memcg;
203 /* Private memcg ID. Used to ID objects that outlive the cgroup */
225 * Prevent pages from this memcg from being written back from zswap to
300 * memcg->objcg is wiped out as a part of the objcg reparenting
301 * process. memcg->orig_objcg preserves a pointer (and a reference)
302 * to the original objcg until the end of life of memcg.
335 /* per-memcg mm_struct list */
365 * After the initialization objcg->memcg is always pointing at
366 * a valid memcg, but can be atomically swapped to the parent memcg.
368 * The caller must ensure that the returned memcg won't be released:
373 return READ_ONCE(objcg->memcg);
427 * For a non-kmem folio any of the following ensures folio and memcg binding
436 * For a kmem folio a caller should hold an rcu read lock to protect memcg
489 * For a non-kmem folio any of the following ensures folio and memcg binding
498 * For a kmem folio a caller should hold an rcu read lock to protect memcg
531 struct mem_cgroup *memcg;
535 memcg = obj_cgroup_memcg(objcg);
536 if (unlikely(!css_tryget(&memcg->css)))
540 return memcg;
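The lookup above pairs obj_cgroup_memcg() with css_tryget() because objcg->memcg can be swapped to the parent memcg at any time (see lines 365-368). A minimal caller-side sketch of the same pattern; the retry loop and the objcg variable are illustrative, not the exact mainline body:

        struct mem_cgroup *memcg;

        rcu_read_lock();
        do {
                /* may observe either the old or the reparented memcg */
                memcg = obj_cgroup_memcg(objcg);
        } while (!css_tryget(&memcg->css));     /* retry if that css is dying */
        rcu_read_unlock();

        /* ... use memcg with a reference held ... */
        css_put(&memcg->css);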
573 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
575 return (memcg == root_mem_cgroup);
584 struct mem_cgroup *memcg,
626 if (root == memcg)
629 *min = READ_ONCE(memcg->memory.emin);
630 *low = READ_ONCE(memcg->memory.elow);
634 struct mem_cgroup *memcg);
637 struct mem_cgroup *memcg)
640 * The root memcg doesn't account charges, and doesn't support
641 * protection. The target memcg's protection is ignored, see
644 return mem_cgroup_disabled() || mem_cgroup_is_root(memcg) ||
645 memcg == target;
649 struct mem_cgroup *memcg)
651 if (mem_cgroup_unprotected(target, memcg))
654 return READ_ONCE(memcg->memory.elow) >=
655 page_counter_read(&memcg->memory);
659 struct mem_cgroup *memcg)
661 if (mem_cgroup_unprotected(target, memcg))
664 return READ_ONCE(memcg->memory.emin) >=
665 page_counter_read(&memcg->memory);
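mem_cgroup_below_min() and mem_cgroup_below_low() compare the effective protections (memory.emin/memory.elow) against the counter's current usage. A hedged sketch of how a reclaim walk might consume them, loosely modeled on the mainline reclaim loop; the loop body is illustrative only:

        struct mem_cgroup *memcg = mem_cgroup_iter(target, NULL, NULL);

        do {
                if (mem_cgroup_below_min(target, memcg))
                        continue;       /* hard protection: never reclaim */
                if (mem_cgroup_below_low(target, memcg)) {
                        memcg_memory_event(memcg, MEMCG_LOW);
                        continue;       /* soft protection: skip for now */
                }
                /* ... reclaim pages from this memcg ... */
        } while ((memcg = mem_cgroup_iter(target, memcg, NULL)));

Note that continue in a do/while still evaluates the controlling expression, so the iterator advances.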
668 void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg);
678 * Try to charge @folio to the memcg that @mm belongs to, reclaiming
680 * charge to the active memcg.
694 int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp,
724 void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages);
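The charge API above splits into a reservation step (try_charge) and a binding step (commit), with cancel as the undo path when no folio materializes. A sketch of the hugetlb-style sequence under stated assumptions (GFP_KERNEL context, nr_pages known, error handling abbreviated):

        struct mem_cgroup *memcg = get_mem_cgroup_from_mm(current->mm);

        if (mem_cgroup_hugetlb_try_charge(memcg, GFP_KERNEL, nr_pages)) {
                mem_cgroup_put(memcg);
                return -ENOMEM;                         /* reservation failed */
        }
        if (folio)
                mem_cgroup_commit_charge(folio, memcg); /* bind folio to memcg */
        else
                mem_cgroup_cancel_charge(memcg, nr_pages); /* undo reservation */
        mem_cgroup_put(memcg);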
729 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
730 * @memcg: memcg of the wanted lruvec
733 * Returns the lru list vector holding pages for a given @memcg &
737 static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
748 if (!memcg)
749 memcg = root_mem_cgroup;
751 mz = memcg->nodeinfo[pgdat->node_id];
772 struct mem_cgroup *memcg = folio_memcg(folio);
774 VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
775 return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
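folio_lruvec() is the common entry point: it resolves the folio's memcg and indexes the per-node info, with mem_cgroup_lruvec() falling back to root_mem_cgroup for a NULL memcg. A small equivalence sketch, assuming a stable folio->memcg binding:

        struct lruvec *lruvec = folio_lruvec(folio);

        /* the long form via the (memcg, node) pair: */
        struct mem_cgroup *memcg = folio_memcg(folio);
        struct lruvec *same = mem_cgroup_lruvec(memcg, folio_pgdat(folio));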
824 static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
826 return !memcg || css_tryget(&memcg->css);
829 static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
831 return !memcg || css_tryget_online(&memcg->css);
834 static inline void mem_cgroup_put(struct mem_cgroup *memcg)
836 if (memcg)
837 css_put(&memcg->css);
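mem_cgroup_tryget() and mem_cgroup_put() both tolerate NULL, which keeps call sites branch-free. A sketch of the usual pin-under-RCU pattern; p is an assumed task_struct pointer:

        rcu_read_lock();
        memcg = mem_cgroup_from_task(p);
        if (!mem_cgroup_tryget(memcg))
                memcg = NULL;           /* css already released */
        rcu_read_unlock();

        if (memcg) {
                /* ... sleeping operations are safe, reference held ... */
                mem_cgroup_put(memcg);
        }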
847 void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
850 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
855 return memcg->id.id;
860 static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
862 return memcg ? cgroup_ino(memcg->css.cgroup) : 0;
881 return mz->memcg;
885 * parent_mem_cgroup - find the accounting parent of a memcg
886 * @memcg: memcg whose parent to find
888 * Returns the parent memcg, or NULL if this is the root.
890 static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
892 return mem_cgroup_from_css(memcg->css.parent);
895 static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
898 if (root == memcg)
900 return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
904 struct mem_cgroup *memcg)
912 match = mem_cgroup_is_descendant(task_memcg, memcg);
920 static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
924 return !!(memcg->css.flags & CSS_ONLINE);
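parent_mem_cgroup() and mem_cgroup_is_descendant() are the building blocks for hierarchy walks such as the event-propagation loop at lines 1145-1146. A sketch of a plain climb toward (but excluding) the root:

        for (; memcg && !mem_cgroup_is_root(memcg);
             memcg = parent_mem_cgroup(memcg)) {
                /* ... per-ancestor work, e.g. limit checks ... */
        }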
942 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
944 unsigned long mem_cgroup_size(struct mem_cgroup *memcg);
946 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
949 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);
971 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
976 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);
978 /* try to stabilize folio_memcg() for all the pages in a memcg */
979 static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
983 if (mem_cgroup_disabled() || !atomic_read(&memcg->moving_account))
996 static inline void mod_memcg_state(struct mem_cgroup *memcg,
1002 __mod_memcg_state(memcg, idx, val);
1009 struct mem_cgroup *memcg;
1015 memcg = page_memcg(page);
1016 if (memcg)
1017 mod_memcg_state(memcg, idx, val);
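mod_memcg_state() feeds the per-CPU vmstat batches, and the page-based wrapper above simply skips pages with no memcg. A one-line usage sketch, with MEMCG_SOCK as an arbitrary stat index:

        mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);   /* charge side */
        mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);  /* uncharge side */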
1021 unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx);
1059 void mem_cgroup_flush_stats(struct mem_cgroup *memcg);
1060 void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg);
1086 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
1089 static inline void count_memcg_events(struct mem_cgroup *memcg,
1096 __count_memcg_events(memcg, idx, count);
1103 struct mem_cgroup *memcg = folio_memcg(folio);
1105 if (memcg)
1106 count_memcg_events(memcg, idx, nr);
1112 struct mem_cgroup *memcg;
1118 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1119 if (likely(memcg))
1120 count_memcg_events(memcg, idx, 1);
1124 static inline void memcg_memory_event(struct mem_cgroup *memcg,
1130 atomic_long_inc(&memcg->memory_events_local[event]);
1132 cgroup_file_notify(&memcg->events_local_file);
1135 atomic_long_inc(&memcg->memory_events[event]);
1137 cgroup_file_notify(&memcg->swap_events_file);
1139 cgroup_file_notify(&memcg->events_file);
1145 } while ((memcg = parent_mem_cgroup(memcg)) &&
1146 !mem_cgroup_is_root(memcg));
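memcg_memory_event() records the event locally, then walks parent_mem_cgroup() so every ancestor's memory.events file sees it. Usage is a single call; the mm-based variant (lines 1152-1160, memcg_memory_event_mm() in mainline) resolves the memcg from mm->owner first:

        memcg_memory_event(memcg, MEMCG_OOM);           /* this level and up */
        memcg_memory_event_mm(mm, MEMCG_OOM_KILL);      /* from an mm-only context */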
1152 struct mem_cgroup *memcg;
1158 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1159 if (likely(memcg))
1160 memcg_memory_event(memcg, event);
1215 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
1225 static inline void memcg_memory_event(struct mem_cgroup *memcg,
1236 struct mem_cgroup *memcg,
1244 struct mem_cgroup *memcg)
1249 struct mem_cgroup *memcg)
1254 struct mem_cgroup *memcg)
1260 struct mem_cgroup *memcg)
1266 struct mem_cgroup *memcg)
1276 static inline int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg,
1300 static inline void mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
1314 static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
1331 static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
1337 struct mem_cgroup *memcg)
1362 static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
1367 static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
1372 static inline void mem_cgroup_put(struct mem_cgroup *memcg)
1414 static inline void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1419 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
1432 static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
1453 static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
1465 static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1470 static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1476 mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1481 mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1493 static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
1533 static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
1537 static inline void __mod_memcg_state(struct mem_cgroup *memcg,
1543 static inline void mod_memcg_state(struct mem_cgroup *memcg,
1554 static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
1571 static inline void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
1575 static inline void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
1600 static inline void count_memcg_events(struct mem_cgroup *memcg,
1606 static inline void __count_memcg_events(struct mem_cgroup *memcg,
1647 struct mem_cgroup *memcg;
1649 memcg = lruvec_memcg(lruvec);
1650 if (!memcg)
1652 memcg = parent_mem_cgroup(memcg);
1653 if (!memcg)
1655 return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
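The helper at lines 1647-1655 (parent_lruvec() in mainline) maps a lruvec to its parent memcg's lruvec on the same node, returning NULL at and above the root. A sketch of an upward per-node walk:

        for (; lruvec; lruvec = parent_lruvec(lruvec)) {
                /* ... aggregate per-node LRU state at each level ... */
        }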
1674 /* Test requires a stable page->memcg binding, see page_memcg() */
1723 struct mem_cgroup *memcg;
1728 memcg = folio_memcg(folio);
1729 if (unlikely(memcg && &memcg->css != wb->memcg_css))
1762 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
1764 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
1770 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
1773 return !!memcg->tcpmem_pressure;
1775 if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
1777 } while ((memcg = parent_mem_cgroup(memcg)));
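mem_cgroup_under_socket_pressure() covers both modes: the cgroup v1 tcpmem_pressure flag, and on the default hierarchy a jiffies-based socket_pressure window checked up the whole ancestry. A hedged networking-side sketch; sk is an assumed struct sock with memcg accounting active:

        if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
            mem_cgroup_under_socket_pressure(sk->sk_memcg)) {
                /* back off: this memcg saw reclaim pressure recently */
        }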
1781 int alloc_shrinker_info(struct mem_cgroup *memcg);
1782 void free_shrinker_info(struct mem_cgroup *memcg);
1783 void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
1784 void reparent_shrinker_deferred(struct mem_cgroup *memcg);
1789 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
1794 static inline void set_shrinker_bit(struct mem_cgroup *memcg,
1857 * A helper for accessing memcg's kmem_id, used for getting
1860 static inline int memcg_kmem_id(struct mem_cgroup *memcg)
1862 return memcg ? memcg->kmemcg_id : -1;
1871 struct mem_cgroup *memcg;
1877 memcg = obj_cgroup_memcg(objcg);
1878 count_memcg_events(memcg, idx, 1);
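The pattern at lines 1871-1878 attributes a vm event to the memcg behind an objcg; the RCU lock keeps objcg->memcg stable for the duration (see lines 365-368). A sketch, using ZSWPOUT as an arbitrary event index and assuming a zswap-enabled build:

        rcu_read_lock();
        memcg = obj_cgroup_memcg(objcg);
        count_memcg_events(memcg, ZSWPOUT, 1);
        rcu_read_unlock();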
1923 static inline int memcg_kmem_id(struct mem_cgroup *memcg)
1949 bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg);
1963 static inline bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)