// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic show_mem() implementation
 *
 * Copyright (C) 2008 Johannes Weiner <hannes@saeurebad.de>
 */

#include <linux/blkdev.h>
#include <linux/cma.h>
#include <linux/cpuset.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/swap.h>
#include <linux/vmstat.h>

#include "internal.h"
#include "swap.h"

atomic_long_t _totalram_pages __read_mostly;
EXPORT_SYMBOL(_totalram_pages);
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;

static inline void show_node(struct zone *zone)
{
	if (IS_ENABLED(CONFIG_NUMA))
		printk("Node %d ", zone_to_nid(zone));
}

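/**
 * si_mem_available - estimate the pages available for userspace allocations
 *
 * Returns an estimate of how much memory new workloads can use without
 * pushing the system into swap or OOM: free pages above the reserves, plus
 * the portions of the page cache and of reclaimable kernel memory that can
 * be dropped without thrashing. This is the figure reported as MemAvailable
 * in /proc/meminfo.
 */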
long si_mem_available(void)
{
	long available;
	unsigned long pagecache;
	unsigned long wmark_low = 0;
	unsigned long reclaimable;
	struct zone *zone;

	for_each_zone(zone)
		wmark_low += low_wmark_pages(zone);

	/*
	 * Estimate the amount of memory available for userspace allocations,
	 * without causing swapping or OOM.
	 */
	available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;

	/*
	 * Not all the page cache can be freed, otherwise the system will
	 * start swapping or thrashing. Assume at least half of the page
	 * cache, or the low watermark worth of cache, needs to stay.
	 */
	pagecache = global_node_page_state(NR_ACTIVE_FILE) +
		global_node_page_state(NR_INACTIVE_FILE);
	pagecache -= min(pagecache / 2, wmark_low);
	available += pagecache;

	/*
	 * Part of the reclaimable slab and other kernel memory consists of
	 * items that are in use, and cannot be freed. Cap this estimate at the
	 * low watermark.
	 */
	reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) +
		global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);
	reclaimable -= min(reclaimable / 2, wmark_low);
	available += reclaimable;

	if (available < 0)
		available = 0;
	return available;
}
EXPORT_SYMBOL_GPL(si_mem_available);

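/**
 * si_meminfo - fill in system-wide memory statistics
 * @val: sysinfo structure to populate
 *
 * Reports totals for RAM, free, shared, buffer and highmem pages, in units
 * of @val->mem_unit (PAGE_SIZE) bytes. This is the backend used by the
 * sysinfo(2) system call.
 */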
void si_meminfo(struct sysinfo *val)
{
	val->totalram = totalram_pages();
	val->sharedram = global_node_page_state(NR_SHMEM);
	val->freeram = global_zone_page_state(NR_FREE_PAGES);
	val->bufferram = nr_blockdev_pages();
	val->totalhigh = totalhigh_pages();
	val->freehigh = nr_free_highpages();
	val->mem_unit = PAGE_SIZE;
}
EXPORT_SYMBOL(si_meminfo);

#ifdef CONFIG_NUMA
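/**
 * si_meminfo_node - fill in memory statistics for a single NUMA node
 * @val: sysinfo structure to populate
 * @nid: node whose zones are summed
 *
 * Like si_meminfo(), but restricted to the zones of node @nid. Without
 * CONFIG_HIGHMEM the highmem fields remain zero.
 */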
void si_meminfo_node(struct sysinfo *val, int nid)
{
	int zone_type;		/* needs to be signed */
	unsigned long managed_pages = 0;
	unsigned long managed_highpages = 0;
	unsigned long free_highpages = 0;
	pg_data_t *pgdat = NODE_DATA(nid);

	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
		managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]);
	val->totalram = managed_pages;
	val->sharedram = node_page_state(pgdat, NR_SHMEM);
	val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
#ifdef CONFIG_HIGHMEM
	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
		struct zone *zone = &pgdat->node_zones[zone_type];

		if (is_highmem(zone)) {
			managed_highpages += zone_managed_pages(zone);
			free_highpages += zone_page_state(zone, NR_FREE_PAGES);
		}
	}
	val->totalhigh = managed_highpages;
	val->freehigh = free_highpages;
#else
	val->totalhigh = managed_highpages;
	val->freehigh = free_highpages;
#endif
	val->mem_unit = PAGE_SIZE;
}
#endif

/*
 * Determine whether the node should be displayed or not, depending on whether
 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
 */
static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
{
	if (!(flags & SHOW_MEM_FILTER_NODES))
		return false;

	/*
	 * No node mask, i.e. the implicit memory NUMA policy. Do not bother
	 * with the synchronization (read_mems_allowed_begin()) because we do
	 * not have to be precise here.
	 */
	if (!nodemask)
		nodemask = &cpuset_current_mems_allowed;

	return !node_isset(nid, *nodemask);
}

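/*
 * Print the migration types present in @type as single-letter codes,
 * e.g. "(UME) " for unmovable, movable and reclaimable.
 */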
static void show_migration_types(unsigned char type)
{
	static const char types[MIGRATE_TYPES] = {
		[MIGRATE_UNMOVABLE]	= 'U',
		[MIGRATE_MOVABLE]	= 'M',
		[MIGRATE_RECLAIMABLE]	= 'E',
		[MIGRATE_HIGHATOMIC]	= 'H',
#ifdef CONFIG_CMA
		[MIGRATE_CMA]		= 'C',
#endif
#ifdef CONFIG_MEMORY_ISOLATION
		[MIGRATE_ISOLATE]	= 'I',
#endif
	};
	char tmp[MIGRATE_TYPES + 1];
	char *p = tmp;
	int i;

	for (i = 0; i < MIGRATE_TYPES; i++) {
		if (type & (1 << i))
			*p++ = types[i];
	}

	*p = '\0';
	printk(KERN_CONT "(%s) ", tmp);
}

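/*
 * Return true if any zone of @pgdat up to and including @max_zone_idx has
 * pages managed by the buddy allocator.
 */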
static bool node_has_managed_zones(pg_data_t *pgdat, int max_zone_idx)
{
	int zone_idx;

	for (zone_idx = 0; zone_idx <= max_zone_idx; zone_idx++)
		if (zone_managed_pages(pgdat->node_zones + zone_idx))
			return true;
	return false;
}

/*
 * Show the state of memory across the system: a global summary of the
 * vmstat counters, per-node and per-zone breakdowns, and the contents of
 * each zone's buddy free lists.
 *
 * Bits in @filter:
 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
 *   cpuset.
 */
static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_zone_idx)
{
	unsigned long free_pcp = 0;
	int cpu, nid;
	struct zone *zone;
	pg_data_t *pgdat;

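	/* Sum the pages sitting on per-CPU free lists across all eligible zones. */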
	for_each_populated_zone(zone) {
		if (zone_idx(zone) > max_zone_idx)
			continue;
		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
			continue;

		for_each_online_cpu(cpu)
			free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;
	}

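	/* System-wide summary of the node- and zone-level vmstat counters. */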
	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
		" unevictable:%lu dirty:%lu writeback:%lu\n"
		" slab_reclaimable:%lu slab_unreclaimable:%lu\n"
		" mapped:%lu shmem:%lu pagetables:%lu\n"
		" sec_pagetables:%lu bounce:%lu\n"
		" kernel_misc_reclaimable:%lu\n"
		" free:%lu free_pcp:%lu free_cma:%lu\n",
		global_node_page_state(NR_ACTIVE_ANON),
		global_node_page_state(NR_INACTIVE_ANON),
		global_node_page_state(NR_ISOLATED_ANON),
		global_node_page_state(NR_ACTIVE_FILE),
		global_node_page_state(NR_INACTIVE_FILE),
		global_node_page_state(NR_ISOLATED_FILE),
		global_node_page_state(NR_UNEVICTABLE),
		global_node_page_state(NR_FILE_DIRTY),
		global_node_page_state(NR_WRITEBACK),
		global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B),
		global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B),
		global_node_page_state(NR_FILE_MAPPED),
		global_node_page_state(NR_SHMEM),
		global_node_page_state(NR_PAGETABLE),
		global_node_page_state(NR_SECONDARY_PAGETABLE),
		global_zone_page_state(NR_BOUNCE),
		global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE),
		global_zone_page_state(NR_FREE_PAGES),
		free_pcp,
		global_zone_page_state(NR_FREE_CMA_PAGES));

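	/* Per-node breakdown of the LRU, writeback and kernel-memory counters. */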
	for_each_online_pgdat(pgdat) {
		if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
			continue;
		if (!node_has_managed_zones(pgdat, max_zone_idx))
			continue;

		printk("Node %d"
			" active_anon:%lukB"
			" inactive_anon:%lukB"
			" active_file:%lukB"
			" inactive_file:%lukB"
			" unevictable:%lukB"
			" isolated(anon):%lukB"
			" isolated(file):%lukB"
			" mapped:%lukB"
			" dirty:%lukB"
			" writeback:%lukB"
			" shmem:%lukB"
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			" shmem_thp:%lukB"
			" shmem_pmdmapped:%lukB"
			" anon_thp:%lukB"
#endif
			" writeback_tmp:%lukB"
			" kernel_stack:%lukB"
#ifdef CONFIG_SHADOW_CALL_STACK
			" shadow_call_stack:%lukB"
#endif
			" pagetables:%lukB"
			" sec_pagetables:%lukB"
			" all_unreclaimable? %s"
			"\n",
			pgdat->node_id,
			K(node_page_state(pgdat, NR_ACTIVE_ANON)),
			K(node_page_state(pgdat, NR_INACTIVE_ANON)),
			K(node_page_state(pgdat, NR_ACTIVE_FILE)),
			K(node_page_state(pgdat, NR_INACTIVE_FILE)),
			K(node_page_state(pgdat, NR_UNEVICTABLE)),
			K(node_page_state(pgdat, NR_ISOLATED_ANON)),
			K(node_page_state(pgdat, NR_ISOLATED_FILE)),
			K(node_page_state(pgdat, NR_FILE_MAPPED)),
			K(node_page_state(pgdat, NR_FILE_DIRTY)),
			K(node_page_state(pgdat, NR_WRITEBACK)),
			K(node_page_state(pgdat, NR_SHMEM)),
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			K(node_page_state(pgdat, NR_SHMEM_THPS)),
			K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)),
			K(node_page_state(pgdat, NR_ANON_THPS)),
#endif
			K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
			node_page_state(pgdat, NR_KERNEL_STACK_KB),
#ifdef CONFIG_SHADOW_CALL_STACK
			node_page_state(pgdat, NR_KERNEL_SCS_KB),
#endif
			K(node_page_state(pgdat, NR_PAGETABLE)),
			K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
			pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
				"yes" : "no");
	}

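	/* Per-zone breakdown, including watermarks and per-CPU free-list state. */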
	for_each_populated_zone(zone) {
		int i;

		if (zone_idx(zone) > max_zone_idx)
			continue;
		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
			continue;

		free_pcp = 0;
		for_each_online_cpu(cpu)
			free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;

		show_node(zone);
		printk(KERN_CONT
			"%s"
			" free:%lukB"
			" boost:%lukB"
			" min:%lukB"
			" low:%lukB"
			" high:%lukB"
			" reserved_highatomic:%lukB"
			" active_anon:%lukB"
			" inactive_anon:%lukB"
			" active_file:%lukB"
			" inactive_file:%lukB"
			" unevictable:%lukB"
			" writepending:%lukB"
			" present:%lukB"
			" managed:%lukB"
			" mlocked:%lukB"
			" bounce:%lukB"
			" free_pcp:%lukB"
			" local_pcp:%ukB"
			" free_cma:%lukB"
			"\n",
			zone->name,
			K(zone_page_state(zone, NR_FREE_PAGES)),
			K(zone->watermark_boost),
			K(min_wmark_pages(zone)),
			K(low_wmark_pages(zone)),
			K(high_wmark_pages(zone)),
			K(zone->nr_reserved_highatomic),
			K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
			K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
			K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
			K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
			K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
			K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
			K(zone->present_pages),
			K(zone_managed_pages(zone)),
			K(zone_page_state(zone, NR_MLOCK)),
			K(zone_page_state(zone, NR_BOUNCE)),
			K(free_pcp),
			K(this_cpu_read(zone->per_cpu_pageset->count)),
			K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
		printk("lowmem_reserve[]:");
		for (i = 0; i < MAX_NR_ZONES; i++)
			printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
		printk(KERN_CONT "\n");
	}

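	/*
	 * Dump each zone's buddy free lists: the number of free blocks at
	 * each order, annotated with the migration types present there.
	 */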
	for_each_populated_zone(zone) {
		unsigned int order;
		unsigned long nr[NR_PAGE_ORDERS], flags, total = 0;
		unsigned char types[NR_PAGE_ORDERS];

		if (zone_idx(zone) > max_zone_idx)
			continue;
		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
			continue;
		show_node(zone);
		printk(KERN_CONT "%s: ", zone->name);

		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < NR_PAGE_ORDERS; order++) {
			struct free_area *area = &zone->free_area[order];
			int type;

			nr[order] = area->nr_free;
			total += nr[order] << order;

			types[order] = 0;
			for (type = 0; type < MIGRATE_TYPES; type++) {
				if (!free_area_empty(area, type))
					types[order] |= 1 << type;
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
		for (order = 0; order < NR_PAGE_ORDERS; order++) {
			printk(KERN_CONT "%lu*%lukB ",
			       nr[order], K(1UL) << order);
			if (nr[order])
				show_migration_types(types[order]);
		}
		printk(KERN_CONT "= %lukB\n", K(total));
	}

	for_each_online_node(nid) {
		if (show_mem_node_skip(filter, nid, nodemask))
			continue;
		hugetlb_show_meminfo_node(nid);
	}

	printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));

	show_swap_cache_info();
}

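/**
 * __show_mem - dump the state of system memory to the kernel log
 * @filter: SHOW_MEM_FILTER_* bits selecting which nodes to report
 * @nodemask: nodes to report, or NULL for the current cpuset's nodes
 * @max_zone_idx: highest zone index to include in the report
 *
 * Called from the OOM killer and other low-memory debugging paths, so it
 * only reads existing counters and does not allocate memory itself.
 */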
void __show_mem(unsigned int filter, nodemask_t *nodemask, int max_zone_idx)
{
	unsigned long total = 0, reserved = 0, highmem = 0;
	struct zone *zone;

	printk("Mem-Info:\n");
	show_free_areas(filter, nodemask, max_zone_idx);

	for_each_populated_zone(zone) {
		total += zone->present_pages;
		reserved += zone->present_pages - zone_managed_pages(zone);

		if (is_highmem(zone))
			highmem += zone->present_pages;
	}

	printk("%lu pages RAM\n", total);
	printk("%lu pages HighMem/MovableOnly\n", highmem);
	printk("%lu pages reserved\n", reserved);
#ifdef CONFIG_CMA
	printk("%lu pages cma reserved\n", totalcma_pages);
#endif
#ifdef CONFIG_MEMORY_FAILURE
	printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
#endif
}