// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/mmzone.c
 *
 * Management code for pgdats, zones and page flags
 */


#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/mmzone.h>

struct pglist_data *first_online_pgdat(void)
{
	return NODE_DATA(first_online_node);
}

struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
{
	int nid = next_online_node(pgdat->node_id);

	if (nid == MAX_NUMNODES)
		return NULL;
	return NODE_DATA(nid);
}
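
/*
 * Usage sketch (illustrative only, compiled out): the two helpers above
 * back the for_each_online_pgdat() iterator in include/linux/mmzone.h,
 * which expands to roughly the loop below. example_count_online_nodes()
 * is a hypothetical name, not a kernel symbol.
 */
#if 0
static int example_count_online_nodes(void)
{
	struct pglist_data *pgdat;
	int nr = 0;

	for (pgdat = first_online_pgdat(); pgdat;
	     pgdat = next_online_pgdat(pgdat))
		nr++;		/* visit each online node's pgdat once */

	return nr;
}
#endif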

/*
 * next_zone - helper magic for for_each_zone()
 */
struct zone *next_zone(struct zone *zone)
{
	pg_data_t *pgdat = zone->zone_pgdat;

	if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
		zone++;
	else {
		pgdat = next_online_pgdat(pgdat);
		if (pgdat)
			zone = pgdat->node_zones;
		else
			zone = NULL;
	}
	return zone;
}
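
/*
 * Usage sketch (illustrative only, compiled out): for_each_zone() walks
 * every zone of every online node by chaining next_zone(), roughly as
 * below. Unpopulated zones are returned too; callers filter with
 * populated_zone(). example_any_populated_zone() is a hypothetical name.
 */
#if 0
static bool example_any_populated_zone(void)
{
	struct zone *zone;

	for (zone = first_online_pgdat()->node_zones; zone;
	     zone = next_zone(zone))
		if (populated_zone(zone))	/* zone has present pages */
			return true;

	return false;
}
#endif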

static inline int zref_in_nodemask(struct zoneref *zref, nodemask_t *nodes)
{
#ifdef CONFIG_NUMA
	return node_isset(zonelist_node_idx(zref), *nodes);
#else
	return 1;
#endif /* CONFIG_NUMA */
}

/* Returns the next zone at or below highest_zoneidx in a zonelist */
struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	/*
	 * Find the next suitable zone to use for the allocation.
	 * Only filter based on nodemask if it's set
	 */
	if (unlikely(nodes == NULL))
		while (zonelist_zone_idx(z) > highest_zoneidx)
			z++;
	else
		while (zonelist_zone_idx(z) > highest_zoneidx ||
				(z->zone && !zref_in_nodemask(z, nodes)))
			z++;

	return z;
}
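
/*
 * Note: callers normally reach this via the next_zones_zonelist() inline
 * wrapper in include/linux/mmzone.h, which fast-paths the common "no
 * nodemask, first entry already low enough" case. A zonelist ends with a
 * zoneref whose zone is NULL and whose zone index is 0, so both loops
 * above are guaranteed to stop at the terminator.
 *
 * Usage sketch (illustrative only, compiled out); the helper name is
 * hypothetical:
 */
#if 0
static struct zone *example_first_usable_zone(gfp_t gfp, nodemask_t *nodes)
{
	struct zonelist *zonelist = node_zonelist(numa_node_id(), gfp);
	struct zoneref *z;

	/* first_zones_zonelist() is built on __next_zones_zonelist() */
	z = first_zones_zonelist(zonelist, gfp_zone(gfp), nodes);
	return zonelist_zone(z);	/* NULL if no zone qualifies */
}
#endif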

void lruvec_init(struct lruvec *lruvec)
{
	enum lru_list lru;

	memset(lruvec, 0, sizeof(struct lruvec));
	spin_lock_init(&lruvec->lru_lock);
	zswap_lruvec_state_init(lruvec);

	for_each_lru(lru)
		INIT_LIST_HEAD(&lruvec->lists[lru]);
	/*
	 * The "Unevictable LRU" is imaginary: though its size is maintained,
	 * it is never scanned, and unevictable pages are not threaded on it
	 * (so that their lru fields can be reused to hold mlock_count).
	 * Poison its list head, so that any operations on it would crash.
	 */
	list_del(&lruvec->lists[LRU_UNEVICTABLE]);

	lru_gen_init_lruvec(lruvec);
}
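
/*
 * Note on the list_del() above: list_del() points the entry's next and
 * prev at LIST_POISON1/LIST_POISON2, so a later list_add() to or
 * list_del() from lruvec->lists[LRU_UNEVICTABLE] faults immediately
 * rather than silently corrupting the lruvec.
 */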

#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
{
	unsigned long old_flags, flags;
	int last_cpupid;

	old_flags = READ_ONCE(folio->flags);
	do {
		flags = old_flags;
		last_cpupid = (flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;

		flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
		flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
	} while (unlikely(!try_cmpxchg(&folio->flags, &old_flags, flags)));

	return last_cpupid;
}
#endif
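
/*
 * Note on the loop above: on failure, try_cmpxchg() updates old_flags
 * with the value it found in folio->flags, so the retry re-reads the
 * flags word for free. This lock-free read-modify-write replaces only
 * the LAST_CPUPID bitfield and leaves all other page flags untouched.
 */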