1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * mm_init.c - Memory initialisation verification and debugging
4 *
5 * Copyright 2008 IBM Corporation, 2008
6 * Author Mel Gorman <mel@csn.ul.ie>
7 *
8 */
9#include <linux/kernel.h>
10#include <linux/init.h>
11#include <linux/kobject.h>
12#include <linux/export.h>
13#include <linux/memory.h>
14#include <linux/notifier.h>
15#include <linux/sched.h>
16#include <linux/mman.h>
17#include <linux/memblock.h>
18#include <linux/page-isolation.h>
19#include <linux/padata.h>
20#include <linux/nmi.h>
21#include <linux/buffer_head.h>
22#include <linux/kmemleak.h>
23#include <linux/kfence.h>
24#include <linux/page_ext.h>
25#include <linux/pti.h>
26#include <linux/pgtable.h>
27#include <linux/swap.h>
28#include <linux/cma.h>
29#include <linux/crash_dump.h>
30#include "internal.h"
31#include "slab.h"
32#include "shuffle.h"
33
34#include <asm/setup.h>
35
36#ifdef CONFIG_DEBUG_MEMORY_INIT
37int __meminitdata mminit_loglevel;
38
39/* The zonelists are simply reported, validation is manual. */
40void __init mminit_verify_zonelist(void)
41{
42	int nid;
43
44	if (mminit_loglevel < MMINIT_VERIFY)
45		return;
46
47	for_each_online_node(nid) {
48		pg_data_t *pgdat = NODE_DATA(nid);
49		struct zone *zone;
50		struct zoneref *z;
51		struct zonelist *zonelist;
52		int i, listid, zoneid;
53
54		BUILD_BUG_ON(MAX_ZONELISTS > 2);
55		for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) {
56
57			/* Identify the zone and nodelist */
58			zoneid = i % MAX_NR_ZONES;
59			listid = i / MAX_NR_ZONES;
60			zonelist = &pgdat->node_zonelists[listid];
61			zone = &pgdat->node_zones[zoneid];
62			if (!populated_zone(zone))
63				continue;
64
65			/* Print information about the zonelist */
66			printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ",
67				listid > 0 ? "thisnode" : "general", nid,
68				zone->name);
69
70			/* Iterate the zonelist */
71			for_each_zone_zonelist(zone, z, zonelist, zoneid)
72				pr_cont("%d:%s ", zone_to_nid(zone), zone->name);
73			pr_cont("\n");
74		}
75	}
76}
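/*
 * Purely illustrative: on a hypothetical single-node machine with populated
 * DMA32 and Normal zones and mminit_loglevel >= MMINIT_VERIFY, the loop
 * above emits lines shaped roughly like
 *
 *   mminit::zonelist general 0:Normal = 0:Normal 0:DMA32
 *   mminit::zonelist general 0:DMA32 = 0:DMA32
 *
 * i.e. "<list> <nid>:<zone> = " followed by the fallback order for that zone.
 */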
77
78void __init mminit_verify_pageflags_layout(void)
79{
80	int shift, width;
81	unsigned long or_mask, add_mask;
82
83	shift = BITS_PER_LONG;
84	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH
85		- LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH - LRU_GEN_WIDTH - LRU_REFS_WIDTH;
86	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
87		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Gen %d Tier %d Flags %d\n",
88		SECTIONS_WIDTH,
89		NODES_WIDTH,
90		ZONES_WIDTH,
91		LAST_CPUPID_WIDTH,
92		KASAN_TAG_WIDTH,
93		LRU_GEN_WIDTH,
94		LRU_REFS_WIDTH,
95		NR_PAGEFLAGS);
96	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
97		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n",
98		SECTIONS_SHIFT,
99		NODES_SHIFT,
100		ZONES_SHIFT,
101		LAST_CPUPID_SHIFT,
102		KASAN_TAG_WIDTH);
103	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
104		"Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu\n",
105		(unsigned long)SECTIONS_PGSHIFT,
106		(unsigned long)NODES_PGSHIFT,
107		(unsigned long)ZONES_PGSHIFT,
108		(unsigned long)LAST_CPUPID_PGSHIFT,
109		(unsigned long)KASAN_TAG_PGSHIFT);
110	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
111		"Node/Zone ID: %lu -> %lu\n",
112		(unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
113		(unsigned long)ZONEID_PGOFF);
114	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage",
115		"location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n",
116		shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0);
117#ifdef NODE_NOT_IN_PAGE_FLAGS
118	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
119		"Node not in page flags");
120#endif
121#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
122	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
123		"Last cpupid not in page flags");
124#endif
125
126	if (SECTIONS_WIDTH) {
127		shift -= SECTIONS_WIDTH;
128		BUG_ON(shift != SECTIONS_PGSHIFT);
129	}
130	if (NODES_WIDTH) {
131		shift -= NODES_WIDTH;
132		BUG_ON(shift != NODES_PGSHIFT);
133	}
134	if (ZONES_WIDTH) {
135		shift -= ZONES_WIDTH;
136		BUG_ON(shift != ZONES_PGSHIFT);
137	}
138
139	/* Check for bitmask overlaps */
140	or_mask = (ZONES_MASK << ZONES_PGSHIFT) |
141			(NODES_MASK << NODES_PGSHIFT) |
142			(SECTIONS_MASK << SECTIONS_PGSHIFT);
143	add_mask = (ZONES_MASK << ZONES_PGSHIFT) +
144			(NODES_MASK << NODES_PGSHIFT) +
145			(SECTIONS_MASK << SECTIONS_PGSHIFT);
146	BUG_ON(or_mask != add_mask);
147}
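/*
 * For orientation only: with a typical CONFIG_SPARSEMEM_VMEMMAP build the
 * section number is not stored in page->flags (SECTIONS_WIDTH == 0), so the
 * layout verified above is, from the most significant bit down, roughly
 * | NODE | ZONE | [LAST_CPUPID] | [KASAN_TAG] | ... | NR_PAGEFLAGS flag bits |
 * with the BUG_ON()s confirming that the *_PGSHIFT values match that packing.
 */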
148
149static __init int set_mminit_loglevel(char *str)
150{
151	get_option(&str, &mminit_loglevel);
152	return 0;
153}
154early_param("mminit_loglevel", set_mminit_loglevel);
155#endif /* CONFIG_DEBUG_MEMORY_INIT */
156
157struct kobject *mm_kobj;
158
159#ifdef CONFIG_SMP
160s32 vm_committed_as_batch = 32;
161
162void mm_compute_batch(int overcommit_policy)
163{
164	u64 memsized_batch;
165	s32 nr = num_present_cpus();
166	s32 batch = max_t(s32, nr*2, 32);
167	unsigned long ram_pages = totalram_pages();
168
169	/*
170	 * For policy OVERCOMMIT_NEVER, set batch size to 0.4% of
171	 * (total memory/#cpus), and lift it to 25% for other policies
172	 * to ease possible lock contention on the percpu_counter
173	 * vm_committed_as, while the max limit is INT_MAX
174	 */
175	if (overcommit_policy == OVERCOMMIT_NEVER)
176		memsized_batch = min_t(u64, ram_pages/nr/256, INT_MAX);
177	else
178		memsized_batch = min_t(u64, ram_pages/nr/4, INT_MAX);
179
180	vm_committed_as_batch = max_t(s32, memsized_batch, batch);
181}
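/*
 * Worked example (purely illustrative, assuming 4 KiB pages): with 64 present
 * CPUs and 64 GiB of RAM, ram_pages/nr = 16777216/64 = 262144 pages per CPU.
 * OVERCOMMIT_NEVER:  memsized_batch = 262144/256 = 1024  (~0.4% per CPU)
 * other policies:    memsized_batch = 262144/4   = 65536 (25% per CPU)
 * Either way the result is bounded below by batch = max(nr*2, 32) = 128.
 */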
182
183static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
184					unsigned long action, void *arg)
185{
186	switch (action) {
187	case MEM_ONLINE:
188	case MEM_OFFLINE:
189		mm_compute_batch(sysctl_overcommit_memory);
190		break;
191	default:
192		break;
193	}
194	return NOTIFY_OK;
195}
196
197static int __init mm_compute_batch_init(void)
198{
199	mm_compute_batch(sysctl_overcommit_memory);
200	hotplug_memory_notifier(mm_compute_batch_notifier, MM_COMPUTE_BATCH_PRI);
201	return 0;
202}
203
204__initcall(mm_compute_batch_init);
205
206#endif
207
208static int __init mm_sysfs_init(void)
209{
210	mm_kobj = kobject_create_and_add("mm", kernel_kobj);
211	if (!mm_kobj)
212		return -ENOMEM;
213
214	return 0;
215}
216postcore_initcall(mm_sysfs_init);
217
218static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
219static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
220static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;
221
222static unsigned long required_kernelcore __initdata;
223static unsigned long required_kernelcore_percent __initdata;
224static unsigned long required_movablecore __initdata;
225static unsigned long required_movablecore_percent __initdata;
226
227static unsigned long nr_kernel_pages __initdata;
228static unsigned long nr_all_pages __initdata;
229static unsigned long dma_reserve __initdata;
230
231static bool deferred_struct_pages __meminitdata;
232
233static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
234
235static int __init cmdline_parse_core(char *p, unsigned long *core,
236				     unsigned long *percent)
237{
238	unsigned long long coremem;
239	char *endptr;
240
241	if (!p)
242		return -EINVAL;
243
244	/* Value may be a percentage of total memory, otherwise bytes */
245	coremem = simple_strtoull(p, &endptr, 0);
246	if (*endptr == '%') {
247		/* Paranoid check for percent values greater than 100 */
248		WARN_ON(coremem > 100);
249
250		*percent = coremem;
251	} else {
252		coremem = memparse(p, &p);
253		/* Paranoid check that UL is enough for the coremem value */
254		WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
255
256		*core = coremem >> PAGE_SHIFT;
257		*percent = 0UL;
258	}
259	return 0;
260}
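/*
 * Illustrative command lines (assuming 4 KiB pages): "kernelcore=512M" makes
 * memparse() return 536870912, so *core = 131072 pages and *percent = 0;
 * "kernelcore=30%" leaves *core untouched and sets *percent = 30.
 */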
261
262bool mirrored_kernelcore __initdata_memblock;
263
264/*
265 * kernelcore=size sets the amount of memory for use for allocations that
266 * cannot be reclaimed or migrated.
267 */
268static int __init cmdline_parse_kernelcore(char *p)
269{
270	/* parse kernelcore=mirror */
271	if (parse_option_str(p, "mirror")) {
272		mirrored_kernelcore = true;
273		return 0;
274	}
275
276	return cmdline_parse_core(p, &required_kernelcore,
277				  &required_kernelcore_percent);
278}
279early_param("kernelcore", cmdline_parse_kernelcore);
280
281/*
282 * movablecore=size sets the amount of memory for use for allocations that
283 * can be reclaimed or migrated.
284 */
285static int __init cmdline_parse_movablecore(char *p)
286{
287	return cmdline_parse_core(p, &required_movablecore,
288				  &required_movablecore_percent);
289}
290early_param("movablecore", cmdline_parse_movablecore);
291
292/*
293 * early_calculate_totalpages()
294 * Sum pages in active regions for movable zone.
295 * Populate N_MEMORY for calculating usable_nodes.
296 */
297static unsigned long __init early_calculate_totalpages(void)
298{
299	unsigned long totalpages = 0;
300	unsigned long start_pfn, end_pfn;
301	int i, nid;
302
303	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
304		unsigned long pages = end_pfn - start_pfn;
305
306		totalpages += pages;
307		if (pages)
308			node_set_state(nid, N_MEMORY);
309	}
310	return totalpages;
311}
312
313/*
314 * This finds a zone that can be used for ZONE_MOVABLE pages. The
315 * assumption is made that zones within a node are ordered in monotonically
316 * increasing memory addresses so that the "highest" populated zone is used.
317 */
318static void __init find_usable_zone_for_movable(void)
319{
320	int zone_index;
321	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
322		if (zone_index == ZONE_MOVABLE)
323			continue;
324
325		if (arch_zone_highest_possible_pfn[zone_index] >
326				arch_zone_lowest_possible_pfn[zone_index])
327			break;
328	}
329
330	VM_BUG_ON(zone_index == -1);
331	movable_zone = zone_index;
332}
333
334/*
335 * Find the PFN the Movable zone begins in each node. Kernel memory
336 * is spread evenly between nodes as long as the nodes have enough
337 * memory. When they don't, some nodes will have more kernelcore than
338 * others
339 */
340static void __init find_zone_movable_pfns_for_nodes(void)
341{
342	int i, nid;
343	unsigned long usable_startpfn;
344	unsigned long kernelcore_node, kernelcore_remaining;
345	/* save the state before borrowing the nodemask */
346	nodemask_t saved_node_state = node_states[N_MEMORY];
347	unsigned long totalpages = early_calculate_totalpages();
348	int usable_nodes = nodes_weight(node_states[N_MEMORY]);
349	struct memblock_region *r;
350
351	/* Need to find movable_zone earlier when movable_node is specified. */
352	find_usable_zone_for_movable();
353
354	/*
355	 * If movable_node is specified, ignore kernelcore and movablecore
356	 * options.
357	 */
358	if (movable_node_is_enabled()) {
359		for_each_mem_region(r) {
360			if (!memblock_is_hotpluggable(r))
361				continue;
362
363			nid = memblock_get_region_node(r);
364
365			usable_startpfn = PFN_DOWN(r->base);
366			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
367				min(usable_startpfn, zone_movable_pfn[nid]) :
368				usable_startpfn;
369		}
370
371		goto out2;
372	}
373
374	/*
375	 * If kernelcore=mirror is specified, ignore movablecore option
376	 */
377	if (mirrored_kernelcore) {
378		bool mem_below_4gb_not_mirrored = false;
379
380		if (!memblock_has_mirror()) {
381			pr_warn("The system has no mirror memory, ignore kernelcore=mirror.\n");
382			goto out;
383		}
384
385		if (is_kdump_kernel()) {
386			pr_warn("The system is under kdump, ignore kernelcore=mirror.\n");
387			goto out;
388		}
389
390		for_each_mem_region(r) {
391			if (memblock_is_mirror(r))
392				continue;
393
394			nid = memblock_get_region_node(r);
395
396			usable_startpfn = memblock_region_memory_base_pfn(r);
397
398			if (usable_startpfn < PHYS_PFN(SZ_4G)) {
399				mem_below_4gb_not_mirrored = true;
400				continue;
401			}
402
403			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
404				min(usable_startpfn, zone_movable_pfn[nid]) :
405				usable_startpfn;
406		}
407
408		if (mem_below_4gb_not_mirrored)
409			pr_warn("This configuration results in unmirrored kernel memory.\n");
410
411		goto out2;
412	}
413
414	/*
415	 * If kernelcore=nn% or movablecore=nn% was specified, calculate the
416	 * amount of necessary memory.
417	 */
418	if (required_kernelcore_percent)
419		required_kernelcore = (totalpages * 100 * required_kernelcore_percent) /
420				       10000UL;
421	if (required_movablecore_percent)
422		required_movablecore = (totalpages * 100 * required_movablecore_percent) /
423					10000UL;
424
425	/*
426	 * If movablecore= was specified, calculate what size of
427	 * kernelcore that corresponds so that memory usable for
428	 * any allocation type is evenly spread. If both kernelcore
429	 * and movablecore are specified, then the value of kernelcore
430	 * will be used for required_kernelcore if it's greater than
431	 * what movablecore would have allowed.
432	 */
433	if (required_movablecore) {
434		unsigned long corepages;
435
436		/*
437		 * Round-up so that ZONE_MOVABLE is at least as large as what
438		 * was requested by the user
439		 */
440		required_movablecore =
441			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
442		required_movablecore = min(totalpages, required_movablecore);
443		corepages = totalpages - required_movablecore;
444
445		required_kernelcore = max(required_kernelcore, corepages);
446	}
447
448	/*
449	 * If kernelcore was not specified or kernelcore size is larger
450	 * than totalpages, there is no ZONE_MOVABLE.
451	 */
452	if (!required_kernelcore || required_kernelcore >= totalpages)
453		goto out;
454
455	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
456	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
457
458restart:
459	/* Spread kernelcore memory as evenly as possible throughout nodes */
460	kernelcore_node = required_kernelcore / usable_nodes;
461	for_each_node_state(nid, N_MEMORY) {
462		unsigned long start_pfn, end_pfn;
463
464		/*
465		 * Recalculate kernelcore_node if the division per node
466		 * now exceeds what is necessary to satisfy the requested
467		 * amount of memory for the kernel
468		 */
469		if (required_kernelcore < kernelcore_node)
470			kernelcore_node = required_kernelcore / usable_nodes;
471
472		/*
473		 * As the map is walked, we track how much memory is usable
474		 * by the kernel using kernelcore_remaining. When it is
475		 * 0, the rest of the node is usable by ZONE_MOVABLE
476		 */
477		kernelcore_remaining = kernelcore_node;
478
479		/* Go through each range of PFNs within this node */
480		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
481			unsigned long size_pages;
482
483			start_pfn = max(start_pfn, zone_movable_pfn[nid]);
484			if (start_pfn >= end_pfn)
485				continue;
486
487			/* Account for what is only usable for kernelcore */
488			if (start_pfn < usable_startpfn) {
489				unsigned long kernel_pages;
490				kernel_pages = min(end_pfn, usable_startpfn)
491								- start_pfn;
492
493				kernelcore_remaining -= min(kernel_pages,
494							kernelcore_remaining);
495				required_kernelcore -= min(kernel_pages,
496							required_kernelcore);
497
498				/* Continue if range is now fully accounted */
499				if (end_pfn <= usable_startpfn) {
500
501					/*
502					 * Push zone_movable_pfn to the end so
503					 * that if we have to rebalance
504					 * kernelcore across nodes, we will
505					 * not double account here
506					 */
507					zone_movable_pfn[nid] = end_pfn;
508					continue;
509				}
510				start_pfn = usable_startpfn;
511			}
512
513			/*
514			 * The usable PFN range for ZONE_MOVABLE is from
515			 * start_pfn->end_pfn. Calculate size_pages as the
516			 * number of pages used as kernelcore
517			 */
518			size_pages = end_pfn - start_pfn;
519			if (size_pages > kernelcore_remaining)
520				size_pages = kernelcore_remaining;
521			zone_movable_pfn[nid] = start_pfn + size_pages;
522
523			/*
524			 * Some kernelcore has been met, update counts and
525			 * break if the kernelcore for this node has been
526			 * satisfied
527			 */
528			required_kernelcore -= min(required_kernelcore,
529								size_pages);
530			kernelcore_remaining -= size_pages;
531			if (!kernelcore_remaining)
532				break;
533		}
534	}
535
536	/*
537	 * If there is still required_kernelcore, we do another pass with one
538	 * less node in the count. This will push zone_movable_pfn[nid] further
539	 * along on the nodes that still have memory until kernelcore is
540	 * satisfied
541	 */
542	usable_nodes--;
543	if (usable_nodes && required_kernelcore > usable_nodes)
544		goto restart;
545
546out2:
547	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
548	for (nid = 0; nid < MAX_NUMNODES; nid++) {
549		unsigned long start_pfn, end_pfn;
550
551		zone_movable_pfn[nid] =
552			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
553
554		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
555		if (zone_movable_pfn[nid] >= end_pfn)
556			zone_movable_pfn[nid] = 0;
557	}
558
559out:
560	/* restore the node_state */
561	node_states[N_MEMORY] = saved_node_state;
562}
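/*
 * Rough example of the spreading above (ignoring low-zone carve-outs and the
 * MAX_ORDER_NR_PAGES alignment): two nodes with 4 GiB each (1048576 pages of
 * 4 KiB) and "kernelcore=4G" gives kernelcore_node = 524288 pages per node,
 * so zone_movable_pfn[] ends up roughly 2 GiB into each node and the upper
 * 2 GiB of every node becomes ZONE_MOVABLE.
 */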
563
564void __meminit __init_single_page(struct page *page, unsigned long pfn,
565				unsigned long zone, int nid)
566{
567	mm_zero_struct_page(page);
568	set_page_links(page, zone, nid, pfn);
569	init_page_count(page);
570	page_mapcount_reset(page);
571	page_cpupid_reset_last(page);
572	page_kasan_tag_reset(page);
573
574	INIT_LIST_HEAD(&page->lru);
575#ifdef WANT_PAGE_VIRTUAL
576	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
577	if (!is_highmem_idx(zone))
578		set_page_address(page, __va(pfn << PAGE_SHIFT));
579#endif
580}
581
582#ifdef CONFIG_NUMA
583/*
584 * During memory init memblocks map pfns to nids. The search is expensive and
585 * this caches recent lookups. The implementation of __early_pfn_to_nid
586 * treats start/end as pfns.
587 */
588struct mminit_pfnnid_cache {
589	unsigned long last_start;
590	unsigned long last_end;
591	int last_nid;
592};
593
594static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
595
596/*
597 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
598 */
599static int __meminit __early_pfn_to_nid(unsigned long pfn,
600					struct mminit_pfnnid_cache *state)
601{
602	unsigned long start_pfn, end_pfn;
603	int nid;
604
605	if (state->last_start <= pfn && pfn < state->last_end)
606		return state->last_nid;
607
608	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
609	if (nid != NUMA_NO_NODE) {
610		state->last_start = start_pfn;
611		state->last_end = end_pfn;
612		state->last_nid = nid;
613	}
614
615	return nid;
616}
617
618int __meminit early_pfn_to_nid(unsigned long pfn)
619{
620	static DEFINE_SPINLOCK(early_pfn_lock);
621	int nid;
622
623	spin_lock(&early_pfn_lock);
624	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
625	if (nid < 0)
626		nid = first_online_node;
627	spin_unlock(&early_pfn_lock);
628
629	return nid;
630}
631
632int hashdist = HASHDIST_DEFAULT;
633
634static int __init set_hashdist(char *str)
635{
636	if (!str)
637		return 0;
638	hashdist = simple_strtoul(str, &str, 0);
639	return 1;
640}
641__setup("hashdist=", set_hashdist);
642
643static inline void fixup_hashdist(void)
644{
645	if (num_node_state(N_MEMORY) == 1)
646		hashdist = 0;
647}
648#else
649static inline void fixup_hashdist(void) {}
650#endif /* CONFIG_NUMA */
651
652#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
653static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
654{
655	pgdat->first_deferred_pfn = ULONG_MAX;
656}
657
658/* Returns true if the struct page for the pfn is initialised */
659static inline bool __meminit early_page_initialised(unsigned long pfn, int nid)
660{
661	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
662		return false;
663
664	return true;
665}
666
667/*
668 * Returns true when the remaining initialisation should be deferred until
669 * later in the boot cycle when it can be parallelised.
670 */
671static bool __meminit
672defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
673{
674	static unsigned long prev_end_pfn, nr_initialised;
675
676	if (early_page_ext_enabled())
677		return false;
678	/*
679	 * prev_end_pfn is a static that holds the end of the previous zone.
680	 * No need to protect because called very early in boot before smp_init.
681	 */
682	if (prev_end_pfn != end_pfn) {
683		prev_end_pfn = end_pfn;
684		nr_initialised = 0;
685	}
686
687	/* Always populate low zones for address-constrained allocations */
688	if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
689		return false;
690
691	if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
692		return true;
693	/*
694	 * We start with only one section of pages; more pages are added as
695	 * needed until the rest of deferred pages are initialized.
696	 */
697	nr_initialised++;
698	if ((nr_initialised > PAGES_PER_SECTION) &&
699	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
700		NODE_DATA(nid)->first_deferred_pfn = pfn;
701		return true;
702	}
703	return false;
704}
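/*
 * Example (typical x86-64 SPARSEMEM with 4 KiB pages): PAGES_PER_SECTION is
 * 32768, i.e. 128 MiB worth of struct pages per node are initialised eagerly
 * (plus everything in the address-constrained low zones); the first
 * section-aligned pfn after that becomes first_deferred_pfn and the remainder
 * is handed to the deferred init threads later in boot.
 */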
705
706static void __meminit init_reserved_page(unsigned long pfn, int nid)
707{
708	pg_data_t *pgdat;
709	int zid;
710
711	if (early_page_initialised(pfn, nid))
712		return;
713
714	pgdat = NODE_DATA(nid);
715
716	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
717		struct zone *zone = &pgdat->node_zones[zid];
718
719		if (zone_spans_pfn(zone, pfn))
720			break;
721	}
722	__init_single_page(pfn_to_page(pfn), pfn, zid, nid);
723}
724#else
725static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
726
727static inline bool early_page_initialised(unsigned long pfn, int nid)
728{
729	return true;
730}
731
732static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
733{
734	return false;
735}
736
737static inline void init_reserved_page(unsigned long pfn, int nid)
738{
739}
740#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
741
742/*
743 * Initialised pages do not have PageReserved set. This function is
744 * called for each range allocated by the bootmem allocator and
745 * marks the pages PageReserved. The remaining valid pages are later
746 * sent to the buddy page allocator.
747 */
748void __meminit reserve_bootmem_region(phys_addr_t start,
749				      phys_addr_t end, int nid)
750{
751	unsigned long start_pfn = PFN_DOWN(start);
752	unsigned long end_pfn = PFN_UP(end);
753
754	for (; start_pfn < end_pfn; start_pfn++) {
755		if (pfn_valid(start_pfn)) {
756			struct page *page = pfn_to_page(start_pfn);
757
758			init_reserved_page(start_pfn, nid);
759
760			/* Avoid false-positive PageTail() */
761			INIT_LIST_HEAD(&page->lru);
762
763			/*
764			 * no need for atomic set_bit because the struct
765			 * page is not visible yet so nobody should
766			 * access it yet.
767			 */
768			__SetPageReserved(page);
769		}
770	}
771}
772
773/* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
774static bool __meminit
775overlap_memmap_init(unsigned long zone, unsigned long *pfn)
776{
777	static struct memblock_region *r;
778
779	if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
780		if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
781			for_each_mem_region(r) {
782				if (*pfn < memblock_region_memory_end_pfn(r))
783					break;
784			}
785		}
786		if (*pfn >= memblock_region_memory_base_pfn(r) &&
787		    memblock_is_mirror(r)) {
788			*pfn = memblock_region_memory_end_pfn(r);
789			return true;
790		}
791	}
792	return false;
793}
794
795/*
796 * Only struct pages that correspond to ranges defined by memblock.memory
797 * are zeroed and initialized by going through __init_single_page() during
798 * memmap_init_zone_range().
799 *
800 * But, there could be struct pages that correspond to holes in
801 * memblock.memory. This can happen because of the following reasons:
802 * - physical memory bank size is not necessarily the exact multiple of the
803 *   arbitrary section size
804 * - early reserved memory may not be listed in memblock.memory
805 * - non-memory regions covered by the contiguous flatmem mapping
806 * - memory layouts defined with memmap= kernel parameter may not align
807 *   nicely with memmap sections
808 *
809 * Explicitly initialize those struct pages so that:
810 * - PG_reserved is set
811 * - zone and node links point to zone and node that span the page if the
812 *   hole is in the middle of a zone
813 * - zone and node links point to adjacent zone/node if the hole falls on
814 *   the zone boundary; the pages in such holes will be prepended to the
815 *   zone/node above the hole except for the trailing pages in the last
816 *   section that will be appended to the zone/node below.
817 */
818static void __init init_unavailable_range(unsigned long spfn,
819					  unsigned long epfn,
820					  int zone, int node)
821{
822	unsigned long pfn;
823	u64 pgcnt = 0;
824
825	for (pfn = spfn; pfn < epfn; pfn++) {
826		if (!pfn_valid(pageblock_start_pfn(pfn))) {
827			pfn = pageblock_end_pfn(pfn) - 1;
828			continue;
829		}
830		__init_single_page(pfn_to_page(pfn), pfn, zone, node);
831		__SetPageReserved(pfn_to_page(pfn));
832		pgcnt++;
833	}
834
835	if (pgcnt)
836		pr_info("On node %d, zone %s: %lld pages in unavailable ranges\n",
837			node, zone_names[zone], pgcnt);
838}
839
840/*
841 * Initially all pages are reserved - free ones are freed
842 * up by memblock_free_all() once the early boot process is
843 * done. Non-atomic initialization, single-pass.
844 *
845 * All aligned pageblocks are initialized to the specified migratetype
846 * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
847 * zone stats (e.g., nr_isolate_pageblock) are touched.
848 */
849void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone,
850		unsigned long start_pfn, unsigned long zone_end_pfn,
851		enum meminit_context context,
852		struct vmem_altmap *altmap, int migratetype)
853{
854	unsigned long pfn, end_pfn = start_pfn + size;
855	struct page *page;
856
857	if (highest_memmap_pfn < end_pfn - 1)
858		highest_memmap_pfn = end_pfn - 1;
859
860#ifdef CONFIG_ZONE_DEVICE
861	/*
862	 * Honor reservation requested by the driver for this ZONE_DEVICE
863	 * memory. We limit the total number of pages to initialize to just
864	 * those that might contain the memory mapping. We will defer the
865	 * ZONE_DEVICE page initialization until after we have released
866	 * the hotplug lock.
867	 */
868	if (zone == ZONE_DEVICE) {
869		if (!altmap)
870			return;
871
872		if (start_pfn == altmap->base_pfn)
873			start_pfn += altmap->reserve;
874		end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
875	}
876#endif
877
878	for (pfn = start_pfn; pfn < end_pfn; ) {
879		/*
880		 * There can be holes in boot-time mem_map[]s handed to this
881		 * function.  They do not exist on hotplugged memory.
882		 */
883		if (context == MEMINIT_EARLY) {
884			if (overlap_memmap_init(zone, &pfn))
885				continue;
886			if (defer_init(nid, pfn, zone_end_pfn)) {
887				deferred_struct_pages = true;
888				break;
889			}
890		}
891
892		page = pfn_to_page(pfn);
893		__init_single_page(page, pfn, zone, nid);
894		if (context == MEMINIT_HOTPLUG)
895			__SetPageReserved(page);
896
897		/*
898		 * Usually, we want to mark the pageblock MIGRATE_MOVABLE,
899		 * such that unmovable allocations won't be scattered all
900		 * over the place during system boot.
901		 */
902		if (pageblock_aligned(pfn)) {
903			set_pageblock_migratetype(page, migratetype);
904			cond_resched();
905		}
906		pfn++;
907	}
908}
909
910static void __init memmap_init_zone_range(struct zone *zone,
911					  unsigned long start_pfn,
912					  unsigned long end_pfn,
913					  unsigned long *hole_pfn)
914{
915	unsigned long zone_start_pfn = zone->zone_start_pfn;
916	unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
917	int nid = zone_to_nid(zone), zone_id = zone_idx(zone);
918
919	start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
920	end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);
921
922	if (start_pfn >= end_pfn)
923		return;
924
925	memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn,
926			  zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
927
928	if (*hole_pfn < start_pfn)
929		init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid);
930
931	*hole_pfn = end_pfn;
932}
933
934static void __init memmap_init(void)
935{
936	unsigned long start_pfn, end_pfn;
937	unsigned long hole_pfn = 0;
938	int i, j, zone_id = 0, nid;
939
940	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
941		struct pglist_data *node = NODE_DATA(nid);
942
943		for (j = 0; j < MAX_NR_ZONES; j++) {
944			struct zone *zone = node->node_zones + j;
945
946			if (!populated_zone(zone))
947				continue;
948
949			memmap_init_zone_range(zone, start_pfn, end_pfn,
950					       &hole_pfn);
951			zone_id = j;
952		}
953	}
954
955#ifdef CONFIG_SPARSEMEM
956	/*
957	 * Initialize the memory map for the hole in the range [memory_end,
958	 * section_end].
959	 * Append the pages in this hole to the highest zone in the last
960	 * node.
961	 * The call to init_unavailable_range() is outside the ifdef to
962	 * silence the compiler warning about zone_id set but not used;
963	 * for FLATMEM it is a nop anyway
964	 */
965	end_pfn = round_up(end_pfn, PAGES_PER_SECTION);
966	if (hole_pfn < end_pfn)
967#endif
968		init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
969}
970
971#ifdef CONFIG_ZONE_DEVICE
972static void __ref __init_zone_device_page(struct page *page, unsigned long pfn,
973					  unsigned long zone_idx, int nid,
974					  struct dev_pagemap *pgmap)
975{
976
977	__init_single_page(page, pfn, zone_idx, nid);
978
979	/*
980	 * Mark page reserved as it will need to wait for onlining
981	 * phase for it to be fully associated with a zone.
982	 *
983	 * We can use the non-atomic __set_bit operation for setting
984	 * the flag as we are still initializing the pages.
985	 */
986	__SetPageReserved(page);
987
988	/*
989	 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
990	 * and zone_device_data.  It is a bug if a ZONE_DEVICE page is
991	 * ever freed or placed on a driver-private list.
992	 */
993	page->pgmap = pgmap;
994	page->zone_device_data = NULL;
995
996	/*
997	 * Mark the block movable so that blocks are reserved for
998	 * movable at startup. This will force kernel allocations
999	 * to reserve their blocks rather than leaking throughout
1000	 * the address space during boot when many long-lived
1001	 * kernel allocations are made.
1002	 *
1003	 * Please note that MEMINIT_HOTPLUG path doesn't clear memmap
1004	 * because this is done early in section_activate()
1005	 */
1006	if (pageblock_aligned(pfn)) {
1007		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1008		cond_resched();
1009	}
1010
1011	/*
1012	 * ZONE_DEVICE pages are released directly to the driver page allocator
1013	 * which will set the page count to 1 when allocating the page.
1014	 */
1015	if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
1016	    pgmap->type == MEMORY_DEVICE_COHERENT)
1017		set_page_count(page, 0);
1018}
1019
1020/*
1021 * With compound page geometry and when struct pages are stored in RAM most
1022 * tail pages are reused. Consequently, the number of unique struct pages to
1023 * initialize is a lot smaller than the total number of struct pages being
1024 * mapped. This is a paired / mild layering violation with explicit knowledge
1025 * of how the sparse_vmemmap internals handle compound pages in the lack
1026 * of an altmap. See vmemmap_populate_compound_pages().
1027 */
1028static inline unsigned long compound_nr_pages(struct vmem_altmap *altmap,
1029					      struct dev_pagemap *pgmap)
1030{
1031	if (!vmemmap_can_optimize(altmap, pgmap))
1032		return pgmap_vmemmap_nr(pgmap);
1033
1034	return VMEMMAP_RESERVE_NR * (PAGE_SIZE / sizeof(struct page));
1035}
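/*
 * For scale (illustrative, assuming 4 KiB pages and a 64-byte struct page):
 * PAGE_SIZE / sizeof(struct page) == 64, so when the vmemmap optimization
 * applies only VMEMMAP_RESERVE_NR * 64 struct pages per compound device page
 * are unique and need initialising, independent of how large
 * pgmap->vmemmap_shift makes the compound page itself.
 */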
1036
1037static void __ref memmap_init_compound(struct page *head,
1038				       unsigned long head_pfn,
1039				       unsigned long zone_idx, int nid,
1040				       struct dev_pagemap *pgmap,
1041				       unsigned long nr_pages)
1042{
1043	unsigned long pfn, end_pfn = head_pfn + nr_pages;
1044	unsigned int order = pgmap->vmemmap_shift;
1045
1046	__SetPageHead(head);
1047	for (pfn = head_pfn + 1; pfn < end_pfn; pfn++) {
1048		struct page *page = pfn_to_page(pfn);
1049
1050		__init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
1051		prep_compound_tail(head, pfn - head_pfn);
1052		set_page_count(page, 0);
1053
1054		/*
1055		 * The first tail page stores important compound page info.
1056		 * Call prep_compound_head() after the first tail page has
1057		 * been initialized, to not have the data overwritten.
1058		 */
1059		if (pfn == head_pfn + 1)
1060			prep_compound_head(head, order);
1061	}
1062}
1063
1064void __ref memmap_init_zone_device(struct zone *zone,
1065				   unsigned long start_pfn,
1066				   unsigned long nr_pages,
1067				   struct dev_pagemap *pgmap)
1068{
1069	unsigned long pfn, end_pfn = start_pfn + nr_pages;
1070	struct pglist_data *pgdat = zone->zone_pgdat;
1071	struct vmem_altmap *altmap = pgmap_altmap(pgmap);
1072	unsigned int pfns_per_compound = pgmap_vmemmap_nr(pgmap);
1073	unsigned long zone_idx = zone_idx(zone);
1074	unsigned long start = jiffies;
1075	int nid = pgdat->node_id;
1076
1077	if (WARN_ON_ONCE(!pgmap || zone_idx != ZONE_DEVICE))
1078		return;
1079
1080	/*
1081	 * The call to memmap_init should have already taken care
1082	 * of the pages reserved for the memmap, so we can just jump to
1083	 * the end of that region and start processing the device pages.
1084	 */
1085	if (altmap) {
1086		start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
1087		nr_pages = end_pfn - start_pfn;
1088	}
1089
1090	for (pfn = start_pfn; pfn < end_pfn; pfn += pfns_per_compound) {
1091		struct page *page = pfn_to_page(pfn);
1092
1093		__init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
1094
1095		if (pfns_per_compound == 1)
1096			continue;
1097
1098		memmap_init_compound(page, pfn, zone_idx, nid, pgmap,
1099				     compound_nr_pages(altmap, pgmap));
1100	}
1101
1102	pr_debug("%s initialised %lu pages in %ums\n", __func__,
1103		nr_pages, jiffies_to_msecs(jiffies - start));
1104}
1105#endif
1106
1107/*
1108 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
1109 * because it is sized independent of architecture. Unlike the other zones,
1110 * the starting point for ZONE_MOVABLE is not fixed. It may be different
1111 * in each node depending on the size of each node and how evenly kernelcore
1112 * is distributed. This helper function adjusts the zone ranges
1113 * provided by the architecture for a given node by using the end of the
1114 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
1115 * zones within a node are in order of monotonically increasing memory addresses.
1116 */
1117static void __init adjust_zone_range_for_zone_movable(int nid,
1118					unsigned long zone_type,
1119					unsigned long node_end_pfn,
1120					unsigned long *zone_start_pfn,
1121					unsigned long *zone_end_pfn)
1122{
1123	/* Only adjust if ZONE_MOVABLE is on this node */
1124	if (zone_movable_pfn[nid]) {
1125		/* Size ZONE_MOVABLE */
1126		if (zone_type == ZONE_MOVABLE) {
1127			*zone_start_pfn = zone_movable_pfn[nid];
1128			*zone_end_pfn = min(node_end_pfn,
1129				arch_zone_highest_possible_pfn[movable_zone]);
1130
1131		/* Adjust for ZONE_MOVABLE starting within this range */
1132		} else if (!mirrored_kernelcore &&
1133			*zone_start_pfn < zone_movable_pfn[nid] &&
1134			*zone_end_pfn > zone_movable_pfn[nid]) {
1135			*zone_end_pfn = zone_movable_pfn[nid];
1136
1137		/* Check if this whole range is within ZONE_MOVABLE */
1138		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
1139			*zone_start_pfn = *zone_end_pfn;
1140	}
1141}
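/*
 * Illustrative effect: if a node spans pfns 0x100000-0x200000 and
 * zone_movable_pfn[nid] == 0x180000 (non-mirrored kernelcore), a ZONE_NORMAL
 * range covering the whole node is clipped to end at 0x180000, ZONE_MOVABLE
 * is sized from 0x180000 up to the node end, and any zone lying entirely
 * above 0x180000 is emptied (*zone_start_pfn = *zone_end_pfn).
 */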
1142
1143/*
1144 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
1145 * then all holes in the requested range will be accounted for.
1146 */
1147unsigned long __init __absent_pages_in_range(int nid,
1148				unsigned long range_start_pfn,
1149				unsigned long range_end_pfn)
1150{
1151	unsigned long nr_absent = range_end_pfn - range_start_pfn;
1152	unsigned long start_pfn, end_pfn;
1153	int i;
1154
1155	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
1156		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
1157		end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
1158		nr_absent -= end_pfn - start_pfn;
1159	}
1160	return nr_absent;
1161}
1162
1163/**
1164 * absent_pages_in_range - Return number of page frames in holes within a range
1165 * @start_pfn: The start PFN to start searching for holes
1166 * @end_pfn: The end PFN to stop searching for holes
1167 *
1168 * Return: the number of page frames in memory holes within the range.
1169 */
1170unsigned long __init absent_pages_in_range(unsigned long start_pfn,
1171							unsigned long end_pfn)
1172{
1173	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
1174}
1175
1176/* Return the number of page frames in holes in a zone on a node */
1177static unsigned long __init zone_absent_pages_in_node(int nid,
1178					unsigned long zone_type,
1179					unsigned long zone_start_pfn,
1180					unsigned long zone_end_pfn)
1181{
1182	unsigned long nr_absent;
1183
1184	/* zone is empty, we don't have any absent pages */
1185	if (zone_start_pfn == zone_end_pfn)
1186		return 0;
1187
1188	nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
1189
1190	/*
1191	 * ZONE_MOVABLE handling.
1192	 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
1193	 * and vice versa.
1194	 */
1195	if (mirrored_kernelcore && zone_movable_pfn[nid]) {
1196		unsigned long start_pfn, end_pfn;
1197		struct memblock_region *r;
1198
1199		for_each_mem_region(r) {
1200			start_pfn = clamp(memblock_region_memory_base_pfn(r),
1201					  zone_start_pfn, zone_end_pfn);
1202			end_pfn = clamp(memblock_region_memory_end_pfn(r),
1203					zone_start_pfn, zone_end_pfn);
1204
1205			if (zone_type == ZONE_MOVABLE &&
1206			    memblock_is_mirror(r))
1207				nr_absent += end_pfn - start_pfn;
1208
1209			if (zone_type == ZONE_NORMAL &&
1210			    !memblock_is_mirror(r))
1211				nr_absent += end_pfn - start_pfn;
1212		}
1213	}
1214
1215	return nr_absent;
1216}
1217
1218/*
1219 * Return the number of pages a zone spans in a node, including holes
1220 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
1221 */
1222static unsigned long __init zone_spanned_pages_in_node(int nid,
1223					unsigned long zone_type,
1224					unsigned long node_start_pfn,
1225					unsigned long node_end_pfn,
1226					unsigned long *zone_start_pfn,
1227					unsigned long *zone_end_pfn)
1228{
1229	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
1230	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
1231
1232	/* Get the start and end of the zone */
1233	*zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
1234	*zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
1235	adjust_zone_range_for_zone_movable(nid, zone_type, node_end_pfn,
1236					   zone_start_pfn, zone_end_pfn);
1237
1238	/* Check that this node has pages within the zone's required range */
1239	if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
1240		return 0;
1241
1242	/* Move the zone boundaries inside the node if necessary */
1243	*zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
1244	*zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
1245
1246	/* Return the spanned pages */
1247	return *zone_end_pfn - *zone_start_pfn;
1248}
1249
1250static void __init reset_memoryless_node_totalpages(struct pglist_data *pgdat)
1251{
1252	struct zone *z;
1253
1254	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) {
1255		z->zone_start_pfn = 0;
1256		z->spanned_pages = 0;
1257		z->present_pages = 0;
1258#if defined(CONFIG_MEMORY_HOTPLUG)
1259		z->present_early_pages = 0;
1260#endif
1261	}
1262
1263	pgdat->node_spanned_pages = 0;
1264	pgdat->node_present_pages = 0;
1265	pr_debug("On node %d totalpages: 0\n", pgdat->node_id);
1266}
1267
1268static void __init calculate_node_totalpages(struct pglist_data *pgdat,
1269						unsigned long node_start_pfn,
1270						unsigned long node_end_pfn)
1271{
1272	unsigned long realtotalpages = 0, totalpages = 0;
1273	enum zone_type i;
1274
1275	for (i = 0; i < MAX_NR_ZONES; i++) {
1276		struct zone *zone = pgdat->node_zones + i;
1277		unsigned long zone_start_pfn, zone_end_pfn;
1278		unsigned long spanned, absent;
1279		unsigned long real_size;
1280
1281		spanned = zone_spanned_pages_in_node(pgdat->node_id, i,
1282						     node_start_pfn,
1283						     node_end_pfn,
1284						     &zone_start_pfn,
1285						     &zone_end_pfn);
1286		absent = zone_absent_pages_in_node(pgdat->node_id, i,
1287						   zone_start_pfn,
1288						   zone_end_pfn);
1289
1290		real_size = spanned - absent;
1291
1292		if (spanned)
1293			zone->zone_start_pfn = zone_start_pfn;
1294		else
1295			zone->zone_start_pfn = 0;
1296		zone->spanned_pages = spanned;
1297		zone->present_pages = real_size;
1298#if defined(CONFIG_MEMORY_HOTPLUG)
1299		zone->present_early_pages = real_size;
1300#endif
1301
1302		totalpages += spanned;
1303		realtotalpages += real_size;
1304	}
1305
1306	pgdat->node_spanned_pages = totalpages;
1307	pgdat->node_present_pages = realtotalpages;
1308	pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
1309}
1310
1311static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
1312						unsigned long present_pages)
1313{
1314	unsigned long pages = spanned_pages;
1315
1316	/*
1317	 * Provide a more accurate estimation if there are holes within
1318	 * the zone and SPARSEMEM is in use. If there are holes within the
1319	 * zone, each populated memory region may cost us one or two extra
1320	 * memmap pages due to alignment because memmap pages for each
1321	 * populated region may not be naturally aligned on a page boundary.
1322	 * So the (present_pages >> 4) heuristic is a tradeoff for that.
1323	 */
1324	if (spanned_pages > present_pages + (present_pages >> 4) &&
1325	    IS_ENABLED(CONFIG_SPARSEMEM))
1326		pages = present_pages;
1327
1328	return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
1329}
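/*
 * Worked example (illustrative, 4 KiB pages, 64-byte struct page): a zone
 * spanning 1048576 pfns of which 917504 are present has
 * present + (present >> 4) = 974848 < spanned, so with SPARSEMEM the estimate
 * uses present_pages: PAGE_ALIGN(917504 * 64) >> PAGE_SHIFT = 14336 memmap
 * pages, i.e. 56 MiB of struct pages.
 */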
1330
1331#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1332static void pgdat_init_split_queue(struct pglist_data *pgdat)
1333{
1334	struct deferred_split *ds_queue = &pgdat->deferred_split_queue;
1335
1336	spin_lock_init(&ds_queue->split_queue_lock);
1337	INIT_LIST_HEAD(&ds_queue->split_queue);
1338	ds_queue->split_queue_len = 0;
1339}
1340#else
1341static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
1342#endif
1343
1344#ifdef CONFIG_COMPACTION
1345static void pgdat_init_kcompactd(struct pglist_data *pgdat)
1346{
1347	init_waitqueue_head(&pgdat->kcompactd_wait);
1348}
1349#else
1350static void pgdat_init_kcompactd(struct pglist_data *pgdat) {}
1351#endif
1352
1353static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
1354{
1355	int i;
1356
1357	pgdat_resize_init(pgdat);
1358	pgdat_kswapd_lock_init(pgdat);
1359
1360	pgdat_init_split_queue(pgdat);
1361	pgdat_init_kcompactd(pgdat);
1362
1363	init_waitqueue_head(&pgdat->kswapd_wait);
1364	init_waitqueue_head(&pgdat->pfmemalloc_wait);
1365
1366	for (i = 0; i < NR_VMSCAN_THROTTLE; i++)
1367		init_waitqueue_head(&pgdat->reclaim_wait[i]);
1368
1369	pgdat_page_ext_init(pgdat);
1370	lruvec_init(&pgdat->__lruvec);
1371}
1372
1373static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
1374							unsigned long remaining_pages)
1375{
1376	atomic_long_set(&zone->managed_pages, remaining_pages);
1377	zone_set_nid(zone, nid);
1378	zone->name = zone_names[idx];
1379	zone->zone_pgdat = NODE_DATA(nid);
1380	spin_lock_init(&zone->lock);
1381	zone_seqlock_init(zone);
1382	zone_pcp_init(zone);
1383}
1384
1385static void __meminit zone_init_free_lists(struct zone *zone)
1386{
1387	unsigned int order, t;
1388	for_each_migratetype_order(order, t) {
1389		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
1390		zone->free_area[order].nr_free = 0;
1391	}
1392
1393#ifdef CONFIG_UNACCEPTED_MEMORY
1394	INIT_LIST_HEAD(&zone->unaccepted_pages);
1395#endif
1396}
1397
1398void __meminit init_currently_empty_zone(struct zone *zone,
1399					unsigned long zone_start_pfn,
1400					unsigned long size)
1401{
1402	struct pglist_data *pgdat = zone->zone_pgdat;
1403	int zone_idx = zone_idx(zone) + 1;
1404
1405	if (zone_idx > pgdat->nr_zones)
1406		pgdat->nr_zones = zone_idx;
1407
1408	zone->zone_start_pfn = zone_start_pfn;
1409
1410	mminit_dprintk(MMINIT_TRACE, "memmap_init",
1411			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
1412			pgdat->node_id,
1413			(unsigned long)zone_idx(zone),
1414			zone_start_pfn, (zone_start_pfn + size));
1415
1416	zone_init_free_lists(zone);
1417	zone->initialized = 1;
1418}
1419
1420#ifndef CONFIG_SPARSEMEM
1421/*
1422 * Calculate the size of the zone->pageblock_flags bitmap rounded to an unsigned long
1423 * Start by making sure zonesize is a multiple of pageblock_order by rounding
1424 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
1425 * round what is now in bits to nearest long in bits, then return it in
1426 * bytes.
1427 */
1428static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
1429{
1430	unsigned long usemapsize;
1431
1432	zonesize += zone_start_pfn & (pageblock_nr_pages-1);
1433	usemapsize = roundup(zonesize, pageblock_nr_pages);
1434	usemapsize = usemapsize >> pageblock_order;
1435	usemapsize *= NR_PAGEBLOCK_BITS;
1436	usemapsize = roundup(usemapsize, BITS_PER_LONG);
1437
1438	return usemapsize / BITS_PER_BYTE;
1439}
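/*
 * Worked example (illustrative): with 512-page pageblocks (pageblock_order 9,
 * typical for x86-64 with 2 MiB huge pages) and NR_PAGEBLOCK_BITS == 4, an
 * aligned 1048576-page zone has 2048 pageblocks -> 8192 bits, already a
 * multiple of BITS_PER_LONG, so usemap_size() returns 1024 bytes.
 */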
1440
1441static void __ref setup_usemap(struct zone *zone)
1442{
1443	unsigned long usemapsize = usemap_size(zone->zone_start_pfn,
1444					       zone->spanned_pages);
1445	zone->pageblock_flags = NULL;
1446	if (usemapsize) {
1447		zone->pageblock_flags =
1448			memblock_alloc_node(usemapsize, SMP_CACHE_BYTES,
1449					    zone_to_nid(zone));
1450		if (!zone->pageblock_flags)
1451			panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
1452			      usemapsize, zone->name, zone_to_nid(zone));
1453	}
1454}
1455#else
1456static inline void setup_usemap(struct zone *zone) {}
1457#endif /* CONFIG_SPARSEMEM */
1458
1459#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
1460
1461/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
1462void __init set_pageblock_order(void)
1463{
1464	unsigned int order = MAX_PAGE_ORDER;
1465
1466	/* Check that pageblock_nr_pages has not already been setup */
1467	if (pageblock_order)
1468		return;
1469
1470	/* Don't let pageblocks exceed the maximum allocation granularity. */
1471	if (HPAGE_SHIFT > PAGE_SHIFT && HUGETLB_PAGE_ORDER < order)
1472		order = HUGETLB_PAGE_ORDER;
1473
1474	/*
1475	 * Assume the largest contiguous order of interest is a huge page.
1476	 * This value may be variable depending on boot parameters on powerpc.
1477	 */
1478	pageblock_order = order;
1479}
1480#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
1481
1482/*
1483 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
1484 * is unused as pageblock_order is set at compile-time. See
1485 * include/linux/pageblock-flags.h for the values of pageblock_order based on
1486 * the kernel config
1487 */
1488void __init set_pageblock_order(void)
1489{
1490}
1491
1492#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
1493
1494/*
1495 * Set up the zone data structures
1496 * - init pgdat internals
1497 * - init all zones belonging to this node
1498 *
1499 * NOTE: this function is only called during memory hotplug
1500 */
1501#ifdef CONFIG_MEMORY_HOTPLUG
1502void __ref free_area_init_core_hotplug(struct pglist_data *pgdat)
1503{
1504	int nid = pgdat->node_id;
1505	enum zone_type z;
1506	int cpu;
1507
1508	pgdat_init_internals(pgdat);
1509
1510	if (pgdat->per_cpu_nodestats == &boot_nodestats)
1511		pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);
1512
1513	/*
1514	 * Reset the nr_zones, order and highest_zoneidx before reuse.
1515	 * Note that kswapd will init kswapd_highest_zoneidx properly
1516	 * when it starts in the near future.
1517	 */
1518	pgdat->nr_zones = 0;
1519	pgdat->kswapd_order = 0;
1520	pgdat->kswapd_highest_zoneidx = 0;
1521	pgdat->node_start_pfn = 0;
1522	pgdat->node_present_pages = 0;
1523
1524	for_each_online_cpu(cpu) {
1525		struct per_cpu_nodestat *p;
1526
1527		p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
1528		memset(p, 0, sizeof(*p));
1529	}
1530
1531	/*
1532	 * When memory is hot-added, all the memory is in offline state. So
1533	 * clear all zones' present_pages and managed_pages because they will
1534	 * be updated in online_pages() and offline_pages().
1535	 */
1536	for (z = 0; z < MAX_NR_ZONES; z++) {
1537		struct zone *zone = pgdat->node_zones + z;
1538
1539		zone->present_pages = 0;
1540		zone_init_internals(zone, z, nid, 0);
1541	}
1542}
1543#endif
1544
1545/*
1546 * Set up the zone data structures:
1547 *   - mark all pages reserved
1548 *   - mark all memory queues empty
1549 *   - clear the memory bitmaps
1550 *
1551 * NOTE: pgdat should get zeroed by caller.
1552 * NOTE: this function is only called during early init.
1553 */
1554static void __init free_area_init_core(struct pglist_data *pgdat)
1555{
1556	enum zone_type j;
1557	int nid = pgdat->node_id;
1558
1559	pgdat_init_internals(pgdat);
1560	pgdat->per_cpu_nodestats = &boot_nodestats;
1561
1562	for (j = 0; j < MAX_NR_ZONES; j++) {
1563		struct zone *zone = pgdat->node_zones + j;
1564		unsigned long size, freesize, memmap_pages;
1565
1566		size = zone->spanned_pages;
1567		freesize = zone->present_pages;
1568
1569		/*
1570		 * Adjust freesize so that it accounts for how much memory
1571		 * is used by this zone for memmap. This affects the watermark
1572		 * and per-cpu initialisations
1573		 */
1574		memmap_pages = calc_memmap_size(size, freesize);
1575		if (!is_highmem_idx(j)) {
1576			if (freesize >= memmap_pages) {
1577				freesize -= memmap_pages;
1578				if (memmap_pages)
1579					pr_debug("  %s zone: %lu pages used for memmap\n",
1580						 zone_names[j], memmap_pages);
1581			} else
1582				pr_warn("  %s zone: %lu memmap pages exceeds freesize %lu\n",
1583					zone_names[j], memmap_pages, freesize);
1584		}
1585
1586		/* Account for reserved pages */
1587		if (j == 0 && freesize > dma_reserve) {
1588			freesize -= dma_reserve;
1589			pr_debug("  %s zone: %lu pages reserved\n", zone_names[0], dma_reserve);
1590		}
1591
1592		if (!is_highmem_idx(j))
1593			nr_kernel_pages += freesize;
1594		/* Charge for highmem memmap if there are enough kernel pages */
1595		else if (nr_kernel_pages > memmap_pages * 2)
1596			nr_kernel_pages -= memmap_pages;
1597		nr_all_pages += freesize;
1598
1599		/*
1600		 * Set an approximate value for lowmem here; it will be adjusted
1601		 * when the bootmem allocator frees pages into the buddy system.
1602		 * And all highmem pages will be managed by the buddy system.
1603		 */
1604		zone_init_internals(zone, j, nid, freesize);
1605
1606		if (!size)
1607			continue;
1608
1609		setup_usemap(zone);
1610		init_currently_empty_zone(zone, zone->zone_start_pfn, size);
1611	}
1612}
1613
1614void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
1615			  phys_addr_t min_addr, int nid, bool exact_nid)
1616{
1617	void *ptr;
1618
1619	if (exact_nid)
1620		ptr = memblock_alloc_exact_nid_raw(size, align, min_addr,
1621						   MEMBLOCK_ALLOC_ACCESSIBLE,
1622						   nid);
1623	else
1624		ptr = memblock_alloc_try_nid_raw(size, align, min_addr,
1625						 MEMBLOCK_ALLOC_ACCESSIBLE,
1626						 nid);
1627
1628	if (ptr && size > 0)
1629		page_init_poison(ptr, size);
1630
1631	return ptr;
1632}
1633
1634#ifdef CONFIG_FLATMEM
1635static void __init alloc_node_mem_map(struct pglist_data *pgdat)
1636{
1637	unsigned long start, offset, size, end;
1638	struct page *map;
1639
1640	/* Skip empty nodes */
1641	if (!pgdat->node_spanned_pages)
1642		return;
1643
1644	start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
1645	offset = pgdat->node_start_pfn - start;
1646	/*
1647	 * The zone's endpoints aren't required to be MAX_PAGE_ORDER
1648	 * aligned but the node_mem_map endpoints must be in order
1649	 * for the buddy allocator to function correctly.
1650	 */
1651	end = ALIGN(pgdat_end_pfn(pgdat), MAX_ORDER_NR_PAGES);
1652	size =  (end - start) * sizeof(struct page);
1653	map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
1654			   pgdat->node_id, false);
1655	if (!map)
1656		panic("Failed to allocate %ld bytes for node %d memory map\n",
1657		      size, pgdat->node_id);
1658	pgdat->node_mem_map = map + offset;
1659	pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
1660		 __func__, pgdat->node_id, (unsigned long)pgdat,
1661		 (unsigned long)pgdat->node_mem_map);
1662#ifndef CONFIG_NUMA
1663	/* the global mem_map is just set as node 0's */
1664	if (pgdat == NODE_DATA(0)) {
1665		mem_map = NODE_DATA(0)->node_mem_map;
1666		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
1667			mem_map -= offset;
1668	}
1669#endif
1670}
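/*
 * Example of the alignment dance above (illustrative, MAX_ORDER_NR_PAGES ==
 * 1024): a node starting at pfn 0x1003 gives start = 0x1000 and offset = 3,
 * so the map is allocated from the rounded-down boundary and node_mem_map
 * points three struct pages into it, keeping pfn_to_page() arithmetic
 * consistent for the buddy allocator.
 */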
1671#else
1672static inline void alloc_node_mem_map(struct pglist_data *pgdat) { }
1673#endif /* CONFIG_FLATMEM */
1674
1675/**
1676 * get_pfn_range_for_nid - Return the start and end page frames for a node
1677 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
1678 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
1679 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
1680 *
1681 * It returns the start and end page frame of a node based on information
1682 * provided by memblock_set_node(). If called for a node
1683 * with no available memory, the start and end PFNs will be 0.
1684 */
1685void __init get_pfn_range_for_nid(unsigned int nid,
1686			unsigned long *start_pfn, unsigned long *end_pfn)
1687{
1688	unsigned long this_start_pfn, this_end_pfn;
1689	int i;
1690
1691	*start_pfn = -1UL;
1692	*end_pfn = 0;
1693
1694	for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
1695		*start_pfn = min(*start_pfn, this_start_pfn);
1696		*end_pfn = max(*end_pfn, this_end_pfn);
1697	}
1698
1699	if (*start_pfn == -1UL)
1700		*start_pfn = 0;
1701}
1702
1703static void __init free_area_init_node(int nid)
1704{
1705	pg_data_t *pgdat = NODE_DATA(nid);
1706	unsigned long start_pfn = 0;
1707	unsigned long end_pfn = 0;
1708
1709	/* pg_data_t should be reset to zero when it's allocated */
1710	WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx);
1711
1712	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
1713
1714	pgdat->node_id = nid;
1715	pgdat->node_start_pfn = start_pfn;
1716	pgdat->per_cpu_nodestats = NULL;
1717
1718	if (start_pfn != end_pfn) {
1719		pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
1720			(u64)start_pfn << PAGE_SHIFT,
1721			end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
1722
1723		calculate_node_totalpages(pgdat, start_pfn, end_pfn);
1724	} else {
1725		pr_info("Initmem setup node %d as memoryless\n", nid);
1726
1727		reset_memoryless_node_totalpages(pgdat);
1728	}
1729
1730	alloc_node_mem_map(pgdat);
1731	pgdat_set_deferred_range(pgdat);
1732
1733	free_area_init_core(pgdat);
1734	lru_gen_init_pgdat(pgdat);
1735}
1736
1737/* Any regular or high memory on that node ? */
1738static void __init check_for_memory(pg_data_t *pgdat)
1739{
1740	enum zone_type zone_type;
1741
1742	for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
1743		struct zone *zone = &pgdat->node_zones[zone_type];
1744		if (populated_zone(zone)) {
1745			if (IS_ENABLED(CONFIG_HIGHMEM))
1746				node_set_state(pgdat->node_id, N_HIGH_MEMORY);
1747			if (zone_type <= ZONE_NORMAL)
1748				node_set_state(pgdat->node_id, N_NORMAL_MEMORY);
1749			break;
1750		}
1751	}
1752}
1753
1754#if MAX_NUMNODES > 1
1755/*
1756 * Figure out the number of possible node ids.
1757 */
1758void __init setup_nr_node_ids(void)
1759{
1760	unsigned int highest;
1761
1762	highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
1763	nr_node_ids = highest + 1;
1764}
1765#endif
1766
1767/*
1768 * Some architectures, e.g. ARC may have ZONE_HIGHMEM below ZONE_NORMAL. For
1769 * such cases we allow max_zone_pfn sorted in the descending order
1770 */
1771static bool arch_has_descending_max_zone_pfns(void)
1772{
1773	return IS_ENABLED(CONFIG_ARC) && !IS_ENABLED(CONFIG_ARC_HAS_PAE40);
1774}
1775
1776/**
1777 * free_area_init - Initialise all pg_data_t and zone data
1778 * @max_zone_pfn: an array of max PFNs for each zone
1779 *
1780 * This will call free_area_init_node() for each active node in the system.
1781 * Using the page ranges provided by memblock_set_node(), the size of each
1782 * zone in each node and their holes is calculated. If the maximum PFNs of
1783 * two adjacent zones are equal, the higher zone is assumed to be empty.
1784 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
1785 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
1786 * starts where the previous one ended. For example, ZONE_DMA32 starts
1787 * at arch_max_dma_pfn.
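 *
 * As an illustrative sketch (hypothetical PFNs, not taken from any real
 * machine): with max_zone_pfn = { [ZONE_DMA] = 0x1000, [ZONE_DMA32] =
 * 0x100000, [ZONE_NORMAL] = 0x400000 }, ZONE_DMA spans from the start of
 * DRAM up to PFN 0x1000, ZONE_DMA32 spans [0x1000, 0x100000) and
 * ZONE_NORMAL spans [0x100000, 0x400000); ZONE_MOVABLE is carved out
 * separately by find_zone_movable_pfns_for_nodes().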
1788 */
1789void __init free_area_init(unsigned long *max_zone_pfn)
1790{
1791	unsigned long start_pfn, end_pfn;
1792	int i, nid, zone;
1793	bool descending;
1794
1795	/* Record where the zone boundaries are */
1796	memset(arch_zone_lowest_possible_pfn, 0,
1797				sizeof(arch_zone_lowest_possible_pfn));
1798	memset(arch_zone_highest_possible_pfn, 0,
1799				sizeof(arch_zone_highest_possible_pfn));
1800
1801	start_pfn = PHYS_PFN(memblock_start_of_DRAM());
1802	descending = arch_has_descending_max_zone_pfns();
1803
1804	for (i = 0; i < MAX_NR_ZONES; i++) {
1805		if (descending)
1806			zone = MAX_NR_ZONES - i - 1;
1807		else
1808			zone = i;
1809
1810		if (zone == ZONE_MOVABLE)
1811			continue;
1812
1813		end_pfn = max(max_zone_pfn[zone], start_pfn);
1814		arch_zone_lowest_possible_pfn[zone] = start_pfn;
1815		arch_zone_highest_possible_pfn[zone] = end_pfn;
1816
1817		start_pfn = end_pfn;
1818	}
1819
1820	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
1821	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
1822	find_zone_movable_pfns_for_nodes();
1823
1824	/* Print out the zone ranges */
1825	pr_info("Zone ranges:\n");
1826	for (i = 0; i < MAX_NR_ZONES; i++) {
1827		if (i == ZONE_MOVABLE)
1828			continue;
1829		pr_info("  %-8s ", zone_names[i]);
1830		if (arch_zone_lowest_possible_pfn[i] ==
1831				arch_zone_highest_possible_pfn[i])
1832			pr_cont("empty\n");
1833		else
1834			pr_cont("[mem %#018Lx-%#018Lx]\n",
1835				(u64)arch_zone_lowest_possible_pfn[i]
1836					<< PAGE_SHIFT,
1837				((u64)arch_zone_highest_possible_pfn[i]
1838					<< PAGE_SHIFT) - 1);
1839	}
1840
1841	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
1842	pr_info("Movable zone start for each node\n");
1843	for (i = 0; i < MAX_NUMNODES; i++) {
1844		if (zone_movable_pfn[i])
1845			pr_info("  Node %d: %#018Lx\n", i,
1846			       (u64)zone_movable_pfn[i] << PAGE_SHIFT);
1847	}
1848
1849	/*
1850	 * Print out the early node map, and initialize the
1851	 * subsection-map relative to active online memory ranges to
1852	 * enable future "sub-section" extensions of the memory map.
1853	 */
1854	pr_info("Early memory node ranges\n");
1855	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
1856		pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
1857			(u64)start_pfn << PAGE_SHIFT,
1858			((u64)end_pfn << PAGE_SHIFT) - 1);
1859		subsection_map_init(start_pfn, end_pfn - start_pfn);
1860	}
1861
1862	/* Initialise every node */
1863	mminit_verify_pageflags_layout();
1864	setup_nr_node_ids();
1865	set_pageblock_order();
1866
1867	for_each_node(nid) {
1868		pg_data_t *pgdat;
1869
1870		if (!node_online(nid)) {
1871			/* Allocator not initialized yet */
1872			pgdat = arch_alloc_nodedata(nid);
1873			if (!pgdat)
1874				panic("Cannot allocate %zuB for node %d.\n",
1875				       sizeof(*pgdat), nid);
1876			arch_refresh_nodedata(nid, pgdat);
1877			free_area_init_node(nid);
1878
1879			/*
1880			 * We do not want to confuse userspace with sysfs
1881			 * files/directories for a node without any memory
1882			 * attached to it, so this node is not marked as
1883			 * N_MEMORY and not marked online, so that no sysfs
1884			 * hierarchy will be created via register_one_node()
1885			 * for it. The pgdat will get fully initialized by
1886			 * hotadd_init_pgdat() when memory is hotplugged into
1887			 * this node.
1888			 */
1889			continue;
1890		}
1891
1892		pgdat = NODE_DATA(nid);
1893		free_area_init_node(nid);
1894
1895		/* Any memory on that node */
1896		if (pgdat->node_present_pages)
1897			node_set_state(nid, N_MEMORY);
1898		check_for_memory(pgdat);
1899	}
1900
1901	memmap_init();
1902
1903	/* disable hash distribution for systems with a single node */
1904	fixup_hashdist();
1905}
1906
1907/**
1908 * node_map_pfn_alignment - determine the maximum internode alignment
1909 *
1910 * This function should be called after node map is populated and sorted.
1911 * It calculates the maximum power of two alignment which can distinguish
1912 * all the nodes.
1913 *
1914 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
1915 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)).  If the
1916 * nodes are shifted by 256MiB, the result is 256MiB instead.  Note that if only
1917 * the last node is shifted, 1GiB is enough and this function will indicate so.
1918 *
1919 * This is used to test whether pfn -> nid mapping of the chosen memory
1920 * model has fine enough granularity to avoid incorrect mapping for the
1921 * populated node map.
1922 *
1923 * Return: the determined alignment in pfn's.  0 if there is no alignment
1924 * requirement (single node).
1925 */
1926unsigned long __init node_map_pfn_alignment(void)
1927{
1928	unsigned long accl_mask = 0, last_end = 0;
1929	unsigned long start, end, mask;
1930	int last_nid = NUMA_NO_NODE;
1931	int i, nid;
1932
1933	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
1934		if (!start || last_nid < 0 || last_nid == nid) {
1935			last_nid = nid;
1936			last_end = end;
1937			continue;
1938		}
1939
1940		/*
1941		 * Start with a mask granular enough to pin-point to the
1942		 * start pfn and tick off bits one-by-one until it becomes
1943		 * too coarse to separate the current node from the last.
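	 *
	 * Illustrative example (made-up numbers): if the previous node ended
	 * at pfn 0x10000 and this node starts at pfn 0x14000, __ffs(start) is
	 * 14, so the initial mask keeps bits 14 and above. The mask is then
	 * widened one bit at a time for as long as rounding start down to the
	 * next coarser granularity still lands at or above last_end, and
	 * accl_mask accumulates the result over all node boundaries.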
1944		 */
1945		mask = ~((1 << __ffs(start)) - 1);
1946		while (mask && last_end <= (start & (mask << 1)))
1947			mask <<= 1;
1948
1949		/* accumulate all internode masks */
1950		accl_mask |= mask;
1951	}
1952
1953	/* convert mask to number of pages */
1954	return ~accl_mask + 1;
1955}
1956
1957#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
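/*
 * Release a contiguous run of deferred pages [pfn, pfn + nr_pages) to the
 * buddy allocator: as one MAX_PAGE_ORDER block when the run is exactly that
 * size and naturally aligned, otherwise page by page.
 */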
1958static void __init deferred_free_range(unsigned long pfn,
1959				       unsigned long nr_pages)
1960{
1961	struct page *page;
1962	unsigned long i;
1963
1964	if (!nr_pages)
1965		return;
1966
1967	page = pfn_to_page(pfn);
1968
1969	/* Free a large naturally-aligned chunk if possible */
1970	if (nr_pages == MAX_ORDER_NR_PAGES && IS_MAX_ORDER_ALIGNED(pfn)) {
1971		for (i = 0; i < nr_pages; i += pageblock_nr_pages)
1972			set_pageblock_migratetype(page + i, MIGRATE_MOVABLE);
1973		__free_pages_core(page, MAX_PAGE_ORDER);
1974		return;
1975	}
1976
1977	/* Accept chunks smaller than MAX_PAGE_ORDER upfront */
1978	accept_memory(PFN_PHYS(pfn), PFN_PHYS(pfn + nr_pages));
1979
1980	for (i = 0; i < nr_pages; i++, page++, pfn++) {
1981		if (pageblock_aligned(pfn))
1982			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1983		__free_pages_core(page, 0);
1984	}
1985}
1986
1987/* Completion tracking for deferred_init_memmap() threads */
1988static atomic_t pgdat_init_n_undone __initdata;
1989static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1990
1991static inline void __init pgdat_init_report_one_done(void)
1992{
1993	if (atomic_dec_and_test(&pgdat_init_n_undone))
1994		complete(&pgdat_init_all_done_comp);
1995}
1996
1997/*
1998 * Returns true if page needs to be initialized or freed to buddy allocator.
1999 *
2000 * Within a MAX_PAGE_ORDER-aligned block, only the head pfn is checked for
2001 * validity; the remaining pfns in the block are assumed to be valid too.
2002 */
2003static inline bool __init deferred_pfn_valid(unsigned long pfn)
2004{
2005	if (IS_MAX_ORDER_ALIGNED(pfn) && !pfn_valid(pfn))
2006		return false;
2007	return true;
2008}
2009
2010/*
2011 * Free pages to buddy allocator. Try to free aligned pages in
2012 * MAX_ORDER_NR_PAGES sizes.
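 *
 * An invalid pfn ends the current run immediately; a MAX_PAGE_ORDER-aligned
 * pfn flushes the previous run and starts a new one, so runs never straddle
 * a MAX_PAGE_ORDER boundary.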
2013 */
2014static void __init deferred_free_pages(unsigned long pfn,
2015				       unsigned long end_pfn)
2016{
2017	unsigned long nr_free = 0;
2018
2019	for (; pfn < end_pfn; pfn++) {
2020		if (!deferred_pfn_valid(pfn)) {
2021			deferred_free_range(pfn - nr_free, nr_free);
2022			nr_free = 0;
2023		} else if (IS_MAX_ORDER_ALIGNED(pfn)) {
2024			deferred_free_range(pfn - nr_free, nr_free);
2025			nr_free = 1;
2026		} else {
2027			nr_free++;
2028		}
2029	}
2030	/* Free the last block of pages to allocator */
2031	deferred_free_range(pfn - nr_free, nr_free);
2032}
2033
2034/*
2035 * Initialize struct pages.  We minimize pfn page lookups and scheduler checks
2036 * by performing them only once every MAX_ORDER_NR_PAGES.
2037 * Return number of pages initialized.
2038 */
2039static unsigned long  __init deferred_init_pages(struct zone *zone,
2040						 unsigned long pfn,
2041						 unsigned long end_pfn)
2042{
2043	int nid = zone_to_nid(zone);
2044	unsigned long nr_pages = 0;
2045	int zid = zone_idx(zone);
2046	struct page *page = NULL;
2047
2048	for (; pfn < end_pfn; pfn++) {
2049		if (!deferred_pfn_valid(pfn)) {
2050			page = NULL;
2051			continue;
2052		} else if (!page || IS_MAX_ORDER_ALIGNED(pfn)) {
2053			page = pfn_to_page(pfn);
2054		} else {
2055			page++;
2056		}
2057		__init_single_page(page, pfn, zid, nid);
2058		nr_pages++;
2059	}
2060	return (nr_pages);
2061}
2062
2063/*
2064 * This function is meant to pre-load the iterator for the zone init.
2065 * Specifically it walks through the ranges until we are caught up to the
2066 * first_init_pfn value and exits there. If we never encounter the value we
2067 * return false indicating there are no valid ranges left.
2068 */
2069static bool __init
2070deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
2071				    unsigned long *spfn, unsigned long *epfn,
2072				    unsigned long first_init_pfn)
2073{
2074	u64 j;
2075
2076	/*
2077	 * Start out by walking through the ranges in this zone that have
2078	 * already been initialized. We don't need to do anything with them
2079	 * so we simply skip past them here.
2080	 */
2081	for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) {
2082		if (*epfn <= first_init_pfn)
2083			continue;
2084		if (*spfn < first_init_pfn)
2085			*spfn = first_init_pfn;
2086		*i = j;
2087		return true;
2088	}
2089
2090	return false;
2091}
2092
2093/*
2094 * Initialize and free pages. We do it in two loops: first we initialize
2095 * struct page, then free to buddy allocator, because while we are
2096 * freeing pages we can access pages that are ahead (computing buddy
2097 * page in __free_one_page()).
2098 *
2099 * In order to try and keep some memory in the cache we have the loop
2100 * broken along max page order boundaries. This way we will not cause
2101 * any issues with the buddy page computation.
2102 */
2103static unsigned long __init
2104deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
2105		       unsigned long *end_pfn)
2106{
2107	unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);
2108	unsigned long spfn = *start_pfn, epfn = *end_pfn;
2109	unsigned long nr_pages = 0;
2110	u64 j = *i;
2111
2112	/* First we loop through and initialize the page values */
2113	for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) {
2114		unsigned long t;
2115
2116		if (mo_pfn <= *start_pfn)
2117			break;
2118
2119		t = min(mo_pfn, *end_pfn);
2120		nr_pages += deferred_init_pages(zone, *start_pfn, t);
2121
2122		if (mo_pfn < *end_pfn) {
2123			*start_pfn = mo_pfn;
2124			break;
2125		}
2126	}
2127
2128	/* Reset values and now loop through freeing pages as needed */
2129	swap(j, *i);
2130
2131	for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) {
2132		unsigned long t;
2133
2134		if (mo_pfn <= spfn)
2135			break;
2136
2137		t = min(mo_pfn, epfn);
2138		deferred_free_pages(spfn, t);
2139
2140		if (mo_pfn <= epfn)
2141			break;
2142	}
2143
2144	return nr_pages;
2145}
2146
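/*
 * Per-thread worker for padata_do_multithreaded(): initialise and free the
 * deferred pages in the [start_pfn, end_pfn) chunk of the zone passed via
 * @arg.
 */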
2147static void __init
2148deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
2149			   void *arg)
2150{
2151	unsigned long spfn, epfn;
2152	struct zone *zone = arg;
2153	u64 i;
2154
2155	deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);
2156
2157	/*
2158	 * Initialize and free pages in MAX_PAGE_ORDER sized increments so that
2159	 * we can avoid introducing any issues with the buddy allocator.
2160	 */
2161	while (spfn < end_pfn) {
2162		deferred_init_maxorder(&i, zone, &spfn, &epfn);
2163		cond_resched();
2164	}
2165}
2166
2167/* An arch may override for more concurrency. */
2168__weak int __init
2169deferred_page_init_max_threads(const struct cpumask *node_cpumask)
2170{
2171	return 1;
2172}
2173
2174/* Initialise remaining memory on a node */
2175static int __init deferred_init_memmap(void *data)
2176{
2177	pg_data_t *pgdat = data;
2178	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
2179	unsigned long spfn = 0, epfn = 0;
2180	unsigned long first_init_pfn, flags;
2181	unsigned long start = jiffies;
2182	struct zone *zone;
2183	int zid, max_threads;
2184	u64 i;
2185
2186	/* Bind memory initialisation thread to a local node if possible */
2187	if (!cpumask_empty(cpumask))
2188		set_cpus_allowed_ptr(current, cpumask);
2189
2190	pgdat_resize_lock(pgdat, &flags);
2191	first_init_pfn = pgdat->first_deferred_pfn;
2192	if (first_init_pfn == ULONG_MAX) {
2193		pgdat_resize_unlock(pgdat, &flags);
2194		pgdat_init_report_one_done();
2195		return 0;
2196	}
2197
2198	/* Sanity check boundaries */
2199	BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
2200	BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
2201	pgdat->first_deferred_pfn = ULONG_MAX;
2202
2203	/*
2204	 * Once we unlock here, the zone cannot be grown any more. Thus, if an
2205	 * interrupt thread must allocate memory this early in boot, the zone must
2206	 * have been pre-grown before deferred page initialization starts.
2207	 */
2208	pgdat_resize_unlock(pgdat, &flags);
2209
2210	/* Only the highest zone is deferred so find it */
2211	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
2212		zone = pgdat->node_zones + zid;
2213		if (first_init_pfn < zone_end_pfn(zone))
2214			break;
2215	}
2216
2217	/* If the zone is empty, somebody else may already have cleared it out */
2218	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2219						 first_init_pfn))
2220		goto zone_empty;
2221
2222	max_threads = deferred_page_init_max_threads(cpumask);
2223
2224	while (spfn < epfn) {
2225		unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION);
2226		struct padata_mt_job job = {
2227			.thread_fn   = deferred_init_memmap_chunk,
2228			.fn_arg      = zone,
2229			.start       = spfn,
2230			.size        = epfn_align - spfn,
2231			.align       = PAGES_PER_SECTION,
2232			.min_chunk   = PAGES_PER_SECTION,
2233			.max_threads = max_threads,
2234			.numa_aware  = false,
2235		};
2236
2237		padata_do_multithreaded(&job);
2238		deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2239						    epfn_align);
2240	}
2241zone_empty:
2242	/* Sanity check that the next zone really is unpopulated */
2243	WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
2244
2245	pr_info("node %d deferred pages initialised in %ums\n",
2246		pgdat->node_id, jiffies_to_msecs(jiffies - start));
2247
2248	pgdat_init_report_one_done();
2249	return 0;
2250}
2251
2252/*
2253 * If this zone has deferred pages, try to grow it by initializing enough
2254 * deferred pages to satisfy the allocation specified by order, rounded up to
2255 * the nearest PAGES_PER_SECTION boundary.  So we're adding memory in increments
2256 * of SECTION_SIZE bytes by initializing struct pages in increments of
2257 * PAGES_PER_SECTION * sizeof(struct page) bytes.
2258 *
2259 * Return true when zone was grown, otherwise return false. We return true even
2260 * when we grow less than requested, to let the caller decide if there are
2261 * enough pages to satisfy the allocation.
2262 *
2263 * Note: We use noinline because this function is needed only during boot, and
2264 * it is called from a __ref function _deferred_grow_zone. This way we are
2265 * making sure that it is not inlined into permanent text section.
2266 */
2267bool __init deferred_grow_zone(struct zone *zone, unsigned int order)
2268{
2269	unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
2270	pg_data_t *pgdat = zone->zone_pgdat;
2271	unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
2272	unsigned long spfn, epfn, flags;
2273	unsigned long nr_pages = 0;
2274	u64 i;
2275
2276	/* Only the last zone may have deferred pages */
2277	if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
2278		return false;
2279
2280	pgdat_resize_lock(pgdat, &flags);
2281
2282	/*
2283	 * If someone grew this zone while we were waiting for spinlock, return
2284	 * true, as there might be enough pages already.
2285	 */
2286	if (first_deferred_pfn != pgdat->first_deferred_pfn) {
2287		pgdat_resize_unlock(pgdat, &flags);
2288		return true;
2289	}
2290
2291	/* If the zone is empty, somebody else may already have cleared it out */
2292	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2293						 first_deferred_pfn)) {
2294		pgdat->first_deferred_pfn = ULONG_MAX;
2295		pgdat_resize_unlock(pgdat, &flags);
2296		/* Retry only once. */
2297		return first_deferred_pfn != ULONG_MAX;
2298	}
2299
2300	/*
2301	 * Initialize and free pages in MAX_PAGE_ORDER sized increments so
2302	 * that we can avoid introducing any issues with the buddy
2303	 * allocator.
2304	 */
2305	while (spfn < epfn) {
2306		/* update our first deferred PFN for this section */
2307		first_deferred_pfn = spfn;
2308
2309		nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
2310		touch_nmi_watchdog();
2311
2312		/* We should only stop along section boundaries */
2313		if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION)
2314			continue;
2315
2316		/* If our quota has been met we can stop here */
2317		if (nr_pages >= nr_pages_needed)
2318			break;
2319	}
2320
2321	pgdat->first_deferred_pfn = spfn;
2322	pgdat_resize_unlock(pgdat, &flags);
2323
2324	return nr_pages > 0;
2325}
2326
2327#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
2328
2329#ifdef CONFIG_CMA
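/* Hand a pageblock that was reserved at boot for CMA over to the buddy allocator */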
2330void __init init_cma_reserved_pageblock(struct page *page)
2331{
2332	unsigned i = pageblock_nr_pages;
2333	struct page *p = page;
2334
2335	do {
2336		__ClearPageReserved(p);
2337		set_page_count(p, 0);
2338	} while (++p, --i);
2339
2340	set_pageblock_migratetype(page, MIGRATE_CMA);
2341	set_page_refcounted(page);
2342	__free_pages(page, pageblock_order);
2343
2344	adjust_managed_page_count(page, pageblock_nr_pages);
2345	page_zone(page)->cma_pages += pageblock_nr_pages;
2346}
2347#endif
2348
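/*
 * Walk the zone one pageblock at a time and mark it contiguous if no
 * pageblock contains a hole (see __pageblock_pfn_to_page()).
 */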
2349void set_zone_contiguous(struct zone *zone)
2350{
2351	unsigned long block_start_pfn = zone->zone_start_pfn;
2352	unsigned long block_end_pfn;
2353
2354	block_end_pfn = pageblock_end_pfn(block_start_pfn);
2355	for (; block_start_pfn < zone_end_pfn(zone);
2356			block_start_pfn = block_end_pfn,
2357			 block_end_pfn += pageblock_nr_pages) {
2358
2359		block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
2360
2361		if (!__pageblock_pfn_to_page(block_start_pfn,
2362					     block_end_pfn, zone))
2363			return;
2364		cond_resched();
2365	}
2366
2367	/* No hole was found: the zone is physically contiguous */
2368	zone->contiguous = true;
2369}
2370
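/*
 * Late boot-time page allocator setup: finish deferred struct page
 * initialisation, discard memblock metadata, optionally shuffle the free
 * lists and work out which zones are contiguous.
 */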
2371void __init page_alloc_init_late(void)
2372{
2373	struct zone *zone;
2374	int nid;
2375
2376#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
2377
2378	/* There will be num_node_state(N_MEMORY) threads */
2379	atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
2380	for_each_node_state(nid, N_MEMORY) {
2381		kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
2382	}
2383
2384	/* Block until all are initialised */
2385	wait_for_completion(&pgdat_init_all_done_comp);
2386
2387	/*
2388	 * We initialized the rest of the deferred pages.  Permanently disable
2389	 * on-demand struct page initialization.
2390	 */
2391	static_branch_disable(&deferred_pages);
2392
2393	/* Reinit limits that are based on free pages after the kernel is up */
2394	files_maxfiles_init();
2395#endif
2396
2397	buffer_init();
2398
2399	/* Discard memblock private memory */
2400	memblock_discard();
2401
2402	for_each_node_state(nid, N_MEMORY)
2403		shuffle_free_memory(NODE_DATA(nid));
2404
2405	for_each_populated_zone(zone)
2406		set_zone_contiguous(zone);
2407
2408	/* Initialize page ext after all struct pages are initialized. */
2409	if (deferred_struct_pages)
2410		page_ext_init();
2411
2412	page_alloc_sysctl_init();
2413}
2414
2415#ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
2416/*
2417 * Returns the number of pages that arch has reserved but
2418 * is not known to alloc_large_system_hash().
2419 */
2420static unsigned long __init arch_reserved_kernel_pages(void)
2421{
2422	return 0;
2423}
2424#endif
2425
2426/*
2427 * Adaptive scale is meant to reduce sizes of hash tables on large memory
2428 * machines. As memory size is increased, the scale is also increased, but at
2429 * a slower pace.  Starting from ADAPT_SCALE_BASE (64G), every time memory
2430 * quadruples the scale is increased by one, which means the size of the hash
2431 * table only doubles, instead of quadrupling as well.
2432 * Because 32-bit systems cannot have large physical memory, where this scaling
2433 * makes sense, it is disabled on such platforms.
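 *
 * Rough worked example (assuming 4KiB pages): on a 512GiB machine numentries
 * starts at roughly 128M pages; adapt runs 16M -> 64M -> 256M, so scale is
 * bumped twice and the resulting table is about a quarter of what purely
 * linear scaling would give.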
2434 */
2435#if __BITS_PER_LONG > 32
2436#define ADAPT_SCALE_BASE	(64ul << 30)
2437#define ADAPT_SCALE_SHIFT	2
2438#define ADAPT_SCALE_NPAGES	(ADAPT_SCALE_BASE >> PAGE_SHIFT)
2439#endif
2440
2441/*
2442 * allocate a large system hash table from memblock
2443 * - it is assumed that the hash table must contain an exact power-of-2
2444 *   quantity of entries
2445 * - limit is the number of hash buckets, not the total allocation size
2446 */
2447void *__init alloc_large_system_hash(const char *tablename,
2448				     unsigned long bucketsize,
2449				     unsigned long numentries,
2450				     int scale,
2451				     int flags,
2452				     unsigned int *_hash_shift,
2453				     unsigned int *_hash_mask,
2454				     unsigned long low_limit,
2455				     unsigned long high_limit)
2456{
2457	unsigned long long max = high_limit;
2458	unsigned long log2qty, size;
2459	void *table;
2460	gfp_t gfp_flags;
2461	bool virt;
2462	bool huge;
2463
2464	/* allow the kernel cmdline to have a say */
2465	if (!numentries) {
2466		/* round applicable memory size up to nearest megabyte */
2467		numentries = nr_kernel_pages;
2468		numentries -= arch_reserved_kernel_pages();
2469
2470		/* Rounding up is unnecessary when PAGE_SIZE >= 1MB */
2471		if (PAGE_SIZE < SZ_1M)
2472			numentries = round_up(numentries, SZ_1M / PAGE_SIZE);
2473
2474#if __BITS_PER_LONG > 32
2475		if (!high_limit) {
2476			unsigned long adapt;
2477
2478			for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
2479			     adapt <<= ADAPT_SCALE_SHIFT)
2480				scale++;
2481		}
2482#endif
2483
2484		/* limit to 1 bucket per 2^scale bytes of low memory */
2485		if (scale > PAGE_SHIFT)
2486			numentries >>= (scale - PAGE_SHIFT);
2487		else
2488			numentries <<= (PAGE_SHIFT - scale);
2489
2490		if (unlikely((numentries * bucketsize) < PAGE_SIZE))
2491			numentries = PAGE_SIZE / bucketsize;
2492	}
2493	numentries = roundup_pow_of_two(numentries);
2494
2495	/* limit allocation size to 1/16 total memory by default */
2496	if (max == 0) {
2497		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
2498		do_div(max, bucketsize);
2499	}
2500	max = min(max, 0x80000000ULL);
2501
2502	if (numentries < low_limit)
2503		numentries = low_limit;
2504	if (numentries > max)
2505		numentries = max;
2506
2507	log2qty = ilog2(numentries);
2508
2509	gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
2510	do {
2511		virt = false;
2512		size = bucketsize << log2qty;
2513		if (flags & HASH_EARLY) {
2514			if (flags & HASH_ZERO)
2515				table = memblock_alloc(size, SMP_CACHE_BYTES);
2516			else
2517				table = memblock_alloc_raw(size,
2518							   SMP_CACHE_BYTES);
2519		} else if (get_order(size) > MAX_PAGE_ORDER || hashdist) {
2520			table = vmalloc_huge(size, gfp_flags);
2521			virt = true;
2522			if (table)
2523				huge = is_vm_area_hugepages(table);
2524		} else {
2525			/*
2526			 * If bucketsize is not a power of two, some pages at
2527			 * the end of the hash table may be unused;
2528			 * alloc_pages_exact() frees those automatically.
2529			 */
2530			table = alloc_pages_exact(size, gfp_flags);
2531			kmemleak_alloc(table, size, 1, gfp_flags);
2532		}
2533	} while (!table && size > PAGE_SIZE && --log2qty);
2534
2535	if (!table)
2536		panic("Failed to allocate %s hash table\n", tablename);
2537
2538	pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
2539		tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size,
2540		virt ? (huge ? "vmalloc hugepage" : "vmalloc") : "linear");
2541
2542	if (_hash_shift)
2543		*_hash_shift = log2qty;
2544	if (_hash_mask)
2545		*_hash_mask = (1 << log2qty) - 1;
2546
2547	return table;
2548}
2549
2550/**
2551 * set_dma_reserve - set the specified number of pages reserved in the first zone
2552 * @new_dma_reserve: The number of pages to mark reserved
2553 *
2554 * The per-cpu batchsize and zone watermarks are determined by managed_pages.
2555 * In the DMA zone, a significant percentage may be consumed by kernel image
2556 * and other unfreeable allocations which can skew the watermarks badly. This
2557 * function may optionally be used to account for unfreeable pages in the
2558 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
2559 * smaller per-cpu batchsize.
2560 */
2561void __init set_dma_reserve(unsigned long new_dma_reserve)
2562{
2563	dma_reserve = new_dma_reserve;
2564}
2565
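/*
 * Called by memblock to hand early pages over to the buddy allocator. Pages
 * whose struct page initialisation was deferred, or that KMSAN wants to keep
 * for itself, are skipped here.
 */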
2566void __init memblock_free_pages(struct page *page, unsigned long pfn,
2567							unsigned int order)
2568{
2570	if (IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT)) {
2571		int nid = early_pfn_to_nid(pfn);
2572
2573		if (!early_page_initialised(pfn, nid))
2574			return;
2575	}
2576
2577	if (!kmsan_memblock_free_pages(page, order)) {
2578		/* KMSAN will take care of these pages. */
2579		return;
2580	}
2581	__free_pages_core(page, order);
2582}
2583
2584DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
2585EXPORT_SYMBOL(init_on_alloc);
2586
2587DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
2588EXPORT_SYMBOL(init_on_free);
2589
2590static bool _init_on_alloc_enabled_early __read_mostly
2591				= IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
2592static int __init early_init_on_alloc(char *buf)
2593{
2595	return kstrtobool(buf, &_init_on_alloc_enabled_early);
2596}
2597early_param("init_on_alloc", early_init_on_alloc);
2598
2599static bool _init_on_free_enabled_early __read_mostly
2600				= IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON);
2601static int __init early_init_on_free(char *buf)
2602{
2603	return kstrtobool(buf, &_init_on_free_enabled_early);
2604}
2605early_param("init_on_free", early_init_on_free);
2606
2607DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
2608
2609/*
2610 * Enable static keys related to various memory debugging and hardening options.
2611 * Some override others, and depend on early params that are evaluated in the
2612 * order of appearance. So we need to first gather the full picture of what was
2613 * enabled, and then make decisions.
2614 */
2615static void __init mem_debugging_and_hardening_init(void)
2616{
2617	bool page_poisoning_requested = false;
2618	bool want_check_pages = false;
2619
2620#ifdef CONFIG_PAGE_POISONING
2621	/*
2622	 * Page poisoning stands in for debug page alloc on architectures that
2623	 * do not support it. If either of those options is enabled, enable poisoning.
2624	 */
2625	if (page_poisoning_enabled() ||
2626	     (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
2627	      debug_pagealloc_enabled())) {
2628		static_branch_enable(&_page_poisoning_enabled);
2629		page_poisoning_requested = true;
2630		want_check_pages = true;
2631	}
2632#endif
2633
2634	if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
2635	    page_poisoning_requested) {
2636		pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
2637			"will take precedence over init_on_alloc and init_on_free\n");
2638		_init_on_alloc_enabled_early = false;
2639		_init_on_free_enabled_early = false;
2640	}
2641
2642	if (_init_on_alloc_enabled_early) {
2643		want_check_pages = true;
2644		static_branch_enable(&init_on_alloc);
2645	} else {
2646		static_branch_disable(&init_on_alloc);
2647	}
2648
2649	if (_init_on_free_enabled_early) {
2650		want_check_pages = true;
2651		static_branch_enable(&init_on_free);
2652	} else {
2653		static_branch_disable(&init_on_free);
2654	}
2655
2656	if (IS_ENABLED(CONFIG_KMSAN) &&
2657	    (_init_on_alloc_enabled_early || _init_on_free_enabled_early))
2658		pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running KMSAN\n");
2659
2660#ifdef CONFIG_DEBUG_PAGEALLOC
2661	if (debug_pagealloc_enabled()) {
2662		want_check_pages = true;
2663		static_branch_enable(&_debug_pagealloc_enabled);
2664
2665		if (debug_guardpage_minorder())
2666			static_branch_enable(&_debug_guardpage_enabled);
2667	}
2668#endif
2669
2670	/*
2671	 * Any page debugging or hardening option also enables sanity checking
2672	 * of struct pages being allocated or freed. With CONFIG_DEBUG_VM it's
2673	 * enabled already.
2674	 */
2675	if (!IS_ENABLED(CONFIG_DEBUG_VM) && want_check_pages)
2676		static_branch_enable(&check_pages_enabled);
2677}
2678
2679/* Report memory auto-initialization states for this boot. */
2680static void __init report_meminit(void)
2681{
2682	const char *stack;
2683
2684	if (IS_ENABLED(CONFIG_INIT_STACK_ALL_PATTERN))
2685		stack = "all(pattern)";
2686	else if (IS_ENABLED(CONFIG_INIT_STACK_ALL_ZERO))
2687		stack = "all(zero)";
2688	else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL))
2689		stack = "byref_all(zero)";
2690	else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF))
2691		stack = "byref(zero)";
2692	else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_USER))
2693		stack = "__user(zero)";
2694	else
2695		stack = "off";
2696
2697	pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s\n",
2698		stack, want_init_on_alloc(GFP_KERNEL) ? "on" : "off",
2699		want_init_on_free() ? "on" : "off");
2700	if (want_init_on_free())
2701		pr_info("mem auto-init: clearing system memory may take some time...\n");
2702}
2703
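/* Print the one-line "Memory: ..." boot summary of kernel section sizes */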
2704static void __init mem_init_print_info(void)
2705{
2706	unsigned long physpages, codesize, datasize, rosize, bss_size;
2707	unsigned long init_code_size, init_data_size;
2708
2709	physpages = get_num_physpages();
2710	codesize = _etext - _stext;
2711	datasize = _edata - _sdata;
2712	rosize = __end_rodata - __start_rodata;
2713	bss_size = __bss_stop - __bss_start;
2714	init_data_size = __init_end - __init_begin;
2715	init_code_size = _einittext - _sinittext;
2716
2717	/*
2718	 * Detect special cases and adjust section sizes accordingly:
2719	 * 1) .init.* may be embedded into .data sections
2720	 * 2) .init.text.* may be out of [__init_begin, __init_end],
2721	 *    please refer to arch/tile/kernel/vmlinux.lds.S.
2722	 * 3) .rodata.* may be embedded into .text or .data sections.
2723	 */
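/*
 * If the section marker @pos falls inside [@start, @end), the @adj bytes it
 * accounts for are already included in @size, so subtract them to avoid
 * counting those bytes twice.
 */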
2724#define adj_init_size(start, end, size, pos, adj) \
2725	do { \
2726		if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \
2727			size -= adj; \
2728	} while (0)
2729
2730	adj_init_size(__init_begin, __init_end, init_data_size,
2731		     _sinittext, init_code_size);
2732	adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
2733	adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
2734	adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
2735	adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
2736
2737#undef	adj_init_size
2738
2739	pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
2740#ifdef	CONFIG_HIGHMEM
2741		", %luK highmem"
2742#endif
2743		")\n",
2744		K(nr_free_pages()), K(physpages),
2745		codesize / SZ_1K, datasize / SZ_1K, rosize / SZ_1K,
2746		(init_data_size + init_code_size) / SZ_1K, bss_size / SZ_1K,
2747		K(physpages - totalram_pages() - totalcma_pages),
2748		K(totalcma_pages)
2749#ifdef	CONFIG_HIGHMEM
2750		, K(totalhigh_pages())
2751#endif
2752		);
2753}
2754
2755/*
2756 * Set up kernel memory allocators
2757 */
2758void __init mm_core_init(void)
2759{
2760	/* Initializations relying on SMP setup */
2761	build_all_zonelists(NULL);
2762	page_alloc_init_cpuhp();
2763
2764	/*
2765	 * page_ext requires contiguous pages bigger than
2766	 * MAX_PAGE_ORDER, unless SPARSEMEM is used.
2767	 */
2768	page_ext_init_flatmem();
2769	mem_debugging_and_hardening_init();
2770	kfence_alloc_pool_and_metadata();
2771	report_meminit();
2772	kmsan_init_shadow();
2773	stack_depot_early_init();
2774	mem_init();
2775	mem_init_print_info();
2776	kmem_cache_init();
2777	/*
2778	 * page_owner must be initialized after buddy is ready, and also after
2779	 * slab is ready so that stack_depot_init() works properly
2780	 */
2781	page_ext_init_flatmem_late();
2782	kmemleak_init();
2783	ptlock_cache_init();
2784	pgtable_cache_init();
2785	debug_objects_mem_init();
2786	vmalloc_init();
2787	/* If page_ext init was not deferred, do it now that vmalloc is fully initialized */
2788	if (!deferred_struct_pages)
2789		page_ext_init();
2790	/* Should be run before the first non-init thread is created */
2791	init_espfix_bsp();
2792	/* Should be run after espfix64 is set up. */
2793	pti_init();
2794	kmsan_init_runtime();
2795	mm_cache_init();
2796}
2797