// SPDX-License-Identifier: GPL-2.0
/*
 * Functions for working with the Flattened Device Tree data format
 *
 * Copyright 2009 Benjamin Herrenschmidt, IBM Corp
 * benh@kernel.crashing.org
 */

#define pr_fmt(fmt)	"OF: fdt: " fmt

#include <linux/acpi.h>
#include <linux/crash_dump.h>
#include <linux/crc32.h>
#include <linux/kernel.h>
#include <linux/initrd.h>
#include <linux/memblock.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/libfdt.h>
#include <linux/debugfs.h>
#include <linux/serial_core.h>
#include <linux/sysfs.h>
#include <linux/random.h>

#include <asm/setup.h>  /* for COMMAND_LINE_SIZE */
#include <asm/page.h>

#include "of_private.h"

/*
 * __dtb_empty_root_begin[] and __dtb_empty_root_end[] magically created by
 * cmd_dt_S_dtb in scripts/Makefile.lib
 */
extern uint8_t __dtb_empty_root_begin[];
extern uint8_t __dtb_empty_root_end[];

/*
 * of_fdt_limit_memory - limit the number of regions in the /memory node
 * @limit: maximum entries
 *
 * Adjust the flattened device tree to have at most 'limit' memory
 * entries in the /memory node. This function may be called
 * any time after initial_boot_params is set.
 */
void __init of_fdt_limit_memory(int limit)
{
	int memory;
	int len;
	const void *val;
	int nr_address_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
	int nr_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
	const __be32 *addr_prop;
	const __be32 *size_prop;
	int root_offset;
	int cell_size;

	root_offset = fdt_path_offset(initial_boot_params, "/");
	if (root_offset < 0)
		return;

	addr_prop = fdt_getprop(initial_boot_params, root_offset,
				"#address-cells", NULL);
	if (addr_prop)
		nr_address_cells = fdt32_to_cpu(*addr_prop);

	size_prop = fdt_getprop(initial_boot_params, root_offset,
				"#size-cells", NULL);
	if (size_prop)
		nr_size_cells = fdt32_to_cpu(*size_prop);

	cell_size = sizeof(uint32_t)*(nr_address_cells + nr_size_cells);

	memory = fdt_path_offset(initial_boot_params, "/memory");
	if (memory > 0) {
		val = fdt_getprop(initial_boot_params, memory, "reg", &len);
		if (len > limit*cell_size) {
			len = limit*cell_size;
			pr_debug("Limiting number of entries to %d\n", limit);
			fdt_setprop(initial_boot_params, memory, "reg", val,
					len);
		}
	}
}

bool of_fdt_device_is_available(const void *blob, unsigned long node)
{
	const char *status = fdt_getprop(blob, node, "status", NULL);

	if (!status)
		return true;

	if (!strcmp(status, "ok") || !strcmp(status, "okay"))
		return true;

	return false;
}
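
/*
 * Example (illustrative sketch, not taken from this file): skipping disabled
 * nodes while walking a flattened blob with libfdt. The @blob pointer and
 * handle_enabled_node() are placeholders.
 *
 *	int offset, depth = 0;
 *
 *	for (offset = fdt_next_node(blob, -1, &depth);
 *	     offset >= 0;
 *	     offset = fdt_next_node(blob, offset, &depth)) {
 *		if (!of_fdt_device_is_available(blob, offset))
 *			continue;
 *		handle_enabled_node(blob, offset);
 *	}
 */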

static void *unflatten_dt_alloc(void **mem, unsigned long size,
				       unsigned long align)
{
	void *res;

	*mem = PTR_ALIGN(*mem, align);
	res = *mem;
	*mem += size;

	return res;
}

static void populate_properties(const void *blob,
				int offset,
				void **mem,
				struct device_node *np,
				const char *nodename,
				bool dryrun)
{
	struct property *pp, **pprev = NULL;
	int cur;
	bool has_name = false;

	pprev = &np->properties;
	for (cur = fdt_first_property_offset(blob, offset);
	     cur >= 0;
	     cur = fdt_next_property_offset(blob, cur)) {
		const __be32 *val;
		const char *pname;
		u32 sz;

		val = fdt_getprop_by_offset(blob, cur, &pname, &sz);
		if (!val) {
			pr_warn("Cannot locate property at 0x%x\n", cur);
			continue;
		}

		if (!pname) {
			pr_warn("Cannot find property name at 0x%x\n", cur);
			continue;
		}

		if (!strcmp(pname, "name"))
			has_name = true;

		pp = unflatten_dt_alloc(mem, sizeof(struct property),
					__alignof__(struct property));
		if (dryrun)
			continue;

		/* We accept flattened tree phandles either in
		 * ePAPR-style "phandle" properties, or the
		 * legacy "linux,phandle" properties.  If both
		 * appear and have different values, things
		 * will get weird. Don't do that.
		 */
		if (!strcmp(pname, "phandle") ||
		    !strcmp(pname, "linux,phandle")) {
			if (!np->phandle)
				np->phandle = be32_to_cpup(val);
		}

		/* And we process the "ibm,phandle" property
		 * used in pSeries dynamic device tree
		 * stuff
		 */
		if (!strcmp(pname, "ibm,phandle"))
			np->phandle = be32_to_cpup(val);

		pp->name   = (char *)pname;
		pp->length = sz;
		pp->value  = (__be32 *)val;
		*pprev     = pp;
		pprev      = &pp->next;
	}

	/* With version 0x10 we may not have the name property,
	 * recreate it here from the unit name if absent
	 */
	if (!has_name) {
		const char *p = nodename, *ps = p, *pa = NULL;
		int len;

		while (*p) {
			if ((*p) == '@')
				pa = p;
			else if ((*p) == '/')
				ps = p + 1;
			p++;
		}

		if (pa < ps)
			pa = p;
		len = (pa - ps) + 1;
		pp = unflatten_dt_alloc(mem, sizeof(struct property) + len,
					__alignof__(struct property));
		if (!dryrun) {
			pp->name   = "name";
			pp->length = len;
			pp->value  = pp + 1;
			*pprev     = pp;
			memcpy(pp->value, ps, len - 1);
			((char *)pp->value)[len - 1] = 0;
			pr_debug("fixed up name for %s -> %s\n",
				 nodename, (char *)pp->value);
		}
	}
}

static int populate_node(const void *blob,
			  int offset,
			  void **mem,
			  struct device_node *dad,
			  struct device_node **pnp,
			  bool dryrun)
{
	struct device_node *np;
	const char *pathp;
	int len;

	pathp = fdt_get_name(blob, offset, &len);
	if (!pathp) {
		*pnp = NULL;
		return len;
	}

	len++;

	np = unflatten_dt_alloc(mem, sizeof(struct device_node) + len,
				__alignof__(struct device_node));
	if (!dryrun) {
		char *fn;
		of_node_init(np);
		np->full_name = fn = ((char *)np) + sizeof(*np);

		memcpy(fn, pathp, len);

		if (dad != NULL) {
			np->parent = dad;
			np->sibling = dad->child;
			dad->child = np;
		}
	}

	populate_properties(blob, offset, mem, np, pathp, dryrun);
	if (!dryrun) {
		np->name = of_get_property(np, "name", NULL);
		if (!np->name)
			np->name = "<NULL>";
	}

	*pnp = np;
	return 0;
}

static void reverse_nodes(struct device_node *parent)
{
	struct device_node *child, *next;

	/* Recurse depth-first into the children */
	child = parent->child;
	while (child) {
		reverse_nodes(child);

		child = child->sibling;
	}

	/* Reverse the nodes in the child list */
	child = parent->child;
	parent->child = NULL;
	while (child) {
		next = child->sibling;

		child->sibling = parent->child;
		parent->child = child;
		child = next;
	}
}

/**
 * unflatten_dt_nodes - Alloc and populate a device_node from the flat tree
 * @blob: The parent device tree blob
 * @mem: Memory chunk to use for allocating device nodes and properties
 * @dad: Parent struct device_node
 * @nodepp: The device_node tree created by the call
 *
 * Return: The size of unflattened device tree or error code
 */
static int unflatten_dt_nodes(const void *blob,
			      void *mem,
			      struct device_node *dad,
			      struct device_node **nodepp)
{
	struct device_node *root;
	int offset = 0, depth = 0, initial_depth = 0;
#define FDT_MAX_DEPTH	64
	struct device_node *nps[FDT_MAX_DEPTH];
	void *base = mem;
	bool dryrun = !base;
	int ret;

	if (nodepp)
		*nodepp = NULL;

	/*
	 * We're unflattening a device sub-tree if @dad is valid. There are
	 * possibly multiple nodes in the first level of depth. We need to
	 * set @depth to 1 to make fdt_next_node() happy, as it bails out
	 * immediately when a negative @depth is found. Otherwise, all device
	 * nodes except the first one won't be unflattened successfully.
	 */
	if (dad)
		depth = initial_depth = 1;

	root = dad;
	nps[depth] = dad;

	for (offset = 0;
	     offset >= 0 && depth >= initial_depth;
	     offset = fdt_next_node(blob, offset, &depth)) {
		if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH - 1))
			continue;

		if (!IS_ENABLED(CONFIG_OF_KOBJ) &&
		    !of_fdt_device_is_available(blob, offset))
			continue;

		ret = populate_node(blob, offset, &mem, nps[depth],
				   &nps[depth+1], dryrun);
		if (ret < 0)
			return ret;

		if (!dryrun && nodepp && !*nodepp)
			*nodepp = nps[depth+1];
		if (!dryrun && !root)
			root = nps[depth+1];
	}

	if (offset < 0 && offset != -FDT_ERR_NOTFOUND) {
		pr_err("Error %d processing FDT\n", offset);
		return -EINVAL;
	}

	/*
	 * Reverse the child list. Some drivers assume the node order matches
	 * the .dts node order.
	 */
	if (!dryrun)
		reverse_nodes(root);

	return mem - base;
}

/**
 * __unflatten_device_tree - create tree of device_nodes from flat blob
 * @blob: The blob to expand
 * @dad: Parent device node
 * @mynodes: The device_node tree created by the call
 * @dt_alloc: An allocator that provides a virtual address to memory
 * for the resulting tree
 * @detached: if true set OF_DETACHED on @mynodes
 *
 * unflattens a device-tree, creating the tree of struct device_node. It also
 * fills the "name" and "type" pointers of the nodes so the normal device-tree
 * walking functions can be used.
 *
 * Return: NULL on failure or the memory chunk containing the unflattened
 * device tree on success.
 */
void *__unflatten_device_tree(const void *blob,
			      struct device_node *dad,
			      struct device_node **mynodes,
			      void *(*dt_alloc)(u64 size, u64 align),
			      bool detached)
{
	int size;
	void *mem;
	int ret;

	if (mynodes)
		*mynodes = NULL;

	pr_debug(" -> unflatten_device_tree()\n");

	if (!blob) {
		pr_debug("No device tree pointer\n");
		return NULL;
	}

	pr_debug("Unflattening device tree:\n");
	pr_debug("magic: %08x\n", fdt_magic(blob));
	pr_debug("size: %08x\n", fdt_totalsize(blob));
	pr_debug("version: %08x\n", fdt_version(blob));

	if (fdt_check_header(blob)) {
		pr_err("Invalid device tree blob header\n");
		return NULL;
	}

	/* First pass, scan for size */
	size = unflatten_dt_nodes(blob, NULL, dad, NULL);
	if (size <= 0)
		return NULL;

	size = ALIGN(size, 4);
	pr_debug("  size is %d, allocating...\n", size);

	/* Allocate memory for the expanded device tree */
	mem = dt_alloc(size + 4, __alignof__(struct device_node));
	if (!mem)
		return NULL;

	memset(mem, 0, size);

	*(__be32 *)(mem + size) = cpu_to_be32(0xdeadbeef);

	pr_debug("  unflattening %p...\n", mem);

	/* Second pass, do actual unflattening */
	ret = unflatten_dt_nodes(blob, mem, dad, mynodes);

	if (be32_to_cpup(mem + size) != 0xdeadbeef)
		pr_warn("End of tree marker overwritten: %08x\n",
			be32_to_cpup(mem + size));

	if (ret <= 0)
		return NULL;

	if (detached && mynodes && *mynodes) {
		of_node_set_flag(*mynodes, OF_DETACHED);
		pr_debug("unflattened tree is detached\n");
	}

	pr_debug(" <- unflatten_device_tree()\n");
	return mem;
}

static void *kernel_tree_alloc(u64 size, u64 align)
{
	return kzalloc(size, GFP_KERNEL);
}

static DEFINE_MUTEX(of_fdt_unflatten_mutex);

/**
 * of_fdt_unflatten_tree - create tree of device_nodes from flat blob
 * @blob: Flat device tree blob
 * @dad: Parent device node
 * @mynodes: The device tree created by the call
 *
 * unflattens the device-tree passed by the firmware, creating the
 * tree of struct device_node. It also fills the "name" and "type"
 * pointers of the nodes so the normal device-tree walking functions
 * can be used.
 *
 * Return: NULL on failure or the memory chunk containing the unflattened
 * device tree on success.
 */
void *of_fdt_unflatten_tree(const unsigned long *blob,
			    struct device_node *dad,
			    struct device_node **mynodes)
{
	void *mem;

	mutex_lock(&of_fdt_unflatten_mutex);
	mem = __unflatten_device_tree(blob, dad, mynodes, &kernel_tree_alloc,
				      true);
	mutex_unlock(&of_fdt_unflatten_mutex);

	return mem;
}
EXPORT_SYMBOL_GPL(of_fdt_unflatten_tree);
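
/*
 * Example usage (illustrative sketch; @blob is a placeholder that must point
 * to a valid FDT already loaded in memory, e.g. an overlay blob):
 *
 *	struct device_node *overlay_root;
 *	void *mem;
 *
 *	mem = of_fdt_unflatten_tree(blob, NULL, &overlay_root);
 *	if (!mem)
 *		return -EINVAL;
 *
 * The resulting tree carries OF_DETACHED (detached is passed as true above),
 * and the returned memory chunk must stay allocated for as long as the
 * device_nodes are referenced.
 */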

/* Everything below here references initial_boot_params directly. */
int __initdata dt_root_addr_cells;
int __initdata dt_root_size_cells;

void *initial_boot_params __ro_after_init;

#ifdef CONFIG_OF_EARLY_FLATTREE

static u32 of_fdt_crc32;

/*
 * fdt_reserve_elfcorehdr() - reserves memory for the ELF core header
 *
 * This function reserves the memory occupied by an ELF core header
 * described in the device tree. This region contains all the
 * information about the primary kernel's core image and is used by a
 * dump capture kernel to access the primary kernel's system memory.
 */
static void __init fdt_reserve_elfcorehdr(void)
{
	if (!IS_ENABLED(CONFIG_CRASH_DUMP) || !elfcorehdr_size)
		return;

	if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
		pr_warn("elfcorehdr is overlapped\n");
		return;
	}

	memblock_reserve(elfcorehdr_addr, elfcorehdr_size);

	pr_info("Reserving %llu KiB of memory at 0x%llx for elfcorehdr\n",
		elfcorehdr_size >> 10, elfcorehdr_addr);
}

/**
 * early_init_fdt_scan_reserved_mem() - create reserved memory regions
 *
 * This function grabs memory from the early allocator for exclusive device
 * use, as defined in the device tree. It should be called by arch-specific
 * code once the early allocator (i.e. memblock) has been fully activated.
 */
void __init early_init_fdt_scan_reserved_mem(void)
{
	int n;
	u64 base, size;

	if (!initial_boot_params)
		return;

	fdt_scan_reserved_mem();
	fdt_reserve_elfcorehdr();

	/* Process header /memreserve/ fields */
	for (n = 0; ; n++) {
		fdt_get_mem_rsv(initial_boot_params, n, &base, &size);
		if (!size)
			break;
		memblock_reserve(base, size);
	}

	fdt_init_reserved_mem();
}

/**
 * early_init_fdt_reserve_self() - reserve the memory used by the FDT blob
 */
void __init early_init_fdt_reserve_self(void)
{
	if (!initial_boot_params)
		return;

	/* Reserve the dtb region */
	memblock_reserve(__pa(initial_boot_params),
			 fdt_totalsize(initial_boot_params));
}

/**
 * of_scan_flat_dt - scan the flattened tree blob and call a callback on each node.
 * @it: callback function
 * @data: context data pointer
 *
 * This function is used to scan the flattened device tree, typically to
 * extract the memory information at boot, before the tree can be
 * unflattened.
 */
int __init of_scan_flat_dt(int (*it)(unsigned long node,
				     const char *uname, int depth,
				     void *data),
			   void *data)
{
	const void *blob = initial_boot_params;
	const char *pathp;
	int offset, rc = 0, depth = -1;

	if (!blob)
		return 0;

	for (offset = fdt_next_node(blob, -1, &depth);
	     offset >= 0 && depth >= 0 && !rc;
	     offset = fdt_next_node(blob, offset, &depth)) {

		pathp = fdt_get_name(blob, offset, NULL);
		rc = it(offset, pathp, depth, data);
	}
	return rc;
}
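
/*
 * Example (illustrative sketch of a scan callback, in the style used by arch
 * code before the tree is unflattened; the property name, callback name and
 * the of_scan_flat_dt() call site are placeholders):
 *
 *	static int __init scan_for_foo(unsigned long node, const char *uname,
 *				       int depth, void *data)
 *	{
 *		const __be32 *prop;
 *
 *		if (depth != 1 || strcmp(uname, "chosen") != 0)
 *			return 0;
 *
 *		prop = of_get_flat_dt_prop(node, "foo-property", NULL);
 *		if (prop)
 *			*(u32 *)data = be32_to_cpup(prop);
 *
 *		return 1;
 *	}
 *
 * A non-zero return value from the callback stops the scan, e.g. once
 * of_scan_flat_dt(scan_for_foo, &foo_value) has found its node.
 */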

/**
 * of_scan_flat_dt_subnodes - scan sub-nodes of a node and call a callback on each.
 * @parent: parent node
 * @it: callback function
 * @data: context data pointer
 *
 * This function is used to scan the sub-nodes of a node.
 */
int __init of_scan_flat_dt_subnodes(unsigned long parent,
				    int (*it)(unsigned long node,
					      const char *uname,
					      void *data),
				    void *data)
{
	const void *blob = initial_boot_params;
	int node;

	fdt_for_each_subnode(node, blob, parent) {
		const char *pathp;
		int rc;

		pathp = fdt_get_name(blob, node, NULL);
		rc = it(node, pathp, data);
		if (rc)
			return rc;
	}
	return 0;
}

/**
 * of_get_flat_dt_subnode_by_name - get the subnode by given name
 *
 * @node: the parent node
 * @uname: the name of subnode
 *
 * Return: offset of the subnode, or -FDT_ERR_NOTFOUND if there is none
 */
int __init of_get_flat_dt_subnode_by_name(unsigned long node, const char *uname)
{
	return fdt_subnode_offset(initial_boot_params, node, uname);
}

/*
 * of_get_flat_dt_root - find the root node in the flat blob
 */
unsigned long __init of_get_flat_dt_root(void)
{
	return 0;
}

/*
 * of_get_flat_dt_prop - Given a node in the flat blob, return the property ptr
 *
 * This function can be used within scan_flattened_dt callback to get
 * access to properties
 */
const void *__init of_get_flat_dt_prop(unsigned long node, const char *name,
				       int *size)
{
	return fdt_getprop(initial_boot_params, node, name, size);
}

/**
 * of_fdt_is_compatible - Return true if given node from the given blob has
 * compat in its compatible list
 * @blob: A device tree blob
 * @node: node to test
 * @compat: compatible string to compare with compatible list.
 *
 * Return: a non-zero value on match with smaller values returned for more
 * specific compatible values.
 */
static int of_fdt_is_compatible(const void *blob,
		      unsigned long node, const char *compat)
{
	const char *cp;
	int cplen;
	unsigned long l, score = 0;

	cp = fdt_getprop(blob, node, "compatible", &cplen);
	if (cp == NULL)
		return 0;
	while (cplen > 0) {
		score++;
		if (of_compat_cmp(cp, compat, strlen(compat)) == 0)
			return score;
		l = strlen(cp) + 1;
		cp += l;
		cplen -= l;
	}

	return 0;
}
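
/*
 * Example (illustrative values): for a node carrying
 * compatible = "vendor,board-v2", "vendor,board", the call
 * of_fdt_is_compatible(blob, node, "vendor,board-v2") returns 1 while
 * of_fdt_is_compatible(blob, node, "vendor,board") returns 2, so callers
 * such as of_flat_dt_match() below treat the lower score as the more
 * specific match.
 */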

/**
 * of_flat_dt_is_compatible - Return true if given node has compat in compatible list
 * @node: node to test
 * @compat: compatible string to compare with compatible list.
 */
int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
{
	return of_fdt_is_compatible(initial_boot_params, node, compat);
}

/*
 * of_flat_dt_match - Return true if node matches a list of compatible values
 */
static int __init of_flat_dt_match(unsigned long node, const char *const *compat)
{
	unsigned int tmp, score = 0;

	if (!compat)
		return 0;

	while (*compat) {
		tmp = of_fdt_is_compatible(initial_boot_params, node, *compat);
		if (tmp && (score == 0 || (tmp < score)))
			score = tmp;
		compat++;
	}

	return score;
}

/*
 * of_get_flat_dt_phandle - Given a node in the flat blob, return the phandle
 */
uint32_t __init of_get_flat_dt_phandle(unsigned long node)
{
	return fdt_get_phandle(initial_boot_params, node);
}

const char * __init of_flat_dt_get_machine_name(void)
{
	const char *name;
	unsigned long dt_root = of_get_flat_dt_root();

	name = of_get_flat_dt_prop(dt_root, "model", NULL);
	if (!name)
		name = of_get_flat_dt_prop(dt_root, "compatible", NULL);
	return name;
}

/**
 * of_flat_dt_match_machine - Iterate match tables to find matching machine.
 *
 * @default_match: A machine specific ptr to return in case of no match.
 * @get_next_compat: callback function to return next compatible match table.
 *
 * Iterate through machine match tables to find the best match for the machine
 * compatible string in the FDT.
 */
const void * __init of_flat_dt_match_machine(const void *default_match,
		const void * (*get_next_compat)(const char * const**))
{
	const void *data = NULL;
	const void *best_data = default_match;
	const char *const *compat;
	unsigned long dt_root;
	unsigned int best_score = ~1, score = 0;

	dt_root = of_get_flat_dt_root();
	while ((data = get_next_compat(&compat))) {
		score = of_flat_dt_match(dt_root, compat);
		if (score > 0 && score < best_score) {
			best_data = data;
			best_score = score;
		}
	}
	if (!best_data) {
		const char *prop;
		int size;

		pr_err("\n unrecognized device tree list:\n[ ");

		prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
		if (prop) {
			while (size > 0) {
				printk("'%s' ", prop);
				size -= strlen(prop) + 1;
				prop += strlen(prop) + 1;
			}
		}
		printk("]\n\n");
		return NULL;
	}

	pr_info("Machine model: %s\n", of_flat_dt_get_machine_name());

	return best_data;
}
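
/*
 * Example (illustrative sketch of a @get_next_compat callback walking an
 * architecture's machine descriptor table; struct mach_desc, mach_table and
 * the callback name are placeholders):
 *
 *	struct mach_desc {
 *		const char *const *compat;
 *		const void *data;
 *	};
 *
 *	static const struct mach_desc *next_mach;
 *
 *	static const void * __init get_next_mach(const char *const **compat)
 *	{
 *		const struct mach_desc *m = next_mach++;
 *
 *		if (!m->compat)
 *			return NULL;
 *		*compat = m->compat;
 *		return m->data;
 *	}
 *
 * With next_mach pointing at mach_table[0], a call such as
 * of_flat_dt_match_machine(NULL, get_next_mach) returns the data of the
 * best-matching descriptor, or NULL if nothing matches.
 */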

static void __early_init_dt_declare_initrd(unsigned long start,
					   unsigned long end)
{
	/*
	 * __va() is not yet available this early on some platforms. In that
	 * case, the platform uses phys_initrd_start/phys_initrd_size instead
	 * and does the VA conversion itself.
	 */
	if (!IS_ENABLED(CONFIG_ARM64) &&
	    !(IS_ENABLED(CONFIG_RISCV) && IS_ENABLED(CONFIG_64BIT))) {
		initrd_start = (unsigned long)__va(start);
		initrd_end = (unsigned long)__va(end);
		initrd_below_start_ok = 1;
	}
}

/**
 * early_init_dt_check_for_initrd - Decode initrd location from flat tree
 * @node: reference to node containing initrd location ('chosen')
 */
static void __init early_init_dt_check_for_initrd(unsigned long node)
{
	u64 start, end;
	int len;
	const __be32 *prop;

	if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
		return;

	pr_debug("Looking for initrd properties... ");

	prop = of_get_flat_dt_prop(node, "linux,initrd-start", &len);
	if (!prop)
		return;
	start = of_read_number(prop, len/4);

	prop = of_get_flat_dt_prop(node, "linux,initrd-end", &len);
	if (!prop)
		return;
	end = of_read_number(prop, len/4);
	if (start > end)
		return;

	__early_init_dt_declare_initrd(start, end);
	phys_initrd_start = start;
	phys_initrd_size = end - start;

	pr_debug("initrd_start=0x%llx  initrd_end=0x%llx\n", start, end);
}

/**
 * early_init_dt_check_for_elfcorehdr - Decode elfcorehdr location from flat
 * tree
 * @node: reference to node containing elfcorehdr location ('chosen')
 */
static void __init early_init_dt_check_for_elfcorehdr(unsigned long node)
{
	const __be32 *prop;
	int len;

	if (!IS_ENABLED(CONFIG_CRASH_DUMP))
		return;

	pr_debug("Looking for elfcorehdr property... ");

	prop = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len);
	if (!prop || (len < (dt_root_addr_cells + dt_root_size_cells)))
		return;

	elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, &prop);
	elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, &prop);

	pr_debug("elfcorehdr_start=0x%llx elfcorehdr_size=0x%llx\n",
		 elfcorehdr_addr, elfcorehdr_size);
}

static unsigned long chosen_node_offset = -FDT_ERR_NOTFOUND;

/*
 * The main use of linux,usable-memory-range is for the crash dump kernel.
 * Originally there was only one usable-memory region; now there may be
 * two regions, a low region and a high region.
 * For compatibility with existing user space and older kdump, the low
 * region is always the last range of linux,usable-memory-range, if it
 * exists.
 */
#define MAX_USABLE_RANGES		2

/**
 * early_init_dt_check_for_usable_mem_range - Decode usable memory range
 * location from flat tree
 */
void __init early_init_dt_check_for_usable_mem_range(void)
{
	struct memblock_region rgn[MAX_USABLE_RANGES] = {0};
	const __be32 *prop, *endp;
	int len, i;
	unsigned long node = chosen_node_offset;

	if ((long)node < 0)
		return;

	pr_debug("Looking for usable-memory-range property... ");

	prop = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
	if (!prop || (len % (dt_root_addr_cells + dt_root_size_cells)))
		return;

	endp = prop + (len / sizeof(__be32));
	for (i = 0; i < MAX_USABLE_RANGES && prop < endp; i++) {
		rgn[i].base = dt_mem_next_cell(dt_root_addr_cells, &prop);
		rgn[i].size = dt_mem_next_cell(dt_root_size_cells, &prop);

		pr_debug("cap_mem_regions[%d]: base=%pa, size=%pa\n",
			 i, &rgn[i].base, &rgn[i].size);
	}

	memblock_cap_memory_range(rgn[0].base, rgn[0].size);
	for (i = 1; i < MAX_USABLE_RANGES && rgn[i].size; i++)
		memblock_add(rgn[i].base, rgn[i].size);
}

#ifdef CONFIG_SERIAL_EARLYCON

int __init early_init_dt_scan_chosen_stdout(void)
{
	int offset;
	const char *p, *q, *options = NULL;
	int l;
	const struct earlycon_id *match;
	const void *fdt = initial_boot_params;
	int ret;

	offset = fdt_path_offset(fdt, "/chosen");
	if (offset < 0)
		offset = fdt_path_offset(fdt, "/chosen@0");
	if (offset < 0)
		return -ENOENT;

	p = fdt_getprop(fdt, offset, "stdout-path", &l);
	if (!p)
		p = fdt_getprop(fdt, offset, "linux,stdout-path", &l);
	if (!p || !l)
		return -ENOENT;

	q = strchrnul(p, ':');
	if (*q != '\0')
		options = q + 1;
	l = q - p;

	/* Get the node specified by stdout-path */
	offset = fdt_path_offset_namelen(fdt, p, l);
	if (offset < 0) {
		pr_warn("earlycon: stdout-path %.*s not found\n", l, p);
		return 0;
	}

	for (match = __earlycon_table; match < __earlycon_table_end; match++) {
		if (!match->compatible[0])
			continue;

		if (fdt_node_check_compatible(fdt, offset, match->compatible))
			continue;

		ret = of_setup_earlycon(match, offset, options);
		if (!ret || ret == -EALREADY)
			return 0;
	}
	return -ENODEV;
}
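
/*
 * Example (illustrative value): with stdout-path = "/soc/serial@10000000:115200n8"
 * in /chosen, the code above splits the string at ':', so the node lookup
 * uses "/soc/serial@10000000" while "115200n8" is passed to
 * of_setup_earlycon() as the options string.
 */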
#endif

/*
 * early_init_dt_scan_root - fetch the top level address and size cells
 */
int __init early_init_dt_scan_root(void)
{
	const __be32 *prop;
	const void *fdt = initial_boot_params;
	int node = fdt_path_offset(fdt, "/");

	if (node < 0)
		return -ENODEV;

	dt_root_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
	dt_root_addr_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT;

	prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
	if (prop)
		dt_root_size_cells = be32_to_cpup(prop);
	pr_debug("dt_root_size_cells = %x\n", dt_root_size_cells);

	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
	if (prop)
		dt_root_addr_cells = be32_to_cpup(prop);
	pr_debug("dt_root_addr_cells = %x\n", dt_root_addr_cells);

	return 0;
}

u64 __init dt_mem_next_cell(int s, const __be32 **cellp)
{
	const __be32 *p = *cellp;

	*cellp = p + s;
	return of_read_number(p, s);
}
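
/*
 * Example (illustrative values): with dt_root_addr_cells = 2 and
 * dt_root_size_cells = 2, a reg property of <0x0 0x80000000 0x0 0x40000000>
 * is consumed as
 *
 *	base = dt_mem_next_cell(dt_root_addr_cells, &reg);	// 0x80000000
 *	size = dt_mem_next_cell(dt_root_size_cells, &reg);	// 0x40000000
 *
 * leaving reg advanced by four cells in total.
 */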

/*
 * early_init_dt_scan_memory - Look for and parse memory nodes
 */
int __init early_init_dt_scan_memory(void)
{
	int node, found_memory = 0;
	const void *fdt = initial_boot_params;

	fdt_for_each_subnode(node, fdt, 0) {
		const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
		const __be32 *reg, *endp;
		int l;
		bool hotpluggable;

		/* We are scanning "memory" nodes only */
		if (type == NULL || strcmp(type, "memory") != 0)
			continue;

		if (!of_fdt_device_is_available(fdt, node))
			continue;

		reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l);
		if (reg == NULL)
			reg = of_get_flat_dt_prop(node, "reg", &l);
		if (reg == NULL)
			continue;

		endp = reg + (l / sizeof(__be32));
		hotpluggable = of_get_flat_dt_prop(node, "hotpluggable", NULL);

		pr_debug("memory scan node %s, reg size %d,\n",
			 fdt_get_name(fdt, node, NULL), l);

		while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
			u64 base, size;

			base = dt_mem_next_cell(dt_root_addr_cells, &reg);
			size = dt_mem_next_cell(dt_root_size_cells, &reg);

			if (size == 0)
				continue;
			pr_debug(" - %llx, %llx\n", base, size);

			early_init_dt_add_memory_arch(base, size);

			found_memory = 1;

			if (!hotpluggable)
				continue;

			if (memblock_mark_hotplug(base, size))
				pr_warn("failed to mark hotplug range 0x%llx - 0x%llx\n",
					base, base + size);
		}
	}
	return found_memory;
}

int __init early_init_dt_scan_chosen(char *cmdline)
{
	int l, node;
	const char *p;
	const void *rng_seed;
	const void *fdt = initial_boot_params;

	node = fdt_path_offset(fdt, "/chosen");
	if (node < 0)
		node = fdt_path_offset(fdt, "/chosen@0");
	if (node < 0)
		/* Handle the cmdline config options even if no /chosen node */
		goto handle_cmdline;

	chosen_node_offset = node;

	early_init_dt_check_for_initrd(node);
	early_init_dt_check_for_elfcorehdr(node);

	rng_seed = of_get_flat_dt_prop(node, "rng-seed", &l);
	if (rng_seed && l > 0) {
		add_bootloader_randomness(rng_seed, l);

		/* try to clear seed so it won't be found. */
		fdt_nop_property(initial_boot_params, node, "rng-seed");

		/* update CRC check value */
		of_fdt_crc32 = crc32_be(~0, initial_boot_params,
				fdt_totalsize(initial_boot_params));
	}

	/* Retrieve command line */
	p = of_get_flat_dt_prop(node, "bootargs", &l);
	if (p != NULL && l > 0)
		strscpy(cmdline, p, min(l, COMMAND_LINE_SIZE));

handle_cmdline:
	/*
	 * CONFIG_CMDLINE is meant to be a default in case nothing else
	 * managed to set the command line, unless CONFIG_CMDLINE_FORCE
	 * is set in which case we override whatever was found earlier.
	 */
#ifdef CONFIG_CMDLINE
#if defined(CONFIG_CMDLINE_EXTEND)
	strlcat(cmdline, " ", COMMAND_LINE_SIZE);
	strlcat(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#elif defined(CONFIG_CMDLINE_FORCE)
	strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#else
	/* No arguments from boot loader, use kernel's cmdline */
	if (!((char *)cmdline)[0])
		strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#endif
#endif /* CONFIG_CMDLINE */

	pr_debug("Command line is: %s\n", (char *)cmdline);

	return 0;
}

#ifndef MIN_MEMBLOCK_ADDR
#define MIN_MEMBLOCK_ADDR	__pa(PAGE_OFFSET)
#endif
#ifndef MAX_MEMBLOCK_ADDR
#define MAX_MEMBLOCK_ADDR	((phys_addr_t)~0)
#endif

void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
{
	const u64 phys_offset = MIN_MEMBLOCK_ADDR;

	if (size < PAGE_SIZE - (base & ~PAGE_MASK)) {
		pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
			base, base + size);
		return;
	}

	if (!PAGE_ALIGNED(base)) {
		size -= PAGE_SIZE - (base & ~PAGE_MASK);
		base = PAGE_ALIGN(base);
	}
	size &= PAGE_MASK;

	if (base > MAX_MEMBLOCK_ADDR) {
		pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
			base, base + size);
		return;
	}

	if (base + size - 1 > MAX_MEMBLOCK_ADDR) {
		pr_warn("Ignoring memory range 0x%llx - 0x%llx\n",
			((u64)MAX_MEMBLOCK_ADDR) + 1, base + size);
		size = MAX_MEMBLOCK_ADDR - base + 1;
	}

	if (base + size < phys_offset) {
		pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
			base, base + size);
		return;
	}
	if (base < phys_offset) {
		pr_warn("Ignoring memory range 0x%llx - 0x%llx\n",
			base, phys_offset);
		size -= phys_offset - base;
		base = phys_offset;
	}
	memblock_add(base, size);
}
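
/*
 * Worked example (illustrative values, assuming 4 KiB pages): a block passed
 * in as base = 0x80000800, size = 0x100000 is trimmed above to the
 * page-aligned range base = 0x80001000, size = 0xff000 before being handed
 * to memblock_add(); a block that lies entirely below MIN_MEMBLOCK_ADDR is
 * dropped instead.
 */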

static void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
	void *ptr = memblock_alloc(size, align);

	if (!ptr)
		panic("%s: Failed to allocate %llu bytes align=0x%llx\n",
		      __func__, size, align);

	return ptr;
}

bool __init early_init_dt_verify(void *params)
{
	if (!params)
		return false;

	/* check device tree validity */
	if (fdt_check_header(params))
		return false;

	/* Setup flat device-tree pointer */
	initial_boot_params = params;
	of_fdt_crc32 = crc32_be(~0, initial_boot_params,
				fdt_totalsize(initial_boot_params));
	return true;
}


void __init early_init_dt_scan_nodes(void)
{
	int rc;

	/* Initialize {size,address}-cells info */
	early_init_dt_scan_root();

	/* Retrieve various information from the /chosen node */
	rc = early_init_dt_scan_chosen(boot_command_line);
	if (rc)
		pr_warn("No chosen node found, continuing without\n");

	/* Setup memory, calling early_init_dt_add_memory_arch */
	early_init_dt_scan_memory();

	/* Handle linux,usable-memory-range property */
	early_init_dt_check_for_usable_mem_range();
}

bool __init early_init_dt_scan(void *params)
{
	bool status;

	status = early_init_dt_verify(params);
	if (!status)
		return false;

	early_init_dt_scan_nodes();
	return true;
}
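
/*
 * Example (illustrative sketch of a typical arch-specific boot sequence;
 * dt_virt is a placeholder for a virtual mapping of the boot DTB):
 *
 *	if (!early_init_dt_scan(dt_virt))
 *		panic("No valid device tree found\n");
 *
 * followed later, once memblock is fully up, by
 * early_init_fdt_scan_reserved_mem() and unflatten_device_tree().
 */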

static void *__init copy_device_tree(void *fdt)
{
	int size;
	void *dt;

	size = fdt_totalsize(fdt);
	dt = early_init_dt_alloc_memory_arch(size,
					     roundup_pow_of_two(FDT_V17_SIZE));

	if (dt)
		memcpy(dt, fdt, size);

	return dt;
}

/**
 * unflatten_device_tree - create tree of device_nodes from flat blob
 *
 * unflattens the device-tree passed by the firmware, creating the
 * tree of struct device_node. It also fills the "name" and "type"
 * pointers of the nodes so the normal device-tree walking functions
 * can be used.
 */
void __init unflatten_device_tree(void)
{
	void *fdt = initial_boot_params;

	/* Don't use the bootloader provided DTB if ACPI is enabled */
	if (!acpi_disabled)
		fdt = NULL;

	/*
	 * Populate an empty root node when ACPI is enabled or bootloader
	 * doesn't provide one.
	 */
	if (!fdt) {
		fdt = (void *) __dtb_empty_root_begin;
		/* fdt_totalsize() will be used for copy size */
		if (fdt_totalsize(fdt) >
		    __dtb_empty_root_end - __dtb_empty_root_begin) {
			pr_err("invalid size in dtb_empty_root\n");
			return;
		}
		of_fdt_crc32 = crc32_be(~0, fdt, fdt_totalsize(fdt));
		fdt = copy_device_tree(fdt);
	}

	__unflatten_device_tree(fdt, NULL, &of_root,
				early_init_dt_alloc_memory_arch, false);

	/* Get pointer to "/chosen" and "/aliases" nodes for use everywhere */
	of_alias_scan(early_init_dt_alloc_memory_arch);

	unittest_unflatten_overlay_base();
}

/**
 * unflatten_and_copy_device_tree - copy and create tree of device_nodes from flat blob
 *
 * Copies and unflattens the device-tree passed by the firmware, creating the
 * tree of struct device_node. It also fills the "name" and "type"
 * pointers of the nodes so the normal device-tree walking functions
 * can be used. This should only be used when the FDT memory has not been
 * reserved, as is the case when the FDT is built into the kernel init
 * section. If the FDT memory is already reserved, unflatten_device_tree()
 * should be used instead.
 */
void __init unflatten_and_copy_device_tree(void)
{
	if (initial_boot_params)
		initial_boot_params = copy_device_tree(initial_boot_params);

	unflatten_device_tree();
}

#ifdef CONFIG_SYSFS
static ssize_t of_fdt_raw_read(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr,
			       char *buf, loff_t off, size_t count)
{
	memcpy(buf, initial_boot_params + off, count);
	return count;
}

static int __init of_fdt_raw_init(void)
{
	static struct bin_attribute of_fdt_raw_attr =
		__BIN_ATTR(fdt, S_IRUSR, of_fdt_raw_read, NULL, 0);

	if (!initial_boot_params)
		return 0;

	if (of_fdt_crc32 != crc32_be(~0, initial_boot_params,
				     fdt_totalsize(initial_boot_params))) {
		pr_warn("not creating '/sys/firmware/fdt': CRC check failed\n");
		return 0;
	}
	of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
	return sysfs_create_bin_file(firmware_kobj, &of_fdt_raw_attr);
}
late_initcall(of_fdt_raw_init);
#endif

#endif /* CONFIG_OF_EARLY_FLATTREE */
