// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) Paul Mackerras 1997.
 *
 * Updates for PPC64 by Todd Inglett, Dave Engebretsen & Peter Bergner.
 */
#include <stdarg.h>
#include <stddef.h>
#include "elf.h"
#include "page.h"
#include "string.h"
#include "stdio.h"
#include "ops.h"
#include "reg.h"

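/*
 * A simple (base address, size) pair used to report where the kernel
 * and initrd images end up in memory.
 */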
struct addr_range {
	void *addr;
	unsigned long size;
};

#undef DEBUG

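/*
 * Find the (possibly compressed) vmlinux image attached to the wrapper,
 * parse its ELF header, make sure its final location (including bss) is
 * usable, and decompress or copy it into place.  Returns the kernel's
 * load address and memory footprint.
 */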
static struct addr_range prep_kernel(void)
{
	char elfheader[256];
	unsigned char *vmlinuz_addr = (unsigned char *)_vmlinux_start;
	unsigned long vmlinuz_size = _vmlinux_end - _vmlinux_start;
	void *addr = 0;
	struct elf_info ei;
	long len;
	int uncompressed_image = 0;

	len = partial_decompress(vmlinuz_addr, vmlinuz_size,
		elfheader, sizeof(elfheader), 0);
	/* assume the data is uncompressed if -1 is returned */
	if (len == -1) {
		uncompressed_image = 1;
		memcpy(elfheader, vmlinuz_addr, sizeof(elfheader));
		printf("No valid compressed data found, assume uncompressed data\n\r");
	}

	if (!parse_elf64(elfheader, &ei) && !parse_elf32(elfheader, &ei))
		fatal("Error: not a valid PPC32 or PPC64 ELF file!\n\r");

	if (platform_ops.image_hdr)
		platform_ops.image_hdr(elfheader);

	/* We need to allocate the full memsize: gzip will expand the kernel
	 * text/data, then possibly some rubbish we don't care about. But
	 * the kernel bss must be claimed (it will be zeroed by the
	 * kernel itself).
	 */
	printf("Allocating 0x%lx bytes for kernel...\n\r", ei.memsize);

	if (platform_ops.vmlinux_alloc) {
		addr = platform_ops.vmlinux_alloc(ei.memsize);
	} else {
		/*
		 * Check if the kernel image (without bss) would overwrite the
		 * bootwrapper. The device tree has been moved in fdt_init()
		 * to an area allocated with malloc() (somewhere past _end).
		 */
		if ((unsigned long)_start < ei.loadsize)
			fatal("Insufficient memory for kernel at address 0!"
			       " (_start=%p, uncompressed size=%08lx)\n\r",
			       _start, ei.loadsize);

		if ((unsigned long)_end < ei.memsize)
			fatal("The final kernel image would overwrite the "
			      "device tree\n\r");
	}

	if (uncompressed_image) {
		memcpy(addr, vmlinuz_addr + ei.elfoffset, ei.loadsize);
		printf("0x%lx bytes of uncompressed data copied\n\r",
		       ei.loadsize);
		goto out;
	}

	/* Finally, decompress the kernel */
	printf("Decompressing (0x%p <- 0x%p:0x%p)...\n\r", addr,
	       vmlinuz_addr, vmlinuz_addr+vmlinuz_size);

	len = partial_decompress(vmlinuz_addr, vmlinuz_size,
		addr, ei.loadsize, ei.elfoffset);

	if (len < 0)
		fatal("Decompression failed with error code %ld\n\r", len);

	if (len != ei.loadsize)
		fatal("Decompression error: got 0x%lx bytes, expected 0x%lx.\n\r",
		      len, ei.loadsize);

	printf("Done! Decompressed 0x%lx bytes\n\r", len);
out:
	flush_cache(addr, ei.loadsize);

	return (struct addr_range){addr, ei.memsize};
}

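/*
 * Decide which initrd to use: an image linked into the wrapper takes
 * precedence over one handed to us by the loader.  If the chosen image
 * sits below the kernel's final location it is copied somewhere safe,
 * and its bounds are recorded in the /chosen node for the kernel.
 */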
static struct addr_range prep_initrd(struct addr_range vmlinux, void *chosen,
				     unsigned long initrd_addr,
				     unsigned long initrd_size)
{
	/* If we have an image attached to us, it overrides anything
	 * supplied by the loader. */
	if (&_initrd_end > &_initrd_start) {
		printf("Attached initrd image at 0x%p-0x%p\n\r",
		       _initrd_start, _initrd_end);
		initrd_addr = (unsigned long)_initrd_start;
		initrd_size = _initrd_end - _initrd_start;
	} else if (initrd_size > 0) {
		printf("Using loader supplied ramdisk at 0x%lx-0x%lx\n\r",
		       initrd_addr, initrd_addr + initrd_size);
	}

	/* If there's no initrd at all, we're done */
	if (!initrd_size)
		return (struct addr_range){0, 0};

	/*
	 * If the initrd is too low it will be clobbered when the
	 * kernel relocates to its final location.  In this case,
	 * allocate a safer place and move it.
	 */
	if (initrd_addr < vmlinux.size) {
		void *old_addr = (void *)initrd_addr;

		printf("Allocating 0x%lx bytes for initrd ...\n\r",
		       initrd_size);
		initrd_addr = (unsigned long)malloc(initrd_size);
		if (!initrd_addr)
			fatal("Can't allocate memory for initial "
			       "ramdisk!\n\r");
		printf("Relocating initrd 0x%lx <- 0x%p (0x%lx bytes)\n\r",
		       initrd_addr, old_addr, initrd_size);
		memmove((void *)initrd_addr, old_addr, initrd_size);
	}

	printf("initrd head: 0x%lx\n\r", *((unsigned long *)initrd_addr));

	/* Tell the kernel the initrd address via the device tree */
	setprop_val(chosen, "linux,initrd-start", (u32)(initrd_addr));
	setprop_val(chosen, "linux,initrd-end", (u32)(initrd_addr+initrd_size));

	return (struct addr_range){(void *)initrd_addr, initrd_size};
}

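/*
 * On 64-bit, advertise an ESM (Enter Secure Mode) blob linked into the
 * wrapper, relocating it first if the kernel would land on top of it.
 */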
#ifdef __powerpc64__
static void prep_esm_blob(struct addr_range vmlinux, void *chosen)
{
	unsigned long esm_blob_addr, esm_blob_size;

	/* Do we have an ESM (Enter Secure Mode) blob? */
	if (&_esm_blob_end <= &_esm_blob_start)
		return;

	printf("Attached ESM blob at 0x%p-0x%p\n\r",
	       _esm_blob_start, _esm_blob_end);
	esm_blob_addr = (unsigned long)_esm_blob_start;
	esm_blob_size = _esm_blob_end - _esm_blob_start;

	/*
	 * If the ESM blob is too low it will be clobbered when the
	 * kernel relocates to its final location.  In this case,
	 * allocate a safer place and move it.
	 */
	if (esm_blob_addr < vmlinux.size) {
		void *old_addr = (void *)esm_blob_addr;

		printf("Allocating 0x%lx bytes for esm_blob ...\n\r",
		       esm_blob_size);
		esm_blob_addr = (unsigned long)malloc(esm_blob_size);
		if (!esm_blob_addr)
			fatal("Can't allocate memory for ESM blob!\n\r");
		printf("Relocating ESM blob 0x%lx <- 0x%p (0x%lx bytes)\n\r",
		       esm_blob_addr, old_addr, esm_blob_size);
		memmove((void *)esm_blob_addr, old_addr, esm_blob_size);
	}

	/* Tell the kernel the ESM blob address via the device tree. */
	setprop_val(chosen, "linux,esm-blob-start", (u32)(esm_blob_addr));
	setprop_val(chosen, "linux,esm-blob-end", (u32)(esm_blob_addr + esm_blob_size));
}
#else
static inline void prep_esm_blob(struct addr_range vmlinux, void *chosen) { }
#endif

/* A buffer that may be edited by tools operating on a zImage binary to
 * change the command line passed to vmlinux (by setting /chosen/bootargs).
 * The buffer is placed in its own section so that tools can locate it
 * more easily.
 */
static char cmdline[BOOT_COMMAND_LINE_SIZE]
	__attribute__((__section__("__builtin_cmdline")));

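/*
 * Assemble the kernel command line: honour an optional
 * linux,cmdline-timeout property, fall back to the existing
 * /chosen/bootargs if no built-in command line was set, give the
 * console a chance to edit it, then write it back into the device tree.
 */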
static void prep_cmdline(void *chosen)
{
	unsigned int getline_timeout = 5000;
	int v;
	int n;

	/* Wait-for-input time */
	n = getprop(chosen, "linux,cmdline-timeout", &v, sizeof(v));
	if (n == sizeof(v))
		getline_timeout = v;

	if (cmdline[0] == '\0')
		getprop(chosen, "bootargs", cmdline, BOOT_COMMAND_LINE_SIZE-1);

	printf("\n\rLinux/PowerPC load: %s", cmdline);

	/* If possible, edit the command line */
	if (console_ops.edit_cmdline && getline_timeout)
		console_ops.edit_cmdline(cmdline, BOOT_COMMAND_LINE_SIZE, getline_timeout);

	printf("\n\r");

	/* Put the command line back into the devtree for the kernel */
	setprop_str(chosen, "bootargs", cmdline);
}

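/* Global operation tables, filled in by the platform- and console-specific
 * setup code (typically platform_init()) before start() is entered. */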
struct platform_ops platform_ops;
struct dt_ops dt_ops;
struct console_ops console_ops;
struct loader_info loader_info;

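/*
 * Main C entry point of the boot wrapper: set up the command line and
 * console, prepare the kernel, initrd and ESM blob, finalize the device
 * tree, and then jump to the kernel entry point.
 */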
void start(void)
{
	struct addr_range vmlinux, initrd;
	kernel_entry_t kentry;
	unsigned long ft_addr = 0;
	void *chosen;

	/* Do this first, because malloc() could clobber the loader's
	 * command line.  Only use the loader command line if a
	 * built-in command line wasn't set by an external tool. */
	if ((loader_info.cmdline_len > 0) && (cmdline[0] == '\0'))
		memmove(cmdline, loader_info.cmdline,
			min(loader_info.cmdline_len, BOOT_COMMAND_LINE_SIZE-1));

	if (console_ops.open && (console_ops.open() < 0))
		exit();
	if (platform_ops.fixups)
		platform_ops.fixups();

	printf("\n\rzImage starting: loaded at 0x%p (sp: 0x%p)\n\r",
	       _start, get_sp());

	/* Ensure that the device tree has a /chosen node */
	chosen = finddevice("/chosen");
	if (!chosen)
		chosen = create_node(NULL, "chosen");

	vmlinux = prep_kernel();
	initrd = prep_initrd(vmlinux, chosen,
			     loader_info.initrd_addr, loader_info.initrd_size);
	prep_esm_blob(vmlinux, chosen);
	prep_cmdline(chosen);

	printf("Finalizing device tree...");
	if (dt_ops.finalize)
		ft_addr = dt_ops.finalize();
	if (ft_addr)
		printf(" flat tree at 0x%lx\n\r", ft_addr);
	else
		printf(" using OF tree (promptr=%p)\n\r", loader_info.promptr);

	if (console_ops.close)
		console_ops.close();

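	/*
	 * Enter the kernel.  When a flattened device tree was produced it is
	 * passed as the first argument (or handed to a platform-specific
	 * entry hook); otherwise the kernel is entered with the initrd range
	 * and the Open Firmware client interface pointer.
	 */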
	kentry = (kernel_entry_t) vmlinux.addr;
	if (ft_addr) {
		if (platform_ops.kentry)
			platform_ops.kentry(ft_addr, vmlinux.addr);
		else
			kentry(ft_addr, 0, NULL);
	} else {
		kentry((unsigned long)initrd.addr, initrd.size,
		       loader_info.promptr);
	}

	/* The console is closed, so the printf() in fatal() below may not work */
	fatal("Error: Linux kernel returned to zImage boot wrapper!\n\r");
}
