/*-
 * Copyright (c) 2005 Olivier Houchard.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <machine/asm.h>
#include <sys/param.h>

#if ELFSIZE == 64
#include <sys/elf64.h>
#else
#include <sys/elf32.h>
#endif

/*
 * Since we are compiled outside of the normal kernel build process, we
 * need to include opt_global.h manually.
 */
#include "opt_global.h"

#include <sys/inflate.h>
#include <machine/elf.h>
#include <machine/cpufunc.h>
#include <machine/stdarg.h>

#ifndef KERNNAME
#error Kernel name not provided
#endif

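/*
 * kernel_start and kernel_end bracket the kernel ELF image that is linked
 * into this trampoline; presumably they are defined by the assembly stub
 * that .incbin's the file named by KERNNAME.
 */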
extern char kernel_start[];
extern char kernel_end[];

static __inline void *
memcpy(void *dst, const void *src, size_t len)
{
	const char *s = src;
	char *d = dst;

	while (len) {
		/*
		 * The word-at-a-time path below is disabled by the leading 0,
		 * so the copy always falls through to the bytewise case.
		 */
		if (0 && len >= 4 && !((vm_offset_t)d & 3) &&
		    !((vm_offset_t)s & 3)) {
			*(uint32_t *)d = *(uint32_t *)s;
			s += 4;
			d += 4;
			len -= 4;
		} else {
			*d++ = *s++;
			len--;
		}
	}
	return (dst);
}

static __inline void
bzero(void *addr, size_t count)
{
	char *tmp = (char *)addr;

	while (count > 0) {
		if (count >= 4 && !((vm_offset_t)tmp & 3)) {
			*(uint32_t *)tmp = 0;
			tmp += 4;
			count -= 4;
		} else {
			*tmp = 0;
			tmp++;
			count--;
		}
	}
}

/*
 * Convert a number to a pointer: truncate in the 64->32 case, sign extend
 * in the 32->64 case.
 */
#define	mkptr(x)	((void *)(intptr_t)(int)(x))
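/*
 * For example (illustrative values): a 32-bit build turns
 * mkptr(0xffffffff80001000ULL) into (void *)0x80001000, while a 64-bit
 * build sign-extends mkptr(0x80001000) to (void *)0xffffffff80001000.
 */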

/*
 * Relocate the PT_LOAD segments of the kernel ELF image to their respective
 * virtual addresses and return the entry point.
 */
void *
load_kernel(void *kstart)
{
#if ELFSIZE == 64
	Elf64_Ehdr *eh;
	Elf64_Phdr phdr[64] /* XXX */;
	Elf64_Shdr shdr[64] /* XXX */;
#else
	Elf32_Ehdr *eh;
	Elf32_Phdr phdr[64] /* XXX */;
	Elf32_Shdr shdr[64] /* XXX */;
#endif
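	/*
	 * Note: the phdr and shdr arrays above are fixed at 64 entries (the
	 * "XXX"), so images with more program or section headers than that
	 * are not handled here.
	 */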
	int i;
	void *entry_point;
	vm_offset_t loadend = 0;
	intptr_t lastaddr;
	int symtabindex = -1;
	int symstrindex = -1;
	Elf_Size tmp;

#if ELFSIZE == 64
	eh = (Elf64_Ehdr *)kstart;
#else
	eh = (Elf32_Ehdr *)kstart;
#endif
	entry_point = mkptr(eh->e_entry);
	memcpy(phdr, (void *)(kstart + eh->e_phoff),
	    eh->e_phnum * sizeof(phdr[0]));

	memcpy(shdr, (void *)(kstart + eh->e_shoff),
	    sizeof(*shdr) * eh->e_shnum);

	if (eh->e_shnum * eh->e_shentsize != 0 && eh->e_shoff != 0) {
		for (i = 0; i < eh->e_shnum; i++) {
			if (shdr[i].sh_type == SHT_SYMTAB) {
				/*
				 * XXX: check if .symtab is in PT_LOAD?
				 */
				if (shdr[i].sh_offset != 0 &&
				    shdr[i].sh_size != 0) {
					symtabindex = i;
					symstrindex = shdr[i].sh_link;
				}
			}
		}
	}

	/*
	 * Copy loadable segments
	 */
	for (i = 0; i < eh->e_phnum; i++) {
		if (phdr[i].p_type != PT_LOAD)
			continue;

		memcpy(mkptr(phdr[i].p_vaddr),
		    (void*)(kstart + phdr[i].p_offset), phdr[i].p_filesz);

		/* Clean space from oversized segments, e.g. the bss. */
		if (phdr[i].p_filesz < phdr[i].p_memsz)
			bzero(mkptr(phdr[i].p_vaddr + phdr[i].p_filesz),
			    phdr[i].p_memsz - phdr[i].p_filesz);

		if (loadend < phdr[i].p_vaddr + phdr[i].p_memsz)
			loadend = phdr[i].p_vaddr + phdr[i].p_memsz;
	}

	/* Now grab the symbol tables. */
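	/*
	 * They are appended right after the last loaded address as a small
	 * blob, in the order written below: SYMTAB_MAGIC, the total size of
	 * what follows, then the .symtab size and data, then the .strtab
	 * size and data (each size is an Elf_Size).
	 */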
	lastaddr = (intptr_t)(int)loadend;
	if (symtabindex >= 0 && symstrindex >= 0) {
		tmp = SYMTAB_MAGIC;
		memcpy((void *)lastaddr, &tmp, sizeof(tmp));
		lastaddr += sizeof(Elf_Size);
		tmp = shdr[symtabindex].sh_size +
		    shdr[symstrindex].sh_size + 2*sizeof(Elf_Size);
		memcpy((void *)lastaddr, &tmp, sizeof(tmp));
		lastaddr += sizeof(Elf_Size);
		/* .symtab size */
		tmp = shdr[symtabindex].sh_size;
		memcpy((void *)lastaddr, &tmp, sizeof(tmp));
		lastaddr += sizeof(shdr[symtabindex].sh_size);
		/* .symtab data */
		memcpy((void*)lastaddr,
		    shdr[symtabindex].sh_offset + kstart,
		    shdr[symtabindex].sh_size);
		lastaddr += shdr[symtabindex].sh_size;

		/* .strtab size */
		tmp = shdr[symstrindex].sh_size;
		memcpy((void *)lastaddr, &tmp, sizeof(tmp));
		lastaddr += sizeof(shdr[symstrindex].sh_size);

		/* .strtab data */
		memcpy((void*)lastaddr,
		    shdr[symstrindex].sh_offset + kstart,
		    shdr[symstrindex].sh_size);
	} else {
		/* No symbol table; do not take any chances, store a zero. */
		tmp = 0;
		memcpy((void *)lastaddr, &tmp, sizeof(tmp));
	}

	return (entry_point);
}

void
_startC(register_t a0, register_t a1, register_t a2, register_t a3)
{
	void (*entry_point)(register_t, register_t, register_t, register_t);

	/*
	 * Relocate the kernel segments to their linked addresses; most
	 * likely these are KSEG0/KSEG1 addresses.
	 */
	entry_point = (void (*)(register_t, register_t, register_t,
	    register_t))load_kernel(kernel_start);

	/* Pass the saved boot arguments on to the kernel's own entry point. */
	entry_point(a0, a1, a2, a3);
}