kobj_boot.c revision 580:70dfd36fd02c
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Bootstrap the linker/loader.
 */

#include <sys/types.h>
#include <sys/bootconf.h>
#include <sys/link.h>
#include <sys/auxv.h>
#include <sys/kobj.h>
#include <sys/elf.h>
#include <sys/bootsvcs.h>
#include <sys/kobj_impl.h>

#if !defined(__GNUC__)

/*
 * We don't use the global offset table, but
 * ld may throw in an UNDEFINED reference in
 * our symbol table.
 */
#if !defined(_KERNEL)
#pragma weak _GLOBAL_OFFSET_TABLE_
#endif

#else

/*
 * We -do- use the global offset table, but only by
 * accident -- when you tell gcc to emit PIC code,
 * it -always- generates a reference to the GOT in
 * a register, even if the compilation unit never
 * uses it.
 *
 * Rumoured to be fixed in a later version of gcc..
 */

long	_GLOBAL_OFFSET_TABLE_[1];

#endif

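/*
 * ALIGN() rounds a value up to the requested alignment boundary;
 * reuse it under the traditional roundup() name.
 */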
#define	roundup		ALIGN

#define	MAXSECT		64	/* max # of sects. */

#define	HIBITS		0xffffffff80000000	/* upper 32 bits */

/*
 * Boot transfers control here. At this point,
 * we haven't relocated our own symbols, so the
 * world (as we know it) is pretty small right now.
 */
void
_kobj_boot(
	struct boot_syscalls *syscallp,
	void *dvec,
	struct bootops *bootops,
	Boot *ebp)
{
	Shdr *section[MAXSECT];	/* cache */
	val_t bootaux[BA_NUM];
	struct bootops *bop;
	Phdr *phdr;
	auxv_t *auxv = NULL;
	Shdr *sh;
	Half sh_num;
	ulong_t end, edata = 0;
	int i;

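	/*
	 * If a debug vector was passed in, bootops points at the bootops
	 * pointer rather than at the bootops themselves, so strip the
	 * extra level of indirection.
	 */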
	bop = (dvec) ? *(struct bootops **)bootops : bootops;

	for (i = 0; i < BA_NUM; i++)
		bootaux[i].ba_val = NULL;

	/*
	 * Check the bootstrap vector.
	 */
	for (; ebp->eb_tag != EB_NULL; ebp++) {
		switch (ebp->eb_tag) {
		case EB_AUXV:
			auxv = (auxv_t *)ebp->eb_un.eb_ptr;
			break;
		case EB_DYNAMIC:
			bootaux[BA_DYNAMIC].ba_ptr = (void *)ebp->eb_un.eb_ptr;
			break;
		default:
			break;
		}
	}

	if (auxv == NULL)
		return;

	/*
	 * Now the aux vector.
	 */
	for (; auxv->a_type != AT_NULL; auxv++) {
		switch (auxv->a_type) {
		case AT_PHDR:
			bootaux[BA_PHDR].ba_ptr = auxv->a_un.a_ptr;
			break;
		case AT_PHENT:
			bootaux[BA_PHENT].ba_val = auxv->a_un.a_val;
			break;
		case AT_PHNUM:
			bootaux[BA_PHNUM].ba_val = auxv->a_un.a_val;
			break;
		case AT_PAGESZ:
			bootaux[BA_PAGESZ].ba_val = auxv->a_un.a_val;
			break;
		case AT_SUN_LDELF:
			bootaux[BA_LDELF].ba_ptr = auxv->a_un.a_ptr;
			break;
		case AT_SUN_LDSHDR:
			bootaux[BA_LDSHDR].ba_ptr = auxv->a_un.a_ptr;
			break;
		case AT_SUN_LDNAME:
			bootaux[BA_LDNAME].ba_ptr = auxv->a_un.a_ptr;
			break;
		case AT_SUN_LPAGESZ:
			bootaux[BA_LPAGESZ].ba_val = auxv->a_un.a_val;
			break;
		case AT_SUN_CPU:
			bootaux[BA_CPU].ba_ptr = auxv->a_un.a_ptr;
			break;
		case AT_SUN_MMU:
			bootaux[BA_MMU].ba_ptr = auxv->a_un.a_ptr;
			break;
		case AT_ENTRY:
			bootaux[BA_ENTRY].ba_ptr = auxv->a_un.a_ptr;
			break;
		default:
			break;
		}
	}

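	/*
	 * Boot handed us our own ELF header and section headers via the
	 * aux vector (AT_SUN_LDELF / AT_SUN_LDSHDR); walk the section
	 * headers from there.
	 */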
	sh = (Shdr *)bootaux[BA_LDSHDR].ba_ptr;
	sh_num = ((Ehdr *)bootaux[BA_LDELF].ba_ptr)->e_shnum;
	/*
	 * Make sure we won't overflow the stack-allocated cache.
	 */
	if (sh_num >= MAXSECT)
		return;

	/*
	 * Build cache table for section addresses.
	 */
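	/* (Assumes the section headers form a packed array of Shdr.) */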
	for (i = 0; i < sh_num; i++) {
		section[i] = sh++;
	}

	/*
	 * Find the end of data
	 * (to allocate bss)
	 */
	phdr = (Phdr *)bootaux[BA_PHDR].ba_ptr;

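	/*
	 * The data segment is the loadable segment mapped both writable
	 * and executable; its end gives us edata.
	 */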
	for (i = 0; i < bootaux[BA_PHNUM].ba_val; i++) {
		if (phdr->p_type == PT_LOAD &&
		    (phdr->p_flags & PF_W) && (phdr->p_flags & PF_X)) {
			edata = end = phdr->p_vaddr + phdr->p_memsz;
			break;
		}
		phdr = (Phdr *)((ulong_t)phdr + bootaux[BA_PHENT].ba_val);
	}
	if (edata == NULL)
		return;

	/*
	 * Find the symbol table, and then loop
	 * through the symbols adjusting their
	 * values to reflect where the sections
	 * were loaded.
	 */
	for (i = 1; i < sh_num; i++) {
		Shdr *shp;
		Sym *sp;
		ulong_t off;

		shp = section[i];
		if (shp->sh_type != SHT_SYMTAB)
			continue;

		for (off = 0; off < shp->sh_size; off += shp->sh_entsize) {
			sp = (Sym *)(shp->sh_addr + off);

			if (sp->st_shndx == SHN_ABS ||
			    sp->st_shndx == SHN_UNDEF)
				continue;

			/*
			 * Assign the addresses for COMMON
			 * symbols even though we haven't
			 * actually allocated bss yet.
			 */
			if (sp->st_shndx == SHN_COMMON) {
				end = ALIGN(end, sp->st_value);
				sp->st_value = end;
				/*
				 * Squirrel it away for later.
				 */
				if (bootaux[BA_BSS].ba_val == 0)
					bootaux[BA_BSS].ba_val = end;
				end += sp->st_size;
				continue;
			} else if (sp->st_shndx > (Half)sh_num) {
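				/*
				 * The symbol refers to a section we don't
				 * know about; complain ('>') and give up.
				 */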
				BSVC_PUTCHAR(syscallp, '>');
				return;
			}

			/*
			 * Symbol's new address.
			 */
			sp->st_value += section[sp->st_shndx]->sh_addr;
		}
	}

	/*
	 * Allocate bss for COMMON, if any.
	 */
	if (end > edata) {
		unsigned long va, bva;
		unsigned long asize;
		unsigned long align;

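		/*
		 * If boot advertised a large page size, use it for both
		 * the allocation size and the alignment; otherwise use the
		 * base page size and ask for no particular alignment.
		 */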
		if (bootaux[BA_LPAGESZ].ba_val) {
			asize = bootaux[BA_LPAGESZ].ba_val;
			align = bootaux[BA_LPAGESZ].ba_val;
		} else {
			asize = bootaux[BA_PAGESZ].ba_val;
			align = BO_NO_ALIGN;
		}
		va = roundup(edata, asize);
		bva = roundup(end, asize);

		if (bva > va) {
			bva = (unsigned long)BOP_ALLOC(bop, (caddr_t)va,
				bva - va, align);
			if (bva == NULL)
				return;
		}
		/*
		 * Zero it.
		 */
		for (va = edata; va < end; va++)
			*(char *)va = 0;
		/*
		 * Update the size of data.
		 */
		phdr->p_memsz += (end - edata);
	}

	/*
	 * Relocate our own symbols.  We'll handle the
	 * undefined symbols later.
	 */
	for (i = 1; i < sh_num; i++) {
		Shdr *rshp, *shp, *ssp;
		unsigned long baseaddr, reladdr, rend;
		long relocsize;

		rshp = section[i];

		if (rshp->sh_type != SHT_RELA)
			continue;
		/*
		 * Get the section being relocated
		 * and the symbol table.
		 */
		shp = section[rshp->sh_info];
		ssp = section[rshp->sh_link];

		/*
		 * Only perform relocations against allocatable
		 * sections.
		 */
		if ((shp->sh_flags & SHF_ALLOC) == 0)
			continue;

		reladdr = rshp->sh_addr;
		baseaddr = shp->sh_addr;
		rend = reladdr + rshp->sh_size;
		relocsize = rshp->sh_entsize;
		/*
		 * Loop through relocations.
		 */

		while (reladdr < rend) {
			Sym *symref;
			Rela *reloc;
			unsigned long stndx;
			unsigned long off, *offptr;
			long addend, value;
			unsigned long symoff, symsize;
			int rtype;

			reloc = (Rela *)reladdr;
			off = reloc->r_offset;
			addend = (long)reloc->r_addend;
			rtype = ELF_R_TYPE(reloc->r_info);
			stndx = ELF_R_SYM(reloc->r_info);

			reladdr += relocsize;

			if (rtype == R_AMD64_NONE)
				continue;

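			/*
			 * r_offset is relative to the section being
			 * relocated; add the section's base address to get
			 * the absolute address to patch.
			 */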
			off += baseaddr;

			symsize = ssp->sh_entsize;
			symoff = stndx * symsize;

			/*
			 * Check for bad symbol index.
			 */
			if (symoff > ssp->sh_size)
				return;

			symref = (Sym *)(ssp->sh_addr + symoff);

			/*
			 * Just bind our own symbols at this point.
			 */
			if (symref->st_shndx == SHN_UNDEF)
				continue;

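			/*
			 * The symbol table was already adjusted above, so
			 * st_value is the symbol's final address.
			 */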
			value = symref->st_value;

			if ((rtype == R_AMD64_PC32) ||
			    (rtype == R_AMD64_PLT32))
				/*
				 * If PC-relative, subtract ref addr.
				 */
				value -= off;
			else if (rtype == R_AMD64_32) {
				/*
				 * It's illegal to have any HIBITS
				 * set for R_AMD64_32 reloc.
				 */
				if (value & HIBITS) {
					BSVC_PUTCHAR(syscallp, 'h');
					return;
				}
			} else if (rtype == R_AMD64_32S) {
				/*
				 * All HIBITS for R_AMD64_32S
				 * *must* be set.
				 */
				if ((value & HIBITS) != HIBITS) {
					BSVC_PUTCHAR(syscallp, 'H');
					return;
				}
			}

			offptr = (unsigned long *)off;
			/*
			 * Patch the computed value (plus the addend) into
			 * the reference, using a store sized to match the
			 * relocation type.
			 */
			switch (rtype) {
			case R_AMD64_64:
				*(unsigned long *)offptr = value + addend;
				break;
			case R_AMD64_PC32:
			case R_AMD64_32S:
			case R_AMD64_PLT32:
				*(uint_t *)offptr = (uint_t)(value + addend);
				break;
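			/*
			 * GOT-based and plain 32-bit relocations (and any
			 * type we don't recognize) can't be handled here,
			 * so give up.
			 */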
			case R_AMD64_GOT32:
				BSVC_PUTCHAR(syscallp, 'G');
				return;
			case R_AMD64_32:
				return;
			default:
				BSVC_PUTCHAR(syscallp, 'R');
				return;
			}
			/*
			 * We only need to apply each relocation once, so
			 * mark this entry as done (R_AMD64_NONE).
			 */
			reloc->r_info = ELF_R_INFO(stndx, R_AMD64_NONE);
		} /* while */
	}

	/*
	 * Done relocating all of our *defined*
	 * symbols, so we hand off.
	 */
	kobj_init(syscallp, dvec, bootops, bootaux);
}