/*-
 * Copyright 1996, 1997, 1998, 1999 John D. Polstra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/libexec/rtld-elf/ia64/reloc.c 97026 2002-05-21 00:04:08Z marcel $
 */

/*
 * Dynamic linker for ELF.
 *
 * John Polstra <jdp@polstra.com>.
 */

#include <sys/param.h>
#include <sys/mman.h>
#include <machine/ia64_cpu.h>

#include <dlfcn.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "debug.h"
#include "rtld.h"

extern Elf_Dyn _DYNAMIC;

/*
 * Macros for loading/storing unaligned 64-bit values.  These are
 * needed because relocations can point to unaligned data.  This
 * occurs in the DWARF2 exception frame tables generated by the
 * compiler, for instance.
 *
 * We don't use these when relocating jump slots and GOT entries,
 * since they are guaranteed to be aligned.
 *
 * XXX dfr stub for now.
 */
#define load64(p)	(*(u_int64_t *) (p))
#define store64(p, v)	(*(u_int64_t *) (p) = (v))
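
/*
 * These direct dereferences are only a stub (see the XXX above); a
 * misaligned 8-byte access traps on ia64 and leaves the fixup to the
 * kernel's unaligned-access handling.  A stricter, hypothetical
 * replacement would assemble the value byte-wise instead, along the
 * lines of:
 *
 *	static __inline u_int64_t
 *	load64_unaligned(const void *p)
 *	{
 *		u_int64_t v;
 *
 *		memcpy(&v, p, sizeof(v));
 *		return v;
 *	}
 */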

/* Allocate an @fptr. */

#define FPTR_CHUNK_SIZE		64

struct fptr_chunk {
	struct fptr fptrs[FPTR_CHUNK_SIZE];
};

static struct fptr_chunk first_chunk;
static struct fptr_chunk *current_chunk = &first_chunk;
static struct fptr *next_fptr = &first_chunk.fptrs[0];
static struct fptr *last_fptr = &first_chunk.fptrs[FPTR_CHUNK_SIZE];

/*
 * We use static storage initially so that we don't have to call
 * malloc during init_rtld().
 */
static struct fptr *
alloc_fptr(Elf_Addr target, Elf_Addr gp)
{
	struct fptr *fptr;

	if (next_fptr == last_fptr) {
		current_chunk = malloc(sizeof(struct fptr_chunk));
		next_fptr = &current_chunk->fptrs[0];
		last_fptr = &current_chunk->fptrs[FPTR_CHUNK_SIZE];
	}
	fptr = next_fptr;
	next_fptr++;
	fptr->target = target;
	fptr->gp = gp;
	return fptr;
}
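
/*
 * For reference: "struct fptr" is the ia64 function-descriptor type
 * declared in the machine-dependent rtld header; conceptually it just
 * pairs an entry point with the defining object's gp, roughly
 *
 *	struct fptr {
 *		Elf_Addr target;
 *		Elf_Addr gp;
 *	};
 *
 * although the actual declaration may differ in detail.
 */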

/* Relocate a non-PLT object with addend. */
static int
reloc_non_plt_obj(Obj_Entry *obj_rtld, Obj_Entry *obj, const Elf_Rela *rela,
		  SymCache *cache, struct fptr **fptrs)
{
	Elf_Addr *where = (Elf_Addr *) (obj->relocbase + rela->r_offset);

	switch (ELF_R_TYPE(rela->r_info)) {
	case R_IA64_REL64LSB:
		/*
		 * We handle rtld's relocations in rtld_start.S
		 */
		if (obj != obj_rtld)
			store64(where,
				load64(where) + (Elf_Addr) obj->relocbase);
		break;

	case R_IA64_DIR64LSB: {
		const Elf_Sym *def;
		const Obj_Entry *defobj;
		Elf_Addr target;

		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
				  false, cache);
		if (def == NULL)
			return -1;

		target = (def->st_shndx != SHN_UNDEF)
		    ? (Elf_Addr)(defobj->relocbase + def->st_value) : 0;
		store64(where, target + rela->r_addend);
		break;
	}

	case R_IA64_FPTR64LSB: {
		/*
		 * We have to make sure that all @fptr references to
		 * the same function are identical so that code can
		 * compare function pointers. We actually only bother
		 * to ensure this within a single object. If the
		 * caller's alloca failed, we don't even ensure that.
		 */
		const Elf_Sym *def;
		const Obj_Entry *defobj;
		struct fptr *fptr = NULL;
		Elf_Addr target, gp;

		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
				  false, cache);
		if (def == NULL)
			return -1;

		if (def->st_shndx != SHN_UNDEF) {
			target = (Elf_Addr)(defobj->relocbase + def->st_value);
			gp = (Elf_Addr)defobj->pltgot;

			/*
			 * Find the @fptr, using fptrs as a helper.
			 */
			if (fptrs)
				fptr = fptrs[ELF_R_SYM(rela->r_info)];
			if (!fptr) {
				fptr = alloc_fptr(target, gp);
				if (fptrs)
					fptrs[ELF_R_SYM(rela->r_info)] = fptr;
			}
		} else
			fptr = NULL;

		store64(where, (Elf_Addr)fptr);
		break;
	}

	default:
		_rtld_error("%s: Unsupported relocation type %d"
			    " in non-PLT relocations\n", obj->path,
			    (int)ELF_R_TYPE(rela->r_info));
		return -1;
	}

	return 0;
}

/* Process the non-PLT relocations. */
int
reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld)
{
	const Elf_Rel *rellim;
	const Elf_Rel *rel;
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	SymCache *cache;
	struct fptr **fptrs;

	cache = (SymCache *)alloca(obj->nchains * sizeof(SymCache));
	if (cache != NULL)
		memset(cache, 0, obj->nchains * sizeof(SymCache));

	/*
	 * When relocating rtld itself, we need to avoid using malloc.
	 */
	if (obj == obj_rtld)
		fptrs = (struct fptr **)
			alloca(obj->nchains * sizeof(struct fptr *));
	else
		fptrs = (struct fptr **)
			malloc(obj->nchains * sizeof(struct fptr *));

	if (fptrs == NULL)
		return -1;
	memset(fptrs, 0, obj->nchains * sizeof(struct fptr *));

	/* Perform relocations without addend if there are any: */
	rellim = (const Elf_Rel *) ((caddr_t) obj->rel + obj->relsize);
	for (rel = obj->rel;  obj->rel != NULL && rel < rellim;  rel++) {
		Elf_Rela locrela;

		locrela.r_info = rel->r_info;
		locrela.r_offset = rel->r_offset;
		locrela.r_addend = 0;
		if (reloc_non_plt_obj(obj_rtld, obj, &locrela, cache, fptrs))
			return -1;
	}

	/* Perform relocations with addend if there are any: */
	relalim = (const Elf_Rela *) ((caddr_t) obj->rela + obj->relasize);
	for (rela = obj->rela;  obj->rela != NULL && rela < relalim;  rela++) {
		if (reloc_non_plt_obj(obj_rtld, obj, rela, cache, fptrs))
			return -1;
	}

	/*
	 * Remember the fptrs in case of later calls to dlsym(). Don't
	 * bother for rtld - we will lazily create a table in
	 * make_function_pointer(). At this point we still can't risk
	 * calling malloc().
	 */
	if (obj != obj_rtld)
		obj->priv = fptrs;
	else
		obj->priv = NULL;

	return 0;
}

/* Process the PLT relocations. */
int
reloc_plt(Obj_Entry *obj)
{
	/* All PLT relocations are the same kind: Elf_Rel or Elf_Rela. */
	if (obj->pltrelsize != 0) {
		const Elf_Rel *rellim;
		const Elf_Rel *rel;

		rellim = (const Elf_Rel *)
			((char *)obj->pltrel + obj->pltrelsize);
		for (rel = obj->pltrel;  rel < rellim;  rel++) {
			Elf_Addr *where;

			assert(ELF_R_TYPE(rel->r_info) == R_IA64_IPLTLSB);

			/* Relocate the @fptr pointing into the PLT. */
			where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
			*where += (Elf_Addr)obj->relocbase;
		}
	} else {
		const Elf_Rela *relalim;
		const Elf_Rela *rela;

		relalim = (const Elf_Rela *)
			((char *)obj->pltrela + obj->pltrelasize);
		for (rela = obj->pltrela;  rela < relalim;  rela++) {
			Elf_Addr *where;

			assert(ELF_R_TYPE(rela->r_info) == R_IA64_IPLTLSB);

			/* Relocate the @fptr pointing into the PLT. */
			where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
			*where += (Elf_Addr)obj->relocbase;
		}
	}
	return 0;
}
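
/*
 * Note: reloc_plt() above merely rebases the @fptrs that point back into
 * the PLT (the lazy-binding stubs); reloc_jmpslots() below resolves the
 * slots eagerly, e.g. when immediate (BIND_NOW) binding is requested.
 */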

/* Relocate the jump slots in an object. */
int
reloc_jmpslots(Obj_Entry *obj)
{
	if (obj->jmpslots_done)
		return 0;
	/* All PLT relocations are the same kind: Elf_Rel or Elf_Rela. */
	if (obj->pltrelsize != 0) {
		const Elf_Rel *rellim;
		const Elf_Rel *rel;

		rellim = (const Elf_Rel *)
			((char *)obj->pltrel + obj->pltrelsize);
		for (rel = obj->pltrel;  rel < rellim;  rel++) {
			Elf_Addr *where;
			const Elf_Sym *def;
			const Obj_Entry *defobj;

			assert(ELF_R_TYPE(rel->r_info) == R_IA64_IPLTLSB);
			where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
			def = find_symdef(ELF_R_SYM(rel->r_info), obj,
					  &defobj, true, NULL);
			if (def == NULL)
				return -1;
			reloc_jmpslot(where,
				      (Elf_Addr)(defobj->relocbase
						 + def->st_value),
				      defobj);
		}
	} else {
		const Elf_Rela *relalim;
		const Elf_Rela *rela;

		relalim = (const Elf_Rela *)
			((char *)obj->pltrela + obj->pltrelasize);
		for (rela = obj->pltrela;  rela < relalim;  rela++) {
			Elf_Addr *where;
			const Elf_Sym *def;
			const Obj_Entry *defobj;

			assert(ELF_R_TYPE(rela->r_info) == R_IA64_IPLTLSB);
			where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
			def = find_symdef(ELF_R_SYM(rela->r_info), obj,
					  &defobj, true, NULL);
			if (def == NULL)
				return -1;
			reloc_jmpslot(where,
				      (Elf_Addr)(defobj->relocbase
						 + def->st_value),
				      defobj);
		}
	}
	obj->jmpslots_done = true;
	return 0;
}

/* Fixup the jump slot at "where" to transfer control to "target". */
Elf_Addr
reloc_jmpslot(Elf_Addr *where, Elf_Addr target, const Obj_Entry *obj)
{
	Elf_Addr stubaddr;

	dbg(" reloc_jmpslot: where=%p, target=%p, gp=%p",
	    (void *)where, (void *)target, (void *)obj->pltgot);
	stubaddr = *where;
	if (stubaddr != target) {

		/*
		 * Point this @fptr directly at the target. Update the
		 * gp value first so that we don't break another cpu
		 * which is currently executing the PLT entry.
		 */
		where[1] = (Elf_Addr) obj->pltgot;
		ia64_mf();
		where[0] = target;
		ia64_mf();
	}

	/*
	 * The caller needs an @fptr for the adjusted entry. The PLT
	 * entry serves this purpose nicely.
	 */
	return (Elf_Addr) where;
}

/*
 * XXX ia64 doesn't seem to have copy relocations.
 *
 * Returns 0 on success, -1 on failure.
 */
int
do_copy_relocations(Obj_Entry *dstobj)
{

	return 0;
}

/*
 * Return the @fptr representing a given function symbol.
 */
void *
make_function_pointer(const Elf_Sym *sym, const Obj_Entry *obj)
{
	struct fptr **fptrs = obj->priv;
	int index = sym - obj->symtab;

	if (!fptrs) {
		/*
		 * This should only happen for something like
		 * dlsym("dlopen"). Actually, I'm not sure it can ever
		 * happen.
		 */
		fptrs = (struct fptr **)
			malloc(obj->nchains * sizeof(struct fptr *));
		memset(fptrs, 0, obj->nchains * sizeof(struct fptr *));
		((Obj_Entry*) obj)->priv = fptrs;
	}
	if (!fptrs[index]) {
		Elf_Addr target, gp;
		target = (Elf_Addr) (obj->relocbase + sym->st_value);
		gp = (Elf_Addr) obj->pltgot;
		fptrs[index] = alloc_fptr(target, gp);
	}
	return fptrs[index];
}

void
call_initfini_pointer(const Obj_Entry *obj, Elf_Addr target)
{
	struct fptr fptr;

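	/*
	 * On ia64 a C function pointer is the address of an @fptr
	 * descriptor rather than a code address, so a descriptor built
	 * on the stack and cast to InitFunc yields a callable pointer
	 * with the correct gp.
	 */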
	fptr.gp = (Elf_Addr) obj->pltgot;
	fptr.target = target;
	dbg(" initfini: target=%p, gp=%p",
	    (void *) fptr.target, (void *) fptr.gp);
	((InitFunc) &fptr)();
}

/* Initialize the special PLT entries. */
void
init_pltgot(Obj_Entry *obj)
{
	const Elf_Dyn *dynp;
	Elf_Addr *pltres = NULL;

	/*
	 * Find the PLT RESERVE section.
	 */
	for (dynp = obj->dynamic;  dynp->d_tag != DT_NULL;  dynp++) {
		if (dynp->d_tag == DT_IA64_PLT_RESERVE)
			pltres = (Elf_Addr *)
				(obj->relocbase + dynp->d_un.d_ptr);
	}
	if (!pltres)
		errx(1, "Can't find DT_IA64_PLT_RESERVE entry");

	/*
	 * The PLT RESERVE section is used to get values to pass to
	 * _rtld_bind when lazy binding.
	 */
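	/*
	 * FPTR_TARGET() and FPTR_GP() are machine-dependent helpers that
	 * read the target and gp words out of an existing @fptr, here the
	 * descriptor for _rtld_bind_start itself.
	 */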
	pltres[0] = (Elf_Addr) obj;
	pltres[1] = FPTR_TARGET(_rtld_bind_start);
	pltres[2] = FPTR_GP(_rtld_bind_start);
}
447