/* reloc.c, revision 95540 */
1/*-
2 * Copyright 1996, 1997, 1998, 1999 John D. Polstra.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
16 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
17 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
18 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
19 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
20 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
21 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
23 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 *
25 * $FreeBSD: head/libexec/rtld-elf/ia64/reloc.c 95540 2002-04-27 02:53:31Z marcel $
26 */
27
28/*
29 * Dynamic linker for ELF.
30 *
31 * John Polstra <jdp@polstra.com>.
32 */
33
34#include <sys/param.h>
35#include <sys/mman.h>
36
37#include <dlfcn.h>
38#include <err.h>
39#include <errno.h>
40#include <fcntl.h>
41#include <stdarg.h>
42#include <stdio.h>
43#include <stdlib.h>
44#include <string.h>
45#include <unistd.h>
46
47#include "debug.h"
48#include "rtld.h"
49
50extern Elf_Dyn _DYNAMIC;
51
/*
 * Macros for loading/storing unaligned 64-bit values.  These are
 * needed because relocations can point to unaligned data.  This
 * occurs in the DWARF2 exception frame tables generated by the
 * compiler, for instance.
 *
 * We don't use these when relocating jump slots and GOT entries,
 * since they are guaranteed to be aligned.
 *
 * XXX dfr stub for now.
 */
#define load64(p)	(*(u_int64_t *) (p))
#define store64(p, v)	(*(u_int64_t *) (p) = (v))

/* Allocate an @fptr. */

/* Number of function descriptors carved out of each chunk. */
#define FPTR_CHUNK_SIZE		64

/* A block of @fptr descriptors handed out one at a time by alloc_fptr(). */
struct fptr_chunk {
	struct fptr fptrs[FPTR_CHUNK_SIZE];
};

/* The first chunk is static storage so alloc_fptr() works before malloc. */
static struct fptr_chunk first_chunk;
/* Chunk currently being carved up. */
static struct fptr_chunk *current_chunk = &first_chunk;
/* Next unused descriptor in current_chunk. */
static struct fptr *next_fptr = &first_chunk.fptrs[0];
/* One past the last descriptor; reaching it triggers a new chunk. */
static struct fptr *last_fptr = &first_chunk.fptrs[FPTR_CHUNK_SIZE];
78
79/*
80 * We use static storage initially so that we don't have to call
81 * malloc during init_rtld().
82 */
83static struct fptr *
84alloc_fptr(Elf_Addr target, Elf_Addr gp)
85{
86	struct fptr* fptr;
87
88	if (next_fptr == last_fptr) {
89		current_chunk = malloc(sizeof(struct fptr_chunk));
90		next_fptr = &current_chunk->fptrs[0];
91		last_fptr = &current_chunk->fptrs[FPTR_CHUNK_SIZE];
92	}
93	fptr = next_fptr;
94	next_fptr++;
95	fptr->target = target;
96	fptr->gp = gp;
97	return fptr;
98}
99
100/* Relocate a non-PLT object with addend. */
101static int
102reloc_non_plt_obj(Obj_Entry *obj_rtld, Obj_Entry *obj, const Elf_Rela *rela,
103		  SymCache *cache, struct fptr **fptrs)
104{
105	Elf_Addr *where = (Elf_Addr *) (obj->relocbase + rela->r_offset);
106
107	switch (ELF_R_TYPE(rela->r_info)) {
108	case R_IA64_REL64LSB:
109		/*
110		 * We handle rtld's relocations in rtld_start.S
111		 */
112		if (obj != obj_rtld)
113			store64(where,
114				load64(where) + (Elf_Addr) obj->relocbase);
115		break;
116
117	case R_IA64_DIR64LSB: {
118		const Elf_Sym *def;
119		const Obj_Entry *defobj;
120		Elf_Addr target;
121
122		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
123				  false, cache);
124		if (def == NULL)
125			return -1;
126		target = (Elf_Addr) (defobj->relocbase + def->st_value);
127		store64(where, target + rela->r_addend);
128		break;
129	}
130
131	case R_IA64_FPTR64LSB: {
132		/*
133		 * We have to make sure that all @fptr references to
134		 * the same function are identical so that code can
135		 * compare function pointers. We actually only bother
136		 * to ensure this within a single object. If the
137		 * caller's alloca failed, we don't even ensure that.
138		 */
139		const Elf_Sym *def, *ref;
140		const Obj_Entry *defobj;
141		struct fptr *fptr = 0;
142		Elf_Addr target, gp;
143
144		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
145				  false, cache);
146		if (def == NULL)
147			return -1;
148
149		/*
150		 * If this is an undefined weak reference, we need to
151		 * have a zero target,gp fptr, not pointing to relocbase.
152		 * This isn't quite right.  Maybe we should check
153		 * explicitly for def == &sym_zero.
154		 */
155		if (def->st_value == 0 &&
156		    (ref = obj->symtab + ELF_R_SYM(rela->r_info)) &&
157		    ELF_ST_BIND(ref->st_info) == STB_WEAK) {
158			target = 0;
159			gp = 0;
160		} else {
161			target = (Elf_Addr) (defobj->relocbase + def->st_value);
162			gp = (Elf_Addr) defobj->pltgot;
163		}
164
165		/*
166		 * Find the @fptr, using fptrs as a helper.
167		 */
168		if (fptrs)
169			fptr = fptrs[ELF_R_SYM(rela->r_info)];
170		if (!fptr) {
171			fptr = alloc_fptr(target, gp);
172			if (fptrs)
173				fptrs[ELF_R_SYM(rela->r_info)] = fptr;
174		}
175		store64(where, (Elf_Addr) fptr);
176		break;
177	}
178
179	default:
180		_rtld_error("%s: Unsupported relocation type %d"
181			    " in non-PLT relocations\n", obj->path,
182			    ELF_R_TYPE(rela->r_info));
183		return -1;
184	}
185
186	return(0);
187}
188
189/* Process the non-PLT relocations. */
190int
191reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld)
192{
193	const Elf_Rel *rellim;
194	const Elf_Rel *rel;
195	const Elf_Rela *relalim;
196	const Elf_Rela *rela;
197	SymCache *cache;
198	struct fptr **fptrs;
199
200	cache = (SymCache *)alloca(obj->nchains * sizeof(SymCache));
201	if (cache != NULL)
202		memset(cache, 0, obj->nchains * sizeof(SymCache));
203
204	/*
205	 * When relocating rtld itself, we need to avoid using malloc.
206	 */
207        if (obj == obj_rtld)
208		fptrs = (struct fptr **)
209			alloca(obj->nchains * sizeof(struct fptr *));
210	else
211		fptrs = (struct fptr **)
212			malloc(obj->nchains * sizeof(struct fptr *));
213
214	if (fptrs == NULL)
215		return -1;
216	memset(fptrs, 0, obj->nchains * sizeof(struct fptr *));
217
218	/* Perform relocations without addend if there are any: */
219	rellim = (const Elf_Rel *) ((caddr_t) obj->rel + obj->relsize);
220	for (rel = obj->rel;  obj->rel != NULL && rel < rellim;  rel++) {
221		Elf_Rela locrela;
222
223		locrela.r_info = rel->r_info;
224		locrela.r_offset = rel->r_offset;
225		locrela.r_addend = 0;
226		if (reloc_non_plt_obj(obj_rtld, obj, &locrela, cache, fptrs))
227			return -1;
228	}
229
230	/* Perform relocations with addend if there are any: */
231	relalim = (const Elf_Rela *) ((caddr_t) obj->rela + obj->relasize);
232	for (rela = obj->rela;  obj->rela != NULL && rela < relalim;  rela++) {
233		if (reloc_non_plt_obj(obj_rtld, obj, rela, cache, fptrs))
234			return -1;
235	}
236
237	/*
238	 * Remember the fptrs in case of later calls to dlsym(). Don't
239	 * bother for rtld - we will lazily create a table in
240	 * make_function_pointer(). At this point we still can't risk
241	 * calling malloc().
242	 */
243	if (obj != obj_rtld)
244		obj->priv = fptrs;
245	else
246		obj->priv = NULL;
247
248	return 0;
249}
250
251/* Process the PLT relocations. */
252int
253reloc_plt(Obj_Entry *obj)
254{
255	/* All PLT relocations are the same kind: Elf_Rel or Elf_Rela. */
256	if (obj->pltrelsize != 0) {
257		const Elf_Rel *rellim;
258		const Elf_Rel *rel;
259
260		rellim = (const Elf_Rel *)
261			((char *)obj->pltrel + obj->pltrelsize);
262		for (rel = obj->pltrel;  rel < rellim;  rel++) {
263			Elf_Addr *where;
264
265			assert(ELF_R_TYPE(rel->r_info) == R_IA64_IPLTLSB);
266
267			/* Relocate the @fptr pointing into the PLT. */
268			where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
269			*where += (Elf_Addr)obj->relocbase;
270		}
271	} else {
272		const Elf_Rela *relalim;
273		const Elf_Rela *rela;
274
275		relalim = (const Elf_Rela *)
276			((char *)obj->pltrela + obj->pltrelasize);
277		for (rela = obj->pltrela;  rela < relalim;  rela++) {
278			Elf_Addr *where;
279
280			assert(ELF_R_TYPE(rela->r_info) == R_IA64_IPLTLSB);
281
282			/* Relocate the @fptr pointing into the PLT. */
283			where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
284			*where += (Elf_Addr)obj->relocbase;
285		}
286	}
287	return 0;
288}
289
290/* Relocate the jump slots in an object. */
291int
292reloc_jmpslots(Obj_Entry *obj)
293{
294	if (obj->jmpslots_done)
295		return 0;
296	/* All PLT relocations are the same kind: Elf_Rel or Elf_Rela. */
297	if (obj->pltrelsize != 0) {
298		const Elf_Rel *rellim;
299		const Elf_Rel *rel;
300
301		rellim = (const Elf_Rel *)
302			((char *)obj->pltrel + obj->pltrelsize);
303		for (rel = obj->pltrel;  rel < rellim;  rel++) {
304			Elf_Addr *where;
305			const Elf_Sym *def;
306			const Obj_Entry *defobj;
307
308			assert(ELF_R_TYPE(rel->r_info) == R_IA64_IPLTLSB);
309			where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
310			def = find_symdef(ELF_R_SYM(rel->r_info), obj,
311					  &defobj, true, NULL);
312			if (def == NULL)
313				return -1;
314			reloc_jmpslot(where,
315				      (Elf_Addr)(defobj->relocbase
316						 + def->st_value),
317				      defobj);
318		}
319	} else {
320		const Elf_Rela *relalim;
321		const Elf_Rela *rela;
322
323		relalim = (const Elf_Rela *)
324			((char *)obj->pltrela + obj->pltrelasize);
325		for (rela = obj->pltrela;  rela < relalim;  rela++) {
326			Elf_Addr *where;
327			const Elf_Sym *def;
328			const Obj_Entry *defobj;
329
330			/* assert(ELF_R_TYPE(rela->r_info) == R_ALPHA_JMP_SLOT); */
331			where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
332			def = find_symdef(ELF_R_SYM(rela->r_info), obj,
333					  &defobj, true, NULL);
334			if (def == NULL)
335				return -1;
336			reloc_jmpslot(where,
337				      (Elf_Addr)(defobj->relocbase
338						 + def->st_value),
339				      defobj);
340		}
341	}
342	obj->jmpslots_done = true;
343	return 0;
344}
345
/*
 * Fixup the jump slot at "where" to transfer control to "target".
 * On ia64 a jump slot is a two-word function descriptor:
 * where[0] = entry address, where[1] = gp.  Returns the address of
 * the updated descriptor for the caller to jump through.
 */
Elf_Addr
reloc_jmpslot(Elf_Addr *where, Elf_Addr target, const Obj_Entry *obj)
{
	Elf_Addr stubaddr;

	dbg(" reloc_jmpslot: where=%p, target=%p, gp=%p",
	    (void *)where, (void *)target, (void *)obj->pltgot);
	stubaddr = *where;
	if (stubaddr != target) {

		/*
		 * Point this @fptr directly at the target. Update the
		 * gp value first so that we don't break another cpu
		 * which is currently executing the PLT entry.
		 */
		where[1] = (Elf_Addr) obj->pltgot;
		ia64_mf();	/* fence: gp store visible before target store */
		where[0] = target;
		ia64_mf();	/* fence: publish the completed descriptor */
	}

	/*
	 * The caller needs an @fptr for the adjusted entry. The PLT
	 * entry serves this purpose nicely.
	 */
	return (Elf_Addr) where;
}
374
375/*
376 * XXX ia64 doesn't seem to have copy relocations.
377 *
378 * Returns 0 on success, -1 on failure.
379 */
380int
381do_copy_relocations(Obj_Entry *dstobj)
382{
383
384	return 0;
385}
386
387/*
388 * Return the @fptr representing a given function symbol.
389 */
390void *
391make_function_pointer(const Elf_Sym *sym, const Obj_Entry *obj)
392{
393	struct fptr **fptrs = obj->priv;
394	int index = sym - obj->symtab;
395
396	if (!fptrs) {
397		/*
398		 * This should only happen for something like
399		 * dlsym("dlopen"). Actually, I'm not sure it can ever
400		 * happen.
401		 */
402		fptrs = (struct fptr **)
403			malloc(obj->nchains * sizeof(struct fptr *));
404		memset(fptrs, 0, obj->nchains * sizeof(struct fptr *));
405		((Obj_Entry*) obj)->priv = fptrs;
406	}
407	if (!fptrs[index]) {
408		Elf_Addr target, gp;
409		target = (Elf_Addr) (obj->relocbase + sym->st_value);
410		gp = (Elf_Addr) obj->pltgot;
411		fptrs[index] = alloc_fptr(target, gp);
412	}
413	return fptrs[index];
414}
415
/*
 * Call an init or fini function given its raw entry address.  On
 * ia64 a function pointer is really a pointer to an @fptr (target
 * address + gp pair), so we build a descriptor on the stack using
 * the object's gp and jump through it.
 */
void
call_initfini_pointer(const Obj_Entry *obj, Elf_Addr target)
{
	struct fptr fptr;

	fptr.gp = (Elf_Addr) obj->pltgot;
	fptr.target = target;
	dbg(" initfini: target=%p, gp=%p",
	    (void *) fptr.target, (void *) fptr.gp);
	/* Cast the descriptor's address to a function pointer and call it. */
	((InitFunc) &fptr)();
}
427
428/* Initialize the special PLT entries. */
429void
430init_pltgot(Obj_Entry *obj)
431{
432	const Elf_Dyn *dynp;
433	Elf_Addr *pltres = 0;
434
435	/*
436	 * Find the PLT RESERVE section.
437	 */
438	for (dynp = obj->dynamic;  dynp->d_tag != DT_NULL;  dynp++) {
439		if (dynp->d_tag == DT_IA64_PLT_RESERVE)
440			pltres = (u_int64_t *)
441				(obj->relocbase + dynp->d_un.d_ptr);
442	}
443	if (!pltres)
444		errx(1, "Can't find DT_IA64_PLT_RESERVE entry");
445
446	/*
447	 * The PLT RESERVE section is used to get values to pass to
448	 * _rtld_bind when lazy binding.
449	 */
450	pltres[0] = (Elf_Addr) obj;
451	pltres[1] = FPTR_TARGET(_rtld_bind_start);
452	pltres[2] = FPTR_GP(_rtld_bind_start);
453}
454