/*-
 * Copyright 1996, 1997, 1998, 1999 John D. Polstra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/libexec/rtld-elf/ia64/reloc.c 233231 2012-03-20 13:20:49Z kib $
 */

/*
 * Dynamic linker for ELF.
 *
 * John Polstra <jdp@polstra.com>.
 */

#include <sys/param.h>
#include <sys/mman.h>
#include <machine/ia64_cpu.h>

#include <assert.h>
#include <dlfcn.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "debug.h"
#include "rtld.h"

extern Elf_Dyn _DYNAMIC;

/*
 * Macros for loading/storing unaligned 64-bit values.  These are
 * needed because relocations can point to unaligned data.  This
 * occurs in the DWARF2 exception frame tables generated by the
 * compiler, for instance.
 *
 * We don't use these when relocating jump slots and GOT entries,
 * since they are guaranteed to be aligned.
 *
 * XXX dfr stub for now.
 */
#define load64(p)	(*(u_int64_t *) (p))
#define store64(p, v)	(*(u_int64_t *) (p) = (v))

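/*
 * Illustrative sketch only, not used here: a real (non-stub)
 * implementation of the macros above would avoid the hardware
 * unaligned-access trap by assembling the value bytewise, e.g.:
 *
 *	static __inline u_int64_t
 *	load64_unaligned(const void *p)
 *	{
 *		u_int64_t v;
 *
 *		memcpy(&v, p, sizeof(v));
 *		return (v);
 *	}
 */
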
/*
 * Allocate an @fptr.  On ia64 a C function pointer does not point at
 * code; it points at a function descriptor, a pair of 64-bit words
 * holding the entry address and the gp (global pointer) of the
 * defining object.  struct fptr mirrors that layout; rtld carves
 * descriptors out of the chunks below.
 */

#define FPTR_CHUNK_SIZE		64

struct fptr_chunk {
	struct fptr fptrs[FPTR_CHUNK_SIZE];
};

static struct fptr_chunk first_chunk;
static struct fptr_chunk *current_chunk = &first_chunk;
static struct fptr *next_fptr = &first_chunk.fptrs[0];
static struct fptr *last_fptr = &first_chunk.fptrs[FPTR_CHUNK_SIZE];

/*
 * We use static storage initially so that we don't have to call
 * malloc during init_rtld().  Chunks are never freed: a descriptor,
 * once handed out, must remain valid and unique for the life of the
 * process so that function pointer comparisons keep working.
 */
static struct fptr *
alloc_fptr(Elf_Addr target, Elf_Addr gp)
{
	struct fptr *fptr;

	if (next_fptr == last_fptr) {
		current_chunk = malloc(sizeof(struct fptr_chunk));
		next_fptr = &current_chunk->fptrs[0];
		last_fptr = &current_chunk->fptrs[FPTR_CHUNK_SIZE];
	}
	fptr = next_fptr;
	next_fptr++;
	fptr->target = target;
	fptr->gp = gp;
	return fptr;
}

static struct fptr **
alloc_fptrs(Obj_Entry *obj, bool mapped)
{
	struct fptr **fptrs;
	size_t fbytes;

	fbytes = obj->nchains * sizeof(struct fptr *);

	/*
	 * Avoid malloc, if requested. Happens when relocating
	 * rtld itself on startup.
	 */
	if (mapped) {
		fptrs = mmap(NULL, fbytes, PROT_READ|PROT_WRITE,
		    MAP_ANON, -1, 0);
		if (fptrs == MAP_FAILED)
			fptrs = NULL;
	} else {
		fptrs = malloc(fbytes);
		if (fptrs != NULL)
			memset(fptrs, 0, fbytes);
	}

	/*
	 * This assertion is necessary to guarantee function pointer
	 * uniqueness.
	 */
	assert(fptrs != NULL);

	return (obj->priv = fptrs);
}

static void
free_fptrs(Obj_Entry *obj, bool mapped)
{
	struct fptr **fptrs;
	size_t fbytes;

	fptrs = obj->priv;
	if (fptrs == NULL)
		return;

	fbytes = obj->nchains * sizeof(struct fptr *);
	if (mapped)
		munmap(fptrs, fbytes);
	else
		free(fptrs);
	obj->priv = NULL;
}

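/*
 * Lifecycle note: obj->priv maps symbol indices to their canonical
 * descriptors.  While rtld relocates itself the table is backed by
 * mmap (malloc is not usable yet) and is released again at the end of
 * reloc_non_plt(); make_function_pointer() rebuilds a malloc'ed table
 * later if one is needed.
 */
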
/* Relocate a non-PLT object with addend. */
static int
reloc_non_plt_obj(Obj_Entry *obj_rtld, Obj_Entry *obj, const Elf_Rela *rela,
    SymCache *cache, int flags, RtldLockState *lockstate)
{
	struct fptr **fptrs;
	Elf_Addr *where = (Elf_Addr *) (obj->relocbase + rela->r_offset);

	switch (ELF_R_TYPE(rela->r_info)) {
	case R_IA_64_REL64LSB:
		/*
		 * We handle rtld's relocations in rtld_start.S
		 */
		if (obj != obj_rtld)
			store64(where,
				load64(where) + (Elf_Addr) obj->relocbase);
		break;

	case R_IA_64_DIR64LSB: {
		const Elf_Sym *def;
		const Obj_Entry *defobj;
		Elf_Addr target;

		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    flags, cache, lockstate);
		if (def == NULL)
			return -1;

		target = (def->st_shndx != SHN_UNDEF)
		    ? (Elf_Addr)(defobj->relocbase + def->st_value) : 0;
		store64(where, target + rela->r_addend);
		break;
	}

	case R_IA_64_FPTR64LSB: {
		/*
		 * We have to make sure that all @fptr references to
		 * the same function are identical so that code can
		 * compare function pointers.
		 */
		const Elf_Sym *def;
		const Obj_Entry *defobj;
		struct fptr *fptr = NULL;
		Elf_Addr target, gp;
		int sym_index;

		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    SYMLOOK_IN_PLT | flags, cache, lockstate);
		if (def == NULL) {
			/*
			 * XXX r_debug_state is problematic and find_symdef()
			 * returns NULL for it. This probably has something to
			 * do with symbol versioning (r_debug_state is in the
			 * symbol map). If we return -1 in that case we abort
			 * relocating rtld, which typically is fatal. So, for
			 * now just skip the symbol when we're relocating
			 * rtld. We don't care about r_debug_state unless we
			 * are being debugged.
			 */
			if (obj != obj_rtld)
				return -1;
			break;
		}

		if (def->st_shndx != SHN_UNDEF) {
			target = (Elf_Addr)(defobj->relocbase + def->st_value);
			gp = (Elf_Addr)defobj->pltgot;

			/* rtld is allowed to reference itself only */
			assert(!obj->rtld || obj == defobj);
			fptrs = defobj->priv;
			if (fptrs == NULL)
				fptrs = alloc_fptrs((Obj_Entry *) defobj,
				    obj->rtld);

			sym_index = def - defobj->symtab;

			/*
			 * Find the @fptr, using fptrs as a helper.
			 */
			if (fptrs)
				fptr = fptrs[sym_index];
			if (!fptr) {
				fptr = alloc_fptr(target, gp);
				if (fptrs)
					fptrs[sym_index] = fptr;
			}
		} else
			fptr = NULL;

		store64(where, (Elf_Addr)fptr);
		break;
	}

	case R_IA_64_IPLTLSB: {
		/*
		 * Relocation typically used to populate C++ virtual function
		 * tables. It creates a 128-bit function descriptor at the
		 * specified memory address.
		 */
		const Elf_Sym *def;
		const Obj_Entry *defobj;
		struct fptr *fptr;
		Elf_Addr target, gp;

		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    flags, cache, lockstate);
		if (def == NULL)
			return -1;

		if (def->st_shndx != SHN_UNDEF) {
			target = (Elf_Addr)(defobj->relocbase + def->st_value);
			gp = (Elf_Addr)defobj->pltgot;
		} else {
			target = 0;
			gp = 0;
		}

		fptr = (void *)where;
		store64(&fptr->target, target);
		store64(&fptr->gp, gp);
		break;
	}

	case R_IA_64_DTPMOD64LSB: {
		const Elf_Sym *def;
		const Obj_Entry *defobj;

		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    flags, cache, lockstate);
		if (def == NULL)
			return -1;

		store64(where, defobj->tlsindex);
		break;
	}

	case R_IA_64_DTPREL64LSB: {
		const Elf_Sym *def;
		const Obj_Entry *defobj;

		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    flags, cache, lockstate);
		if (def == NULL)
			return -1;

		store64(where, def->st_value + rela->r_addend);
		break;
	}

	case R_IA_64_TPREL64LSB: {
		const Elf_Sym *def;
		const Obj_Entry *defobj;

		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    flags, cache, lockstate);
		if (def == NULL)
			return -1;

		/*
		 * We lazily allocate offsets for static TLS as we
		 * see the first relocation that references the
		 * TLS block. This allows us to support (small
		 * amounts of) static TLS in dynamically loaded
		 * modules. If we run out of space, we generate an
		 * error.
		 */
		if (!defobj->tls_done) {
			if (!allocate_tls_offset((Obj_Entry *) defobj)) {
				_rtld_error("%s: No space available for static "
				    "Thread Local Storage", obj->path);
				return -1;
			}
		}

		store64(where,
		    defobj->tlsoffset + def->st_value + rela->r_addend);
		break;
	}

	case R_IA_64_NONE:
		break;

	default:
		_rtld_error("%s: Unsupported relocation type %u"
		    " in non-PLT relocations", obj->path,
		    (unsigned int)ELF_R_TYPE(rela->r_info));
		return -1;
	}

	return (0);
}

/* Process the non-PLT relocations. */
int
reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
    RtldLockState *lockstate)
{
	const Elf_Rel *rellim;
	const Elf_Rel *rel;
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	SymCache *cache;
	size_t bytes = obj->nchains * sizeof(SymCache);
	int r = -1;

	/*
	 * The dynamic loader may be called from a thread, and we have
	 * only a limited amount of stack available, so we cannot use
	 * alloca().
	 */
	cache = mmap(NULL, bytes, PROT_READ|PROT_WRITE, MAP_ANON, -1, 0);
	if (cache == MAP_FAILED)
		cache = NULL;

	/* Perform relocations without addend if there are any: */
	rellim = (const Elf_Rel *) ((caddr_t) obj->rel + obj->relsize);
	for (rel = obj->rel;  obj->rel != NULL && rel < rellim;  rel++) {
		Elf_Rela locrela;

		locrela.r_info = rel->r_info;
		locrela.r_offset = rel->r_offset;
		locrela.r_addend = 0;
		if (reloc_non_plt_obj(obj_rtld, obj, &locrela, cache, flags,
		    lockstate))
			goto done;
	}

	/* Perform relocations with addend if there are any: */
	relalim = (const Elf_Rela *) ((caddr_t) obj->rela + obj->relasize);
	for (rela = obj->rela;  obj->rela != NULL && rela < relalim;  rela++) {
		if (reloc_non_plt_obj(obj_rtld, obj, rela, cache, flags,
		    lockstate))
			goto done;
	}

	r = 0;
done:
	if (cache)
		munmap(cache, bytes);

	/*
	 * Release the temporarily mapped fptrs table if we relocated
	 * the rtld object itself. A new table will be created in
	 * make_function_pointer using malloc when needed.
	 */
	if (obj->rtld && obj->priv)
		free_fptrs(obj, true);

	return (r);
}

/* Process the PLT relocations. */
int
reloc_plt(Obj_Entry *obj)
{
	/* All PLT relocations are the same kind: Elf_Rel or Elf_Rela. */
	if (obj->pltrelsize != 0) {
		const Elf_Rel *rellim;
		const Elf_Rel *rel;

		rellim = (const Elf_Rel *)
			((char *)obj->pltrel + obj->pltrelsize);
		for (rel = obj->pltrel;  rel < rellim;  rel++) {
			Elf_Addr *where;

			assert(ELF_R_TYPE(rel->r_info) == R_IA_64_IPLTLSB);

			/* Relocate the @fptr pointing into the PLT. */
			where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
			*where += (Elf_Addr)obj->relocbase;
		}
	} else {
		const Elf_Rela *relalim;
		const Elf_Rela *rela;

		relalim = (const Elf_Rela *)
			((char *)obj->pltrela + obj->pltrelasize);
		for (rela = obj->pltrela;  rela < relalim;  rela++) {
			Elf_Addr *where;

			assert(ELF_R_TYPE(rela->r_info) == R_IA_64_IPLTLSB);

			/* Relocate the @fptr pointing into the PLT. */
			where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
			*where += (Elf_Addr)obj->relocbase;
		}
	}
	return 0;
}

int
reloc_iresolve(Obj_Entry *obj, struct Struct_RtldLockState *lockstate)
{

	/* XXX not implemented */
	return (0);
}

int
reloc_gnu_ifunc(Obj_Entry *obj, int flags,
    struct Struct_RtldLockState *lockstate)
{

	/* XXX not implemented */
	return (0);
}

/* Relocate the jump slots in an object. */
int
reloc_jmpslots(Obj_Entry *obj, int flags, RtldLockState *lockstate)
{
	if (obj->jmpslots_done)
		return 0;
	/* All PLT relocations are the same kind: Elf_Rel or Elf_Rela. */
	if (obj->pltrelsize != 0) {
		const Elf_Rel *rellim;
		const Elf_Rel *rel;

		rellim = (const Elf_Rel *)
			((char *)obj->pltrel + obj->pltrelsize);
		for (rel = obj->pltrel;  rel < rellim;  rel++) {
			Elf_Addr *where;
			const Elf_Sym *def;
			const Obj_Entry *defobj;

			assert(ELF_R_TYPE(rel->r_info) == R_IA_64_IPLTLSB);
			where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
			def = find_symdef(ELF_R_SYM(rel->r_info), obj,
			    &defobj, SYMLOOK_IN_PLT | flags, NULL, lockstate);
			if (def == NULL)
				return -1;
			reloc_jmpslot(where,
				      (Elf_Addr)(defobj->relocbase
						 + def->st_value),
				      defobj, obj, rel);
		}
	} else {
		const Elf_Rela *relalim;
		const Elf_Rela *rela;

		relalim = (const Elf_Rela *)
			((char *)obj->pltrela + obj->pltrelasize);
		for (rela = obj->pltrela;  rela < relalim;  rela++) {
			Elf_Addr *where;
			const Elf_Sym *def;
			const Obj_Entry *defobj;

			assert(ELF_R_TYPE(rela->r_info) == R_IA_64_IPLTLSB);
			where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
			def = find_symdef(ELF_R_SYM(rela->r_info), obj,
			    &defobj, SYMLOOK_IN_PLT | flags, NULL, lockstate);
			if (def == NULL)
				return -1;
			reloc_jmpslot(where,
				      (Elf_Addr)(defobj->relocbase
						 + def->st_value),
				      defobj, obj, (Elf_Rel *)rela);
		}
	}
	obj->jmpslots_done = true;
	return 0;
}

/* Fix up the jump slot at "where" to transfer control to "target". */
Elf_Addr
reloc_jmpslot(Elf_Addr *where, Elf_Addr target, const Obj_Entry *obj,
	      const Obj_Entry *refobj, const Elf_Rel *rel)
{
	Elf_Addr stubaddr;

	dbg(" reloc_jmpslot: where=%p, target=%p, gp=%p",
	    (void *)where, (void *)target, (void *)obj->pltgot);
	stubaddr = *where;
	if (stubaddr != target) {
		/*
		 * Point this @fptr directly at the target. Update the
		 * gp value first so that we don't break another cpu
		 * which is currently executing the PLT entry.
		 */
		where[1] = (Elf_Addr) obj->pltgot;
		ia64_mf();
		where[0] = target;
		ia64_mf();
	}

	/*
	 * The caller needs an @fptr for the adjusted entry. The PLT
	 * entry serves this purpose nicely.
	 */
	return (Elf_Addr) where;
}

/*
 * XXX ia64 doesn't seem to have copy relocations.
 *
 * Returns 0 on success, -1 on failure.
 */
int
do_copy_relocations(Obj_Entry *dstobj)
{

	return 0;
}

/*
 * Return the @fptr representing a given function symbol.
 */
void *
make_function_pointer(const Elf_Sym *sym, const Obj_Entry *obj)
{
	struct fptr **fptrs = obj->priv;
	int index = sym - obj->symtab;

	if (!fptrs) {
		/*
		 * This should only happen for something like
		 * dlsym("dlopen"). Actually, I'm not sure it can ever
		 * happen.
		 */
		fptrs = alloc_fptrs((Obj_Entry *) obj, false);
	}
	if (!fptrs[index]) {
		Elf_Addr target, gp;
		target = (Elf_Addr) (obj->relocbase + sym->st_value);
		gp = (Elf_Addr) obj->pltgot;
		fptrs[index] = alloc_fptr(target, gp);
	}
	return fptrs[index];
}

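/*
 * The two helpers below exploit the @fptr convention directly: each
 * builds a temporary descriptor on the stack (the target plus this
 * object's gp) and calls through it, so the compiler's normal ia64
 * indirect-call sequence loads both words of the descriptor.
 */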
void
call_initfini_pointer(const Obj_Entry *obj, Elf_Addr target)
{
	struct fptr fptr;

	fptr.gp = (Elf_Addr) obj->pltgot;
	fptr.target = target;
	dbg(" initfini: target=%p, gp=%p",
	    (void *) fptr.target, (void *) fptr.gp);
	((InitFunc) &fptr)();
}

void
call_init_pointer(const Obj_Entry *obj, Elf_Addr target)
{
	struct fptr fptr;

	fptr.gp = (Elf_Addr) obj->pltgot;
	fptr.target = target;
	dbg(" init: target=%p, gp=%p",
	    (void *) fptr.target, (void *) fptr.gp);
	((InitArrFunc) &fptr)(main_argc, main_argv, environ);
}

/* Initialize the special PLT entries. */
void
init_pltgot(Obj_Entry *obj)
{
	const Elf_Dyn *dynp;
	Elf_Addr *pltres = NULL;

	/*
	 * When there are no PLT relocations, the DT_IA_64_PLT_RESERVE entry
	 * is bogus. Do not set up the BOR pointers in that case. An example
	 * of where this happens is /usr/lib/libxpg4.so.3.
	 */
	if (obj->pltrelasize == 0 && obj->pltrelsize == 0)
		return;

	/*
	 * Find the PLT RESERVE section.
	 */
	for (dynp = obj->dynamic;  dynp->d_tag != DT_NULL;  dynp++) {
		if (dynp->d_tag == DT_IA_64_PLT_RESERVE)
			pltres = (Elf_Addr *)
				(obj->relocbase + dynp->d_un.d_ptr);
	}
	if (pltres == NULL)
		errx(1, "Can't find DT_IA_64_PLT_RESERVE entry");

	/*
	 * The PLT RESERVE section is used to get values to pass to
	 * _rtld_bind when lazy binding.
	 */
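	/*
	 * pltres[0] is the Obj_Entry handle that _rtld_bind() receives,
	 * and pltres[1]/pltres[2] together form a function descriptor
	 * (entry point and gp) for _rtld_bind_start.
	 */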
	pltres[0] = (Elf_Addr) obj;
	pltres[1] = FPTR_TARGET(_rtld_bind_start);
	pltres[2] = FPTR_GP(_rtld_bind_start);
}

void
allocate_initial_tls(Obj_Entry *list)
{
	void *tpval;

	/*
	 * Fix the size of the static TLS block by using the maximum
	 * offset allocated so far and adding a bit for dynamic modules
	 * to use.
	 */
	tls_static_space = tls_last_offset + tls_last_size +
	    RTLD_STATIC_TLS_EXTRA;

	tpval = allocate_tls(list, NULL, TLS_TCB_SIZE, 16);
	/* Install the thread pointer: r13 is the ia64 tp register. */
	__asm __volatile("mov r13 = %0" :: "r"(tpval));
}

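/*
 * ia64 uses TLS variant I with r13 as the thread pointer.  The
 * compiler emits calls to __tls_get_addr with the module index and
 * offset produced by the DTPMOD64LSB/DTPREL64LSB relocations above;
 * the common rtld code resolves the pair through the current thread's
 * dynamic thread vector.
 */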
void *
__tls_get_addr(unsigned long module, unsigned long offset)
{
	register Elf_Addr **tp __asm__("r13");

	return tls_get_addr_common(tp, module, offset);
}
661