/*-
 * Copyright 1996, 1997, 1998, 1999 John D. Polstra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/libexec/rtld-elf/ia64/reloc.c 331206 2018-03-19 14:28:58Z marius $
 */

/*
 * Dynamic linker for ELF.
 *
 * John Polstra <jdp@polstra.com>.
 */

#include <sys/param.h>
#include <sys/mman.h>
#include <machine/ia64_cpu.h>

#include <dlfcn.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "debug.h"
#include "rtld.h"

extern Elf_Dyn _DYNAMIC;

/*
 * Macros for loading/storing unaligned 64-bit values.  These are
 * needed because relocations can point to unaligned data.  This
 * occurs in the DWARF2 exception frame tables generated by the
 * compiler, for instance.
 *
 * We don't use these when relocating jump slots and GOT entries,
 * since they are guaranteed to be aligned.
 *
 * XXX dfr stub for now.
 */
#define load64(p)	(*(u_int64_t *) (p))
#define store64(p, v)	(*(u_int64_t *) (p) = (v))
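
/*
 * A sketch (not compiled in) of what an alignment-safe alternative
 * might look like: copying through memcpy never performs a wide
 * unaligned access, since the compiler lowers it to safe moves.  The
 * direct-access stubs above work only as long as unaligned faults are
 * handled elsewhere, hence the XXX note.
 */
#if 0
static __inline u_int64_t
load64_safe(const void *p)
{
	u_int64_t v;

	memcpy(&v, p, sizeof(v));
	return (v);
}

static __inline void
store64_safe(void *p, u_int64_t v)
{

	memcpy(p, &v, sizeof(v));
}
#endif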

/* Allocate an @fptr. */

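/*
 * Background: in the ia64 ABI a function pointer ("@fptr") is not the
 * code address itself but the address of a 16-byte descriptor holding
 * the entry point and the gp (global pointer) of the defining object.
 * Descriptors are carved out of fixed-size chunks so that most
 * allocations cost no more than a pointer increment.
 */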
#define FPTR_CHUNK_SIZE		64

struct fptr_chunk {
	struct fptr fptrs[FPTR_CHUNK_SIZE];
};

static struct fptr_chunk first_chunk;
static struct fptr_chunk *current_chunk = &first_chunk;
static struct fptr *next_fptr = &first_chunk.fptrs[0];
static struct fptr *last_fptr = &first_chunk.fptrs[FPTR_CHUNK_SIZE];

/*
 * We use static storage initially so that we don't have to call
 * malloc during init_rtld().
 */
static struct fptr *
alloc_fptr(Elf_Addr target, Elf_Addr gp)
{
	struct fptr *fptr;

	if (next_fptr == last_fptr) {
		current_chunk = xmalloc(sizeof(struct fptr_chunk));
		next_fptr = &current_chunk->fptrs[0];
		last_fptr = &current_chunk->fptrs[FPTR_CHUNK_SIZE];
	}
	fptr = next_fptr;
	next_fptr++;
	fptr->target = target;
	fptr->gp = gp;
	return fptr;
}
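
/*
 * Note that fptr chunks are never freed: a descriptor, once handed
 * out, must remain valid (and unique) for the lifetime of the
 * process, since its address may be held as a function pointer.
 */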

static struct fptr **
alloc_fptrs(Obj_Entry *obj, bool mapped)
{
	struct fptr **fptrs;
	size_t fbytes;

	fbytes = obj->dynsymcount * sizeof(struct fptr *);

	/*
	 * Avoid malloc if requested. This happens when relocating
	 * rtld itself on startup.
	 */
	if (mapped) {
		fptrs = mmap(NULL, fbytes, PROT_READ|PROT_WRITE,
		    MAP_ANON, -1, 0);
		if (fptrs == MAP_FAILED)
			fptrs = NULL;
	} else {
		fptrs = xcalloc(1, fbytes);
	}

	/*
	 * This assertion is necessary to guarantee function pointer
	 * uniqueness.
	 */
	assert(fptrs != NULL);

	return (obj->priv = fptrs);
}

static void
free_fptrs(Obj_Entry *obj, bool mapped)
{
	struct fptr **fptrs;
	size_t fbytes;

	fptrs = obj->priv;
	if (fptrs == NULL)
		return;

	fbytes = obj->dynsymcount * sizeof(struct fptr *);
	if (mapped)
		munmap(fptrs, fbytes);
	else
		free(fptrs);
	obj->priv = NULL;
}

/* Relocate a non-PLT object with addend. */
static int
reloc_non_plt_obj(Obj_Entry *obj_rtld, Obj_Entry *obj, const Elf_Rela *rela,
    SymCache *cache, int flags, RtldLockState *lockstate)
{
	struct fptr **fptrs;
	Elf_Addr *where = (Elf_Addr *) (obj->relocbase + rela->r_offset);

	switch (ELF_R_TYPE(rela->r_info)) {
	case R_IA_64_REL64LSB:
		/*
		 * We handle rtld's relocations in rtld_start.S.
		 */
		if (obj != obj_rtld)
			store64(where,
				load64(where) + (Elf_Addr) obj->relocbase);
		break;

	case R_IA_64_DIR64LSB: {
		const Elf_Sym *def;
		const Obj_Entry *defobj;
		Elf_Addr target;

		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    flags, cache, lockstate);
		if (def == NULL)
			return -1;

		target = (def->st_shndx != SHN_UNDEF)
		    ? (Elf_Addr)(defobj->relocbase + def->st_value) : 0;
		store64(where, target + rela->r_addend);
		break;
	}

	case R_IA_64_FPTR64LSB: {
		/*
		 * We have to make sure that all @fptr references to
		 * the same function are identical so that code can
		 * compare function pointers.
		 */
		const Elf_Sym *def;
		const Obj_Entry *defobj;
		struct fptr *fptr = NULL;
		Elf_Addr target, gp;
		int sym_index;

		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    SYMLOOK_IN_PLT | flags, cache, lockstate);
		if (def == NULL) {
			/*
			 * XXX r_debug_state is problematic and find_symdef()
			 * returns NULL for it. This probably has something to
			 * do with symbol versioning (r_debug_state is in the
			 * symbol map). If we return -1 in that case we abort
			 * relocating rtld, which typically is fatal. So, for
			 * now just skip the symbol when we're relocating
			 * rtld. We don't care about r_debug_state unless we
			 * are being debugged.
			 */
			if (obj != obj_rtld)
				return -1;
			break;
		}

		if (def->st_shndx != SHN_UNDEF) {
			target = (Elf_Addr)(defobj->relocbase + def->st_value);
			gp = (Elf_Addr)defobj->pltgot;

			/* rtld is only allowed to reference itself. */
			assert(!obj->rtld || obj == defobj);
			fptrs = defobj->priv;
			if (fptrs == NULL)
				fptrs = alloc_fptrs((Obj_Entry *) defobj,
				    obj->rtld);

			sym_index = def - defobj->symtab;

			/*
			 * Find the @fptr, using fptrs as a helper.
			 */
			if (fptrs)
				fptr = fptrs[sym_index];
			if (!fptr) {
				fptr = alloc_fptr(target, gp);
				if (fptrs)
					fptrs[sym_index] = fptr;
			}
		} else
			fptr = NULL;

		store64(where, (Elf_Addr)fptr);
		break;
	}
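
	/*
	 * To illustrate the uniqueness requirement above: ISO C requires
	 * that pointers to the same function compare equal, e.g.
	 *
	 *	extern void foo(void);
	 *	void (*a)(void) = foo;	// resolved in one shared object
	 *	void (*b)(void) = foo;	// resolved in another shared object
	 *	assert(a == b);		// must still hold
	 *
	 * which is only true if every object resolves foo to the same
	 * descriptor.
	 */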

	case R_IA_64_IPLTLSB: {
		/*
		 * Relocation typically used to populate C++ virtual function
		 * tables. It creates a 128-bit function descriptor at the
		 * specified memory address.
		 */
		const Elf_Sym *def;
		const Obj_Entry *defobj;
		struct fptr *fptr;
		Elf_Addr target, gp;

		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    flags, cache, lockstate);
		if (def == NULL)
			return -1;

		if (def->st_shndx != SHN_UNDEF) {
			target = (Elf_Addr)(defobj->relocbase + def->st_value);
			gp = (Elf_Addr)defobj->pltgot;
		} else {
			target = 0;
			gp = 0;
		}

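		/*
		 * Write the two 64-bit words of the descriptor in
		 * place; unlike FPTR64LSB there is no uniqueness
		 * requirement here, so no table lookup is needed.
		 */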
		fptr = (void *)where;
		store64(&fptr->target, target);
		store64(&fptr->gp, gp);
		break;
	}

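	/*
	 * The TLS relocations below come in two flavors: DTPMOD/DTPREL
	 * produce the (module index, offset) pair consumed by
	 * __tls_get_addr() for dynamic TLS, while TPREL resolves to a
	 * fixed offset in the static TLS block, addressed directly from
	 * the thread pointer.
	 */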
	case R_IA_64_DTPMOD64LSB: {
		const Elf_Sym *def;
		const Obj_Entry *defobj;

		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    flags, cache, lockstate);
		if (def == NULL)
			return -1;

		store64(where, defobj->tlsindex);
		break;
	}

	case R_IA_64_DTPREL64LSB: {
		const Elf_Sym *def;
		const Obj_Entry *defobj;

		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    flags, cache, lockstate);
		if (def == NULL)
			return -1;

		store64(where, def->st_value + rela->r_addend);
		break;
	}

	case R_IA_64_TPREL64LSB: {
		const Elf_Sym *def;
		const Obj_Entry *defobj;

		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    flags, cache, lockstate);
		if (def == NULL)
			return -1;

		/*
		 * We lazily allocate offsets for static TLS as we
		 * see the first relocation that references the
		 * TLS block. This allows us to support (small
		 * amounts of) static TLS in dynamically loaded
		 * modules. If we run out of space, we generate an
		 * error.
		 */
		if (!defobj->tls_done) {
			if (!allocate_tls_offset((Obj_Entry *) defobj)) {
				_rtld_error("%s: No space available for static "
				    "Thread Local Storage", obj->path);
				return -1;
			}
		}

		store64(where,
		    defobj->tlsoffset + def->st_value + rela->r_addend);
		break;
	}

	case R_IA_64_NONE:
		break;

	default:
		_rtld_error("%s: Unsupported relocation type %u"
		    " in non-PLT relocations\n", obj->path,
		    (unsigned int)ELF_R_TYPE(rela->r_info));
		return -1;
	}

	return (0);
}

/* Process the non-PLT relocations. */
int
reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
    RtldLockState *lockstate)
{
	const Elf_Rel *rellim;
	const Elf_Rel *rel;
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	SymCache *cache;
	int bytes = obj->dynsymcount * sizeof(SymCache);
	int r = -1;

	if ((flags & SYMLOOK_IFUNC) != 0)
		/* XXX not implemented */
		return (0);

	/*
	 * The dynamic loader may be called from a thread; we have
	 * only a limited amount of stack available, so we cannot
	 * use alloca().
	 */
	cache = mmap(NULL, bytes, PROT_READ|PROT_WRITE, MAP_ANON, -1, 0);
	if (cache == MAP_FAILED)
		cache = NULL;

	/* Perform relocations without addend if there are any: */
	rellim = (const Elf_Rel *) ((caddr_t) obj->rel + obj->relsize);
	for (rel = obj->rel;  obj->rel != NULL && rel < rellim;  rel++) {
		Elf_Rela locrela;

		locrela.r_info = rel->r_info;
		locrela.r_offset = rel->r_offset;
		locrela.r_addend = 0;
		if (reloc_non_plt_obj(obj_rtld, obj, &locrela, cache, flags,
		    lockstate))
			goto done;
	}

	/* Perform relocations with addend if there are any: */
	relalim = (const Elf_Rela *) ((caddr_t) obj->rela + obj->relasize);
	for (rela = obj->rela;  obj->rela != NULL && rela < relalim;  rela++) {
		if (reloc_non_plt_obj(obj_rtld, obj, rela, cache, flags,
		    lockstate))
			goto done;
	}

	r = 0;
done:
	if (cache)
		munmap(cache, bytes);

	/*
	 * Release the temporarily mapped fptrs if we are relocating
	 * the rtld object itself. A new table will be created in
	 * make_function_pointer using malloc when needed.
	 */
	if (obj->rtld && obj->priv)
		free_fptrs(obj, true);

	return (r);
}

/* Process the PLT relocations. */
int
reloc_plt(Obj_Entry *obj)
{
	/* All PLT relocations are the same kind: Elf_Rel or Elf_Rela. */
	if (obj->pltrelsize != 0) {
		const Elf_Rel *rellim;
		const Elf_Rel *rel;

		rellim = (const Elf_Rel *)
			((char *)obj->pltrel + obj->pltrelsize);
		for (rel = obj->pltrel;  rel < rellim;  rel++) {
			Elf_Addr *where;

			assert(ELF_R_TYPE(rel->r_info) == R_IA_64_IPLTLSB);

			/* Relocate the @fptr pointing into the PLT. */
			where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
			*where += (Elf_Addr)obj->relocbase;
		}
	} else {
		const Elf_Rela *relalim;
		const Elf_Rela *rela;

		relalim = (const Elf_Rela *)
			((char *)obj->pltrela + obj->pltrelasize);
		for (rela = obj->pltrela;  rela < relalim;  rela++) {
			Elf_Addr *where;

			assert(ELF_R_TYPE(rela->r_info) == R_IA_64_IPLTLSB);

			/* Relocate the @fptr pointing into the PLT. */
			where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
			*where += (Elf_Addr)obj->relocbase;
		}
	}
	return 0;
}
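
/*
 * Note: reloc_plt() merely rebases the descriptors so that they point
 * at the object's own PLT entries; the real target is bound on first
 * call via _rtld_bind(), or eagerly by reloc_jmpslots() below when
 * immediate binding is requested.
 */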

int
reloc_iresolve(Obj_Entry *obj, struct Struct_RtldLockState *lockstate)
{

	/* XXX not implemented */
	return (0);
}

int
reloc_gnu_ifunc(Obj_Entry *obj, int flags,
    struct Struct_RtldLockState *lockstate)
{

	/* XXX not implemented */
	return (0);
}

/* Relocate the jump slots in an object. */
int
reloc_jmpslots(Obj_Entry *obj, int flags, RtldLockState *lockstate)
{
	if (obj->jmpslots_done)
		return 0;
	/* All PLT relocations are the same kind: Elf_Rel or Elf_Rela. */
	if (obj->pltrelsize != 0) {
		const Elf_Rel *rellim;
		const Elf_Rel *rel;

		rellim = (const Elf_Rel *)
			((char *)obj->pltrel + obj->pltrelsize);
		for (rel = obj->pltrel;  rel < rellim;  rel++) {
			Elf_Addr *where;
			const Elf_Sym *def;
			const Obj_Entry *defobj;

			assert(ELF_R_TYPE(rel->r_info) == R_IA_64_IPLTLSB);
			where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
			def = find_symdef(ELF_R_SYM(rel->r_info), obj,
			    &defobj, SYMLOOK_IN_PLT | flags, NULL, lockstate);
			if (def == NULL)
				return -1;
			reloc_jmpslot(where,
				      (Elf_Addr)(defobj->relocbase
						 + def->st_value),
				      defobj, obj, rel);
		}
	} else {
		const Elf_Rela *relalim;
		const Elf_Rela *rela;

		relalim = (const Elf_Rela *)
			((char *)obj->pltrela + obj->pltrelasize);
		for (rela = obj->pltrela;  rela < relalim;  rela++) {
			Elf_Addr *where;
			const Elf_Sym *def;
			const Obj_Entry *defobj;

			assert(ELF_R_TYPE(rela->r_info) == R_IA_64_IPLTLSB);
			where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
			def = find_symdef(ELF_R_SYM(rela->r_info), obj,
			    &defobj, SYMLOOK_IN_PLT | flags, NULL, lockstate);
			if (def == NULL)
				return -1;
			reloc_jmpslot(where,
				      (Elf_Addr)(defobj->relocbase
						 + def->st_value),
				      defobj, obj, (const Elf_Rel *)rela);
		}
	}
	obj->jmpslots_done = true;
	return 0;
}

/* Fix up the jump slot at "where" to transfer control to "target". */
Elf_Addr
reloc_jmpslot(Elf_Addr *where, Elf_Addr target, const Obj_Entry *obj,
	      const Obj_Entry *refobj, const Elf_Rel *rel)
{
	Elf_Addr stubaddr;

	dbg(" reloc_jmpslot: where=%p, target=%p, gp=%p",
	    (void *)where, (void *)target, (void *)obj->pltgot);
	stubaddr = *where;
	if (stubaddr != target) {
		/*
		 * Point this @fptr directly at the target. Update the
		 * gp value first so that we don't break another cpu
		 * which is currently executing the PLT entry.
		 */
		where[1] = (Elf_Addr) obj->pltgot;
		ia64_mf();
		where[0] = target;
		ia64_mf();
	}

	/*
	 * The caller needs an @fptr for the adjusted entry. The PLT
	 * entry serves this purpose nicely.
	 */
	return (Elf_Addr) where;
}

/*
 * XXX ia64 doesn't seem to have copy relocations.
 *
 * Returns 0 on success, -1 on failure.
 */
int
do_copy_relocations(Obj_Entry *dstobj)
{

	return 0;
}

/*
 * Return the @fptr representing a given function symbol.
 */
void *
make_function_pointer(const Elf_Sym *sym, const Obj_Entry *obj)
{
	struct fptr **fptrs = obj->priv;
	int index = sym - obj->symtab;

	if (!fptrs) {
		/*
		 * This should only happen for something like
		 * dlsym("dlopen"). Actually, I'm not sure it can ever
		 * happen.
		 */
		fptrs = alloc_fptrs((Obj_Entry *) obj, false);
	}
	if (!fptrs[index]) {
		Elf_Addr target, gp;

		target = (Elf_Addr) (obj->relocbase + sym->st_value);
		gp = (Elf_Addr) obj->pltgot;
		fptrs[index] = alloc_fptr(target, gp);
	}
	return fptrs[index];
}

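/*
 * Init and fini entry points are raw code addresses, so they are
 * called through a temporary on-stack descriptor combining the
 * target with the defining object's gp.
 */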
void
call_initfini_pointer(const Obj_Entry *obj, Elf_Addr target)
{
	struct fptr fptr;

	fptr.gp = (Elf_Addr) obj->pltgot;
	fptr.target = target;
	dbg(" initfini: target=%p, gp=%p",
	    (void *) fptr.target, (void *) fptr.gp);
	((InitFunc) &fptr)();
}

void
call_init_pointer(const Obj_Entry *obj, Elf_Addr target)
{
	struct fptr fptr;

	fptr.gp = (Elf_Addr) obj->pltgot;
	fptr.target = target;
	dbg(" initfini: target=%p, gp=%p",
	    (void *) fptr.target, (void *) fptr.gp);
	((InitArrFunc) &fptr)(main_argc, main_argv, environ);
}

void
ifunc_init(Elf_Auxinfo aux_info[__min_size(AT_COUNT)] __unused)
{

}

void
pre_init(void)
{

}

/* Initialize the special PLT entries. */
void
init_pltgot(Obj_Entry *obj)
{
	const Elf_Dyn *dynp;
	Elf_Addr *pltres = NULL;

	/*
	 * When there are no PLT relocations, the DT_IA_64_PLT_RESERVE entry
	 * is bogus. Do not set up the BOR pointers in that case. An example
	 * of where this happens is /usr/lib/libxpg4.so.3.
	 */
	if (obj->pltrelasize == 0 && obj->pltrelsize == 0)
		return;

	/*
	 * Find the PLT RESERVE section.
	 */
	for (dynp = obj->dynamic;  dynp->d_tag != DT_NULL;  dynp++) {
		if (dynp->d_tag == DT_IA_64_PLT_RESERVE)
			pltres = (Elf_Addr *)
				(obj->relocbase + dynp->d_un.d_ptr);
	}
	if (!pltres)
		errx(1, "Can't find DT_IA_64_PLT_RESERVE entry");

	/*
	 * The PLT RESERVE section is used to get values to pass to
	 * _rtld_bind when lazy binding.
	 */
	pltres[0] = (Elf_Addr) obj;
	pltres[1] = FPTR_TARGET(_rtld_bind_start);
	pltres[2] = FPTR_GP(_rtld_bind_start);
}

void
allocate_initial_tls(Obj_Entry *list)
{
	void *tpval;

	/*
	 * Fix the size of the static TLS block by using the maximum
	 * offset allocated so far and adding a bit for dynamic modules to
	 * use.
	 */
	tls_static_space = tls_last_offset + tls_last_size +
	    RTLD_STATIC_TLS_EXTRA;

	tpval = allocate_tls(list, NULL, TLS_TCB_SIZE, 16);
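	/* Install the thread pointer: on ia64 it lives in register r13. */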
	__asm __volatile("mov r13 = %0" :: "r"(tpval));
}

void *
__tls_get_addr(unsigned long module, unsigned long offset)
{
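	/* The thread pointer is kept in r13; bind the local directly to it. */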
	register Elf_Addr **tp __asm__("r13");

	return (tls_get_addr_common(tp, module, offset));
}
675