/*	$NetBSD: mips_reloc.c,v 1.58 2010/01/14 11:57:06 skrll Exp $	*/

/*
 * Copyright 1997 Michael L. Hitch <mhitch@montana.edu>
 * Portions copyright 2002 Charles M. Hannum <root@ihack.net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/libexec/rtld-elf/mips/reloc.c 309061 2016-11-23 17:48:43Z kib $");

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/endian.h>

#include <stdlib.h>
#include <string.h>
#include <inttypes.h>

#include <machine/sysarch.h>
#include <machine/tls.h>

#include "debug.h"
#include "rtld.h"

#ifdef __mips_n64
#define	GOT1_MASK	0x8000000000000000UL
#else
#define	GOT1_MASK	0x80000000UL
#endif

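/*
 * Editorial note: in the MIPS psABI the run-time linker owns the first GOT
 * slots.  GOT[0] holds the address of the lazy-binding entry point
 * (_rtld_bind_start below), and a cooperating static linker sets a reserved
 * high bit in GOT[1] to say that the slot may be used to stash a pointer to
 * the Obj_Entry, which is what GOT1_MASK marks and init_pltgot() tests.
 */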
void
init_pltgot(Obj_Entry *obj)
{
	if (obj->pltgot != NULL) {
		obj->pltgot[0] = (Elf_Addr) &_rtld_bind_start;
		if (obj->pltgot[1] & 0x80000000)
			obj->pltgot[1] = (Elf_Addr) obj | GOT1_MASK;
	}
}

int
do_copy_relocations(Obj_Entry *dstobj)
{
	/* Do nothing */
	return 0;
}

void _rtld_relocate_nonplt_self(Elf_Dyn *, Elf_Addr);

/*
 * It is possible for the compiler to emit relocations for unaligned data.
 * We handle this situation with these inlines.
 */
#ifdef __mips_n64
/*
 * ELF64 MIPS encodes the relocs uniquely.  The first 32 bits of r_info
 * contain the symbol index.  The top 32 bits contain three relocation types
 * encoded in a big-endian integer, with the first relocation in the LSB.
 * This means that for little endian we have to byte-swap that integer
 * (r_type).
 */
#define	Elf_Sxword			Elf64_Sxword
#define	ELF_R_NXTTYPE_64_P(r_type)	((((r_type) >> 8) & 0xff) == R_TYPE(64))
#if BYTE_ORDER == LITTLE_ENDIAN
#undef ELF_R_SYM
#undef ELF_R_TYPE
#define ELF_R_SYM(r_info)		((r_info) & 0xffffffff)
#define ELF_R_TYPE(r_info)		bswap32((r_info) >> 32)
#endif
#else
#define	ELF_R_NXTTYPE_64_P(r_type)	(0)
#define	Elf_Sxword			Elf32_Sword
#endif
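
/*
 * Worked example (editorial), little-endian N64: a paired
 * R_MIPS_REL32 (3) + R_MIPS_64 (18) relocation against symbol index 5 has
 * the byte layout r_sym=5, r_ssym=0, r_type3=0, r_type2=0x12, r_type=0x03,
 * which a little-endian load of r_info sees as 0x0312000000000005.  With
 * the overrides above, ELF_R_SYM() yields 0x00000005 and ELF_R_TYPE()
 * yields bswap32(0x03120000) == 0x00001203, so (r_type & 0xff) is REL32
 * and ELF_R_NXTTYPE_64_P() finds R_MIPS_64 in the next byte.
 */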

static __inline Elf_Sxword
load_ptr(void *where, size_t len)
{
	Elf_Sxword val;

	if (__predict_true(((uintptr_t)where & (len - 1)) == 0)) {
#ifdef __mips_n64
		if (len == sizeof(Elf_Sxword))
			return *(Elf_Sxword *)where;
#endif
		return *(Elf_Sword *)where;
	}

	val = 0;
#if BYTE_ORDER == LITTLE_ENDIAN
	(void)memcpy(&val, where, len);
#endif
#if BYTE_ORDER == BIG_ENDIAN
	(void)memcpy((uint8_t *)((&val)+1) - len, where, len);
#endif
	return (len == sizeof(Elf_Sxword)) ? val : (Elf_Sword)val;
}

static __inline void
store_ptr(void *where, Elf_Sxword val, size_t len)
{
	if (__predict_true(((uintptr_t)where & (len - 1)) == 0)) {
#ifdef __mips_n64
		if (len == sizeof(Elf_Sxword)) {
			*(Elf_Sxword *)where = val;
			return;
		}
#endif
		*(Elf_Sword *)where = val;
		return;
	}
#if BYTE_ORDER == LITTLE_ENDIAN
	(void)memcpy(where, &val, len);
#endif
#if BYTE_ORDER == BIG_ENDIAN
	(void)memcpy(where, (const uint8_t *)((&val)+1) - len, len);
#endif
}
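
/*
 * Editorial note on the unaligned paths above: val acts as a 64-bit staging
 * buffer.  On little-endian hosts the significant bytes of a narrow value
 * start at &val, so the memcpy uses the start of the buffer; on big-endian
 * hosts they end at (&val)+1, so the copy is offset to the last `len' bytes.
 * For example, an unaligned 4-byte load on a big-endian system lands in
 * bytes 4..7 of val and the final (Elf_Sword) cast in load_ptr()
 * sign-extends it.
 */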
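/*
 * Bootstrap relocation of the dynamic linker itself, run before any of its
 * own GOT entries are usable.  Editorial summary: the first local_gotno GOT
 * entries are local and only need the load address (relocbase) added; the
 * entries that follow parallel the dynamic symbols gotsym..symtabno-1 and
 * are set to each symbol's relocated value; finally, REL32 relocations
 * against local symbols are adjusted by relocbase as well.
 */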
void
_rtld_relocate_nonplt_self(Elf_Dyn *dynp, Elf_Addr relocbase)
{
	const Elf_Rel *rel = 0, *rellim;
	Elf_Addr relsz = 0;
	const Elf_Sym *symtab = NULL, *sym;
	Elf_Addr *where;
	Elf_Addr *got = NULL;
	Elf_Word local_gotno = 0, symtabno = 0, gotsym = 0;
	size_t i;

	for (; dynp->d_tag != DT_NULL; dynp++) {
		switch (dynp->d_tag) {
		case DT_REL:
			rel = (const Elf_Rel *)(relocbase + dynp->d_un.d_ptr);
			break;
		case DT_RELSZ:
			relsz = dynp->d_un.d_val;
			break;
		case DT_SYMTAB:
			symtab = (const Elf_Sym *)(relocbase + dynp->d_un.d_ptr);
			break;
		case DT_PLTGOT:
			got = (Elf_Addr *)(relocbase + dynp->d_un.d_ptr);
			break;
		case DT_MIPS_LOCAL_GOTNO:
			local_gotno = dynp->d_un.d_val;
			break;
		case DT_MIPS_SYMTABNO:
			symtabno = dynp->d_un.d_val;
			break;
		case DT_MIPS_GOTSYM:
			gotsym = dynp->d_un.d_val;
			break;
		}
	}

	i = (got[1] & GOT1_MASK) ? 2 : 1;
	/* Relocate the local GOT entries */
	got += i;
	for (; i < local_gotno; i++) {
		*got++ += relocbase;
	}

	sym = symtab + gotsym;
	/* Now do the global GOT entries */
	for (i = gotsym; i < symtabno; i++) {
		*got = sym->st_value + relocbase;
		++sym;
		++got;
	}

	rellim = (const Elf_Rel *)((caddr_t)rel + relsz);
	for (; rel < rellim; rel++) {
		Elf_Word r_symndx, r_type;

		where = (void *)(relocbase + rel->r_offset);

		r_symndx = ELF_R_SYM(rel->r_info);
		r_type = ELF_R_TYPE(rel->r_info);

		switch (r_type & 0xff) {
		case R_TYPE(REL32): {
			const size_t rlen =
			    ELF_R_NXTTYPE_64_P(r_type)
				? sizeof(Elf_Sxword)
				: sizeof(Elf_Sword);
			Elf_Sxword old = load_ptr(where, rlen);
			Elf_Sxword val = old;
#ifdef __mips_n64
			assert(r_type == R_TYPE(REL32)
			    || r_type == (R_TYPE(REL32)|(R_TYPE(64) << 8)));
#endif
			assert(r_symndx < gotsym);
			sym = symtab + r_symndx;
			assert(ELF_ST_BIND(sym->st_info) == STB_LOCAL);
			val += relocbase;
			dbg("REL32/L(%p) %p -> %p in <self>",
			    where, (void *)old, (void *)val);
			store_ptr(where, val, rlen);
			break;
		}

		case R_TYPE(GPREL32):
		case R_TYPE(NONE):
			break;

		default:
			abort();
			break;
		}
	}
}

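/*
 * Lazy-binding resolver, reached from the assembler stub _rtld_bind_start.
 * Editorial note: classic MIPS has no conventional PLT; `reloff' is the
 * symbol-table index of the called function, whose GOT slot sits at
 * got[local_gotno + reloff - gotsym].  The slot is patched with the
 * resolved address so subsequent calls bypass the resolver.
 */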
Elf_Addr
_mips_rtld_bind(Obj_Entry *obj, Elf_Size reloff)
{
	Elf_Addr *got = obj->pltgot;
	const Elf_Sym *def;
	const Obj_Entry *defobj;
	Elf_Addr target;

	def = find_symdef(reloff, obj, &defobj, SYMLOOK_IN_PLT, NULL,
	    NULL);
	if (def == NULL)
		rtld_die();

	target = (Elf_Addr)(defobj->relocbase + def->st_value);
	dbg("bind now/fixup at %s sym # %jd in %s --> was=%p new=%p",
	    obj->path,
	    (intmax_t)reloff, defobj->strtab + def->st_name,
	    (void *)got[obj->local_gotno + reloff - obj->gotsym],
	    (void *)target);
	got[obj->local_gotno + reloff - obj->gotsym] = target;
	return (Elf_Addr)target;
}

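/*
 * Editorial overview: reloc_non_plt() first fixes up the object's GOT
 * (local entries get the load address added, global entries are bound to
 * their defining objects), then walks the DT_REL table, which on MIPS
 * consists essentially of REL32 entries plus the TLS relocations handled
 * further below.
 */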
int
reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
    RtldLockState *lockstate)
{
	const Elf_Rel *rel;
	const Elf_Rel *rellim;
	Elf_Addr *got = obj->pltgot;
	const Elf_Sym *sym, *def;
	const Obj_Entry *defobj;
	Elf_Word i;
#ifdef SUPPORT_OLD_BROKEN_LD
	int broken;
#endif

	/* The relocation for the dynamic loader has already been done. */
	if (obj == obj_rtld)
		return (0);

	if ((flags & SYMLOOK_IFUNC) != 0)
		/* XXX not implemented */
		return (0);

#ifdef SUPPORT_OLD_BROKEN_LD
	broken = 0;
	sym = obj->symtab;
	for (i = 1; i < 12; i++)
		if (sym[i].st_info == ELF_ST_INFO(STB_LOCAL, STT_NOTYPE))
			broken = 1;
	dbg("%s: broken=%d", obj->path, broken);
#endif

	i = (got[1] & GOT1_MASK) ? 2 : 1;

	/* Relocate the local GOT entries */
	got += i;
	dbg("got:%p for %d entries adding %p",
	    got, obj->local_gotno, obj->relocbase);
	for (; i < obj->local_gotno; i++) {
		*got += (Elf_Addr)obj->relocbase;
		got++;
	}
	sym = obj->symtab + obj->gotsym;

	dbg("got:%p for %d entries",
	    got, obj->symtabno);
	/* Now do the global GOT entries */
	for (i = obj->gotsym; i < obj->symtabno; i++) {
		dbg(" doing got %d sym %p (%s, %lx)", i - obj->gotsym, sym,
		    sym->st_name + obj->strtab, (u_long) *got);

#ifdef SUPPORT_OLD_BROKEN_LD
		if (ELF_ST_TYPE(sym->st_info) == STT_FUNC &&
		    broken && sym->st_shndx == SHN_UNDEF) {
			/*
			 * XXX DANGER WILL ROBINSON!
			 * You might think this is stupid, as it intentionally
			 * defeats lazy binding -- and you'd be right.
			 * Unfortunately, for lazy binding to work right, we
			 * need a way to force the GOT slots used for
			 * function pointers to be resolved immediately.  This
			 * is supposed to be done automatically by the linker,
			 * by not outputting a PLT slot and setting st_value
			 * to 0 if there are non-PLT references, but older
			 * versions of GNU ld do not do this.
			 */
			def = find_symdef(i, obj, &defobj, flags, NULL,
			    lockstate);
			if (def == NULL)
				return -1;
			*got = def->st_value + (Elf_Addr)defobj->relocbase;
		} else
#endif
		if (ELF_ST_TYPE(sym->st_info) == STT_FUNC &&
		    sym->st_value != 0 && sym->st_shndx == SHN_UNDEF) {
			/*
			 * If there are non-PLT references to the function,
			 * st_value should be 0, forcing us to resolve the
			 * address immediately.
			 *
			 * XXX DANGER WILL ROBINSON!
			 * The linker is not outputting PLT slots for calls to
			 * functions that are defined in the same shared
			 * library.  This is a bug, because it can screw up
			 * link ordering rules if the symbol is defined in
			 * more than one module.  For now, if there is a
			 * definition, we fail the test above and force a full
			 * symbol lookup.  This means that all intra-module
			 * calls are bound immediately.  - mycroft, 2003/09/24
			 */
			*got = sym->st_value + (Elf_Addr)obj->relocbase;
			if ((Elf_Addr)(*got) == (Elf_Addr)obj->relocbase) {
				dbg("Warning2, i:%d maps to relocbase address:%p",
				    i, obj->relocbase);
			}

		} else if (sym->st_info == ELF_ST_INFO(STB_GLOBAL, STT_SECTION)) {
			/* Symbols with index SHN_ABS are not relocated. */
			if (sym->st_shndx != SHN_ABS) {
				*got = sym->st_value +
				    (Elf_Addr)obj->relocbase;
				if ((Elf_Addr)(*got) == (Elf_Addr)obj->relocbase) {
					dbg("Warning3, i:%d maps to relocbase address:%p",
					    i, obj->relocbase);
				}
			}
		} else {
			/* TODO: add cache here */
			def = find_symdef(i, obj, &defobj, flags, NULL,
			    lockstate);
			if (def == NULL) {
				dbg("Warning4, can't find symbol %d", i);
				return -1;
			}
			*got = def->st_value + (Elf_Addr)defobj->relocbase;
			if ((Elf_Addr)(*got) == (Elf_Addr)obj->relocbase) {
				dbg("Warning4, i:%d maps to relocbase address:%p",
				    i, obj->relocbase);
				dbg("via first obj symbol %s",
				    obj->strtab + obj->symtab[i].st_name);
				dbg("found in obj %p:%s",
				    defobj, defobj->path);
			}
		}

		dbg("  --> now %lx", (u_long) *got);
		++sym;
		++got;
	}

	got = obj->pltgot;
	rellim = (const Elf_Rel *)((caddr_t)obj->rel + obj->relsize);
	for (rel = obj->rel; rel < rellim; rel++) {
		Elf_Word	r_symndx, r_type;
		void		*where;

		where = obj->relocbase + rel->r_offset;
		r_symndx = ELF_R_SYM(rel->r_info);
		r_type = ELF_R_TYPE(rel->r_info);

		switch (r_type & 0xff) {
		case R_TYPE(NONE):
			break;

		case R_TYPE(REL32): {
			/* 32-bit relocation adjusted by the symbol or load address */
			const size_t rlen =
			    ELF_R_NXTTYPE_64_P(r_type)
				? sizeof(Elf_Sxword)
				: sizeof(Elf_Sword);
			Elf_Sxword old = load_ptr(where, rlen);
			Elf_Sxword val = old;

			def = obj->symtab + r_symndx;

			if (r_symndx >= obj->gotsym) {
				val += got[obj->local_gotno + r_symndx - obj->gotsym];
				dbg("REL32/G(%p) %p --> %p (%s) in %s",
				    where, (void *)old, (void *)val,
				    obj->strtab + def->st_name,
				    obj->path);
			} else {
				/*
				 * XXX: ABI DIFFERENCE!
				 *
				 * Old NetBSD binutils would generate shared
				 * libs with section-relative relocations being
				 * already adjusted for the start address of
				 * the section.
				 *
				 * New binutils, OTOH, generate shared libs
				 * with the same relocations being based at
				 * zero, so we need to add in the start address
				 * of the section.
				 *
				 * --rkb, Oct 6, 2001
				 */

				if (def->st_info ==
				    ELF_ST_INFO(STB_LOCAL, STT_SECTION)
#ifdef SUPPORT_OLD_BROKEN_LD
				    && !broken
#endif
				    )
					val += (Elf_Addr)def->st_value;

				val += (Elf_Addr)obj->relocbase;

				dbg("REL32/L(%p) %p -> %p (%s) in %s",
				    where, (void *)old, (void *)val,
				    obj->strtab + def->st_name, obj->path);
			}
			store_ptr(where, val, rlen);
			break;
		}

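		/*
		 * Editorial note on the TLS relocations below: DTPMOD stores
		 * the defining object's module index for dynamic TLS lookup,
		 * DTPREL stores the symbol's offset within that module's TLS
		 * block biased by TLS_DTP_OFFSET, and TPREL stores a static
		 * offset from the thread pointer, adjusted by the object's
		 * tlsoffset, TLS_TP_OFFSET and the TCB size.
		 */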
#ifdef __mips_n64
		case R_TYPE(TLS_DTPMOD64):
#else
		case R_TYPE(TLS_DTPMOD32):
#endif
		{
			const size_t rlen = sizeof(Elf_Addr);
			Elf_Addr old = load_ptr(where, rlen);
			Elf_Addr val = old;

			def = find_symdef(r_symndx, obj, &defobj, flags, NULL,
			    lockstate);
			if (def == NULL)
				return -1;

			val += (Elf_Addr)defobj->tlsindex;

			store_ptr(where, val, rlen);
			dbg("DTPMOD %s in %s %p --> %p in %s",
			    obj->strtab + obj->symtab[r_symndx].st_name,
			    obj->path, (void *)old, (void *)val, defobj->path);
			break;
		}

#ifdef __mips_n64
		case R_TYPE(TLS_DTPREL64):
#else
		case R_TYPE(TLS_DTPREL32):
#endif
		{
			const size_t rlen = sizeof(Elf_Addr);
			Elf_Addr old = load_ptr(where, rlen);
			Elf_Addr val = old;

			def = find_symdef(r_symndx, obj, &defobj, flags, NULL,
			    lockstate);
			if (def == NULL)
				return -1;

			if (!defobj->tls_done && allocate_tls_offset(obj))
				return -1;

			val += (Elf_Addr)def->st_value - TLS_DTP_OFFSET;
			store_ptr(where, val, rlen);

			dbg("DTPREL %s in %s %p --> %p in %s",
			    obj->strtab + obj->symtab[r_symndx].st_name,
			    obj->path, (void *)old, (void *)val, defobj->path);
			break;
		}

#ifdef __mips_n64
		case R_TYPE(TLS_TPREL64):
#else
		case R_TYPE(TLS_TPREL32):
#endif
		{
			const size_t rlen = sizeof(Elf_Addr);
			Elf_Addr old = load_ptr(where, rlen);
			Elf_Addr val = old;

			def = find_symdef(r_symndx, obj, &defobj, flags, NULL,
			    lockstate);

			if (def == NULL)
				return -1;

			if (!defobj->tls_done && allocate_tls_offset(obj))
				return -1;

			val += (Elf_Addr)(def->st_value + defobj->tlsoffset
			    - TLS_TP_OFFSET - TLS_TCB_SIZE);
			store_ptr(where, val, rlen);

			dbg("TPREL %s in %s %p --> %p in %s",
			    obj->strtab + obj->symtab[r_symndx].st_name,
			    obj->path, (void *)old, (void *)val, defobj->path);
			break;
		}

		default:
			dbg("sym = %lu, type = %lu, offset = %p, "
			    "contents = %p, symbol = %s",
			    (u_long)r_symndx, (u_long)ELF_R_TYPE(rel->r_info),
			    (void *)rel->r_offset,
			    (void *)load_ptr(where, sizeof(Elf_Sword)),
			    obj->strtab + obj->symtab[r_symndx].st_name);
			_rtld_error("%s: Unsupported relocation type %lu "
			    "in non-PLT relocations",
			    obj->path, (u_long) ELF_R_TYPE(rel->r_info));
			return -1;
		}
	}

	return 0;
}

/*
 *  Process the PLT relocations.
 */
int
reloc_plt(Obj_Entry *obj)
{
#if 0
	const Elf_Rel *rellim;
	const Elf_Rel *rel;

	dbg("reloc_plt obj:%p pltrel:%p sz:%d", obj, obj->pltrel, (int)obj->pltrelsize);
	dbg("gottable %p num syms:%d", obj->pltgot, obj->symtabno);
	dbg("*****************************************************");
	rellim = (const Elf_Rel *)((char *)obj->pltrel +
	    obj->pltrelsize);
	for (rel = obj->pltrel; rel < rellim; rel++) {
		Elf_Addr *where;
		where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
		*where += (Elf_Addr)obj->relocbase;
	}

#endif
	/* PLT fixups were done above in the GOT relocation. */
	return (0);
}

/*
 * LD_BIND_NOW was set - force relocation for all jump slots
 */
int
reloc_jmpslots(Obj_Entry *obj, int flags, RtldLockState *lockstate)
{
	/* Do nothing */
	obj->jmpslots_done = true;

	return (0);
}

int
reloc_iresolve(Obj_Entry *obj, struct Struct_RtldLockState *lockstate)
{

	/* XXX not implemented */
	return (0);
}

int
reloc_gnu_ifunc(Obj_Entry *obj, int flags,
    struct Struct_RtldLockState *lockstate)
{

	/* XXX not implemented */
	return (0);
}

Elf_Addr
reloc_jmpslot(Elf_Addr *where, Elf_Addr target, const Obj_Entry *defobj,
    const Obj_Entry *obj, const Elf_Rel *rel)
{

	/* Do nothing */

	return target;
}

void
ifunc_init(Elf_Auxinfo aux_info[__min_size(AT_COUNT)] __unused)
{
}

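/*
 * Editorial note: MIPS uses the Variant I TLS layout, and this port reaches
 * the thread pointer through sysarch(MIPS_SET_TLS)/sysarch(MIPS_GET_TLS)
 * rather than a direct register read.  allocate_initial_tls() sizes the
 * static TLS block and installs the TCB for the initial thread;
 * __tls_get_addr() is the ABI-mandated dynamic lookup and adds
 * TLS_DTP_OFFSET back to ti_offset to undo the DTPREL bias applied above.
 */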
void
allocate_initial_tls(Obj_Entry *objs)
{
	char *tls;

	/*
	 * Fix the size of the static TLS block by using the maximum
	 * offset allocated so far and adding a bit for dynamic modules to
	 * use.
	 */
	tls_static_space = tls_last_offset + tls_last_size + RTLD_STATIC_TLS_EXTRA;

	tls = (char *) allocate_tls(objs, NULL, TLS_TCB_SIZE, 8);

	sysarch(MIPS_SET_TLS, tls);
}

void *
__tls_get_addr(tls_index *ti)
{
	Elf_Addr **tls;
	char *p;

	sysarch(MIPS_GET_TLS, &tls);

	p = tls_get_addr_common(tls, ti->ti_module, ti->ti_offset + TLS_DTP_OFFSET);

	return (p);
}