Deleted Added
full compact
reloc.c (172708) reloc.c (208256)
1/* $NetBSD: mdreloc.c,v 1.5 2001/04/25 12:24:51 kleink Exp $ */
2
3/*-
4 * Copyright (c) 2000 Eduardo Horvath.
5 * Copyright (c) 1999 The NetBSD Foundation, Inc.
6 * All rights reserved.
7 *
8 * This code is derived from software contributed to The NetBSD Foundation
9 * by Paul Kranenburg.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40#include <sys/cdefs.h>
1/* $NetBSD: mdreloc.c,v 1.5 2001/04/25 12:24:51 kleink Exp $ */
2
3/*-
4 * Copyright (c) 2000 Eduardo Horvath.
5 * Copyright (c) 1999 The NetBSD Foundation, Inc.
6 * All rights reserved.
7 *
8 * This code is derived from software contributed to The NetBSD Foundation
9 * by Paul Kranenburg.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40#include <sys/cdefs.h>
41__FBSDID("$FreeBSD: head/libexec/rtld-elf/sparc64/reloc.c 172708 2007-10-16 19:17:48Z marius $");
41__FBSDID("$FreeBSD: head/libexec/rtld-elf/sparc64/reloc.c 208256 2010-05-18 08:55:23Z rdivacky $");
42
43#include <sys/param.h>
44#include <sys/mman.h>
45
46#include <errno.h>
47#include <stdio.h>
48#include <stdlib.h>
49#include <string.h>
50#include <unistd.h>
51
52#include "debug.h"
53#include "rtld.h"
54
55/*
56 * The following table holds for each relocation type:
57 * - the width in bits of the memory location the relocation
58 * applies to (not currently used)
59 * - the number of bits the relocation value must be shifted to the
60 * right (i.e. discard least significant bits) to fit into
61 * the appropriate field in the instruction word.
62 * - flags indicating whether
63 * * the relocation involves a symbol
64 * * the relocation is relative to the current position
65 * * the relocation is for a GOT entry
66 * * the relocation is relative to the load address
67 *
68 */
69#define _RF_S 0x80000000 /* Resolve symbol */
70#define _RF_A 0x40000000 /* Use addend */
71#define _RF_P 0x20000000 /* Location relative */
72#define _RF_G 0x10000000 /* GOT offset */
73#define _RF_B 0x08000000 /* Load address relative */
74#define _RF_U 0x04000000 /* Unaligned */
75#define _RF_SZ(s) (((s) & 0xff) << 8) /* memory target size */
76#define _RF_RS(s) ( (s) & 0xff) /* right shift */
77static const int reloc_target_flags[] = {
78 0, /* NONE */
79 _RF_S|_RF_A| _RF_SZ(8) | _RF_RS(0), /* RELOC_8 */
80 _RF_S|_RF_A| _RF_SZ(16) | _RF_RS(0), /* RELOC_16 */
81 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(0), /* RELOC_32 */
82 _RF_S|_RF_A|_RF_P| _RF_SZ(8) | _RF_RS(0), /* DISP_8 */
83 _RF_S|_RF_A|_RF_P| _RF_SZ(16) | _RF_RS(0), /* DISP_16 */
84 _RF_S|_RF_A|_RF_P| _RF_SZ(32) | _RF_RS(0), /* DISP_32 */
85 _RF_S|_RF_A|_RF_P| _RF_SZ(32) | _RF_RS(2), /* WDISP_30 */
86 _RF_S|_RF_A|_RF_P| _RF_SZ(32) | _RF_RS(2), /* WDISP_22 */
87 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(10), /* HI22 */
88 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(0), /* 22 */
89 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(0), /* 13 */
90 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(0), /* LO10 */
91 _RF_G| _RF_SZ(32) | _RF_RS(0), /* GOT10 */
92 _RF_G| _RF_SZ(32) | _RF_RS(0), /* GOT13 */
93 _RF_G| _RF_SZ(32) | _RF_RS(10), /* GOT22 */
94 _RF_S|_RF_A|_RF_P| _RF_SZ(32) | _RF_RS(0), /* PC10 */
95 _RF_S|_RF_A|_RF_P| _RF_SZ(32) | _RF_RS(10), /* PC22 */
96 _RF_A|_RF_P| _RF_SZ(32) | _RF_RS(2), /* WPLT30 */
97 _RF_SZ(32) | _RF_RS(0), /* COPY */
98 _RF_S|_RF_A| _RF_SZ(64) | _RF_RS(0), /* GLOB_DAT */
99 _RF_SZ(32) | _RF_RS(0), /* JMP_SLOT */
100 _RF_A| _RF_B| _RF_SZ(64) | _RF_RS(0), /* RELATIVE */
101 _RF_S|_RF_A| _RF_U| _RF_SZ(32) | _RF_RS(0), /* UA_32 */
102
103 _RF_A| _RF_SZ(32) | _RF_RS(0), /* PLT32 */
104 _RF_A| _RF_SZ(32) | _RF_RS(10), /* HIPLT22 */
105 _RF_A| _RF_SZ(32) | _RF_RS(0), /* LOPLT10 */
106 _RF_A|_RF_P| _RF_SZ(32) | _RF_RS(0), /* PCPLT32 */
107 _RF_A|_RF_P| _RF_SZ(32) | _RF_RS(10), /* PCPLT22 */
108 _RF_A|_RF_P| _RF_SZ(32) | _RF_RS(0), /* PCPLT10 */
109 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(0), /* 10 */
110 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(0), /* 11 */
111 _RF_S|_RF_A| _RF_SZ(64) | _RF_RS(0), /* 64 */
112 _RF_S|_RF_A|/*extra*/ _RF_SZ(32) | _RF_RS(0), /* OLO10 */
113 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(42), /* HH22 */
114 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(32), /* HM10 */
115 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(10), /* LM22 */
116 _RF_S|_RF_A|_RF_P| _RF_SZ(32) | _RF_RS(42), /* PC_HH22 */
117 _RF_S|_RF_A|_RF_P| _RF_SZ(32) | _RF_RS(32), /* PC_HM10 */
118 _RF_S|_RF_A|_RF_P| _RF_SZ(32) | _RF_RS(10), /* PC_LM22 */
119 _RF_S|_RF_A|_RF_P| _RF_SZ(32) | _RF_RS(2), /* WDISP16 */
120 _RF_S|_RF_A|_RF_P| _RF_SZ(32) | _RF_RS(2), /* WDISP19 */
121 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(0), /* GLOB_JMP */
122 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(0), /* 7 */
123 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(0), /* 5 */
124 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(0), /* 6 */
125 _RF_S|_RF_A|_RF_P| _RF_SZ(64) | _RF_RS(0), /* DISP64 */
126 _RF_A| _RF_SZ(64) | _RF_RS(0), /* PLT64 */
127 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(10), /* HIX22 */
128 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(0), /* LOX10 */
129 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(22), /* H44 */
130 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(12), /* M44 */
131 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(0), /* L44 */
132 _RF_S|_RF_A| _RF_SZ(64) | _RF_RS(0), /* REGISTER */
133 _RF_S|_RF_A| _RF_U| _RF_SZ(64) | _RF_RS(0), /* UA64 */
134 _RF_S|_RF_A| _RF_U| _RF_SZ(16) | _RF_RS(0), /* UA16 */
135};
136
137#if 0
138static const char *reloc_names[] = {
139 "NONE", "RELOC_8", "RELOC_16", "RELOC_32", "DISP_8",
140 "DISP_16", "DISP_32", "WDISP_30", "WDISP_22", "HI22",
141 "22", "13", "LO10", "GOT10", "GOT13",
142 "GOT22", "PC10", "PC22", "WPLT30", "COPY",
143 "GLOB_DAT", "JMP_SLOT", "RELATIVE", "UA_32", "PLT32",
144 "HIPLT22", "LOPLT10", "LOPLT10", "PCPLT22", "PCPLT32",
145 "10", "11", "64", "OLO10", "HH22",
146 "HM10", "LM22", "PC_HH22", "PC_HM10", "PC_LM22",
147 "WDISP16", "WDISP19", "GLOB_JMP", "7", "5", "6",
148 "DISP64", "PLT64", "HIX22", "LOX10", "H44", "M44",
149 "L44", "REGISTER", "UA64", "UA16"
150};
151#endif
152
153#define RELOC_RESOLVE_SYMBOL(t) ((reloc_target_flags[t] & _RF_S) != 0)
154#define RELOC_PC_RELATIVE(t) ((reloc_target_flags[t] & _RF_P) != 0)
155#define RELOC_BASE_RELATIVE(t) ((reloc_target_flags[t] & _RF_B) != 0)
156#define RELOC_UNALIGNED(t) ((reloc_target_flags[t] & _RF_U) != 0)
157#define RELOC_USE_ADDEND(t) ((reloc_target_flags[t] & _RF_A) != 0)
158#define RELOC_TARGET_SIZE(t) ((reloc_target_flags[t] >> 8) & 0xff)
159#define RELOC_VALUE_RIGHTSHIFT(t) (reloc_target_flags[t] & 0xff)
160
161static const long reloc_target_bitmask[] = {
162#define _BM(x) (~(-(1ULL << (x))))
163 0, /* NONE */
164 _BM(8), _BM(16), _BM(32), /* RELOC_8, _16, _32 */
165 _BM(8), _BM(16), _BM(32), /* DISP8, DISP16, DISP32 */
166 _BM(30), _BM(22), /* WDISP30, WDISP22 */
167 _BM(22), _BM(22), /* HI22, _22 */
168 _BM(13), _BM(10), /* RELOC_13, _LO10 */
169 _BM(10), _BM(13), _BM(22), /* GOT10, GOT13, GOT22 */
170 _BM(10), _BM(22), /* _PC10, _PC22 */
171 _BM(30), 0, /* _WPLT30, _COPY */
172 _BM(32), _BM(32), _BM(32), /* _GLOB_DAT, JMP_SLOT, _RELATIVE */
173 _BM(32), _BM(32), /* _UA32, PLT32 */
174 _BM(22), _BM(10), /* _HIPLT22, LOPLT10 */
175 _BM(32), _BM(22), _BM(10), /* _PCPLT32, _PCPLT22, _PCPLT10 */
176 _BM(10), _BM(11), -1, /* _10, _11, _64 */
177 _BM(13), _BM(22), /* _OLO10, _HH22 */
178 _BM(10), _BM(22), /* _HM10, _LM22 */
179 _BM(22), _BM(10), _BM(22), /* _PC_HH22, _PC_HM10, _PC_LM22 */
180 _BM(16), _BM(19), /* _WDISP16, _WDISP19 */
181 -1, /* GLOB_JMP */
182 _BM(7), _BM(5), _BM(6), /* _7, _5, _6 */
183 -1, -1, /* DISP64, PLT64 */
184 _BM(22), _BM(13), /* HIX22, LOX10 */
185 _BM(22), _BM(10), _BM(13), /* H44, M44, L44 */
186 -1, -1, _BM(16), /* REGISTER, UA64, UA16 */
187#undef _BM
188};
189#define RELOC_VALUE_BITMASK(t) (reloc_target_bitmask[t])
190
191#undef flush
192#define flush(va, offs) \
193 __asm __volatile("flush %0 + %1" : : "r" (va), "I" (offs));
194
195static int reloc_nonplt_object(Obj_Entry *obj, const Elf_Rela *rela,
196 SymCache *cache);
197static void install_plt(Elf_Word *pltgot, Elf_Addr proc);
198
199extern char _rtld_bind_start_0[];
200extern char _rtld_bind_start_1[];
201
202int
203do_copy_relocations(Obj_Entry *dstobj)
204{
205 const Elf_Rela *relalim;
206 const Elf_Rela *rela;
207 const Elf_Sym *dstsym;
208 const Elf_Sym *srcsym;
209 const Ver_Entry *ve;
210 void *dstaddr;
211 const void *srcaddr;
212 Obj_Entry *srcobj;
213 unsigned long hash;
214 const char *name;
215 size_t size;
216
217 assert(dstobj->mainprog); /* COPY relocations are invalid elsewhere */
218
219 relalim = (const Elf_Rela *)((caddr_t)dstobj->rela + dstobj->relasize);
220 for (rela = dstobj->rela; rela < relalim; rela++) {
221 if (ELF_R_TYPE(rela->r_info) == R_SPARC_COPY) {
222 dstaddr = (void *)(dstobj->relocbase + rela->r_offset);
223 dstsym = dstobj->symtab + ELF_R_SYM(rela->r_info);
224 name = dstobj->strtab + dstsym->st_name;
225 hash = elf_hash(name);
226 size = dstsym->st_size;
227 ve = fetch_ventry(dstobj, ELF_R_SYM(rela->r_info));
228
229 for (srcobj = dstobj->next; srcobj != NULL;
230 srcobj = srcobj->next)
231 if ((srcsym = symlook_obj(name, hash, srcobj,
232 ve, 0)) != NULL)
233 break;
234
235 if (srcobj == NULL) {
236 _rtld_error("Undefined symbol \"%s\""
237 "referenced from COPY relocation"
238 "in %s", name, dstobj->path);
239 return (-1);
240 }
241
242 srcaddr = (const void *)(srcobj->relocbase +
243 srcsym->st_value);
244 memcpy(dstaddr, srcaddr, size);
245 }
246 }
247
248 return (0);
249}
250
251int
252reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld)
253{
254 const Elf_Rela *relalim;
255 const Elf_Rela *rela;
256 SymCache *cache;
42
43#include <sys/param.h>
44#include <sys/mman.h>
45
46#include <errno.h>
47#include <stdio.h>
48#include <stdlib.h>
49#include <string.h>
50#include <unistd.h>
51
52#include "debug.h"
53#include "rtld.h"
54
55/*
56 * The following table holds for each relocation type:
57 * - the width in bits of the memory location the relocation
58 * applies to (not currently used)
59 * - the number of bits the relocation value must be shifted to the
60 * right (i.e. discard least significant bits) to fit into
61 * the appropriate field in the instruction word.
62 * - flags indicating whether
63 * * the relocation involves a symbol
64 * * the relocation is relative to the current position
65 * * the relocation is for a GOT entry
66 * * the relocation is relative to the load address
67 *
68 */
69#define _RF_S 0x80000000 /* Resolve symbol */
70#define _RF_A 0x40000000 /* Use addend */
71#define _RF_P 0x20000000 /* Location relative */
72#define _RF_G 0x10000000 /* GOT offset */
73#define _RF_B 0x08000000 /* Load address relative */
74#define _RF_U 0x04000000 /* Unaligned */
75#define _RF_SZ(s) (((s) & 0xff) << 8) /* memory target size */
76#define _RF_RS(s) ( (s) & 0xff) /* right shift */
77static const int reloc_target_flags[] = {
78 0, /* NONE */
79 _RF_S|_RF_A| _RF_SZ(8) | _RF_RS(0), /* RELOC_8 */
80 _RF_S|_RF_A| _RF_SZ(16) | _RF_RS(0), /* RELOC_16 */
81 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(0), /* RELOC_32 */
82 _RF_S|_RF_A|_RF_P| _RF_SZ(8) | _RF_RS(0), /* DISP_8 */
83 _RF_S|_RF_A|_RF_P| _RF_SZ(16) | _RF_RS(0), /* DISP_16 */
84 _RF_S|_RF_A|_RF_P| _RF_SZ(32) | _RF_RS(0), /* DISP_32 */
85 _RF_S|_RF_A|_RF_P| _RF_SZ(32) | _RF_RS(2), /* WDISP_30 */
86 _RF_S|_RF_A|_RF_P| _RF_SZ(32) | _RF_RS(2), /* WDISP_22 */
87 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(10), /* HI22 */
88 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(0), /* 22 */
89 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(0), /* 13 */
90 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(0), /* LO10 */
91 _RF_G| _RF_SZ(32) | _RF_RS(0), /* GOT10 */
92 _RF_G| _RF_SZ(32) | _RF_RS(0), /* GOT13 */
93 _RF_G| _RF_SZ(32) | _RF_RS(10), /* GOT22 */
94 _RF_S|_RF_A|_RF_P| _RF_SZ(32) | _RF_RS(0), /* PC10 */
95 _RF_S|_RF_A|_RF_P| _RF_SZ(32) | _RF_RS(10), /* PC22 */
96 _RF_A|_RF_P| _RF_SZ(32) | _RF_RS(2), /* WPLT30 */
97 _RF_SZ(32) | _RF_RS(0), /* COPY */
98 _RF_S|_RF_A| _RF_SZ(64) | _RF_RS(0), /* GLOB_DAT */
99 _RF_SZ(32) | _RF_RS(0), /* JMP_SLOT */
100 _RF_A| _RF_B| _RF_SZ(64) | _RF_RS(0), /* RELATIVE */
101 _RF_S|_RF_A| _RF_U| _RF_SZ(32) | _RF_RS(0), /* UA_32 */
102
103 _RF_A| _RF_SZ(32) | _RF_RS(0), /* PLT32 */
104 _RF_A| _RF_SZ(32) | _RF_RS(10), /* HIPLT22 */
105 _RF_A| _RF_SZ(32) | _RF_RS(0), /* LOPLT10 */
106 _RF_A|_RF_P| _RF_SZ(32) | _RF_RS(0), /* PCPLT32 */
107 _RF_A|_RF_P| _RF_SZ(32) | _RF_RS(10), /* PCPLT22 */
108 _RF_A|_RF_P| _RF_SZ(32) | _RF_RS(0), /* PCPLT10 */
109 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(0), /* 10 */
110 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(0), /* 11 */
111 _RF_S|_RF_A| _RF_SZ(64) | _RF_RS(0), /* 64 */
112 _RF_S|_RF_A|/*extra*/ _RF_SZ(32) | _RF_RS(0), /* OLO10 */
113 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(42), /* HH22 */
114 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(32), /* HM10 */
115 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(10), /* LM22 */
116 _RF_S|_RF_A|_RF_P| _RF_SZ(32) | _RF_RS(42), /* PC_HH22 */
117 _RF_S|_RF_A|_RF_P| _RF_SZ(32) | _RF_RS(32), /* PC_HM10 */
118 _RF_S|_RF_A|_RF_P| _RF_SZ(32) | _RF_RS(10), /* PC_LM22 */
119 _RF_S|_RF_A|_RF_P| _RF_SZ(32) | _RF_RS(2), /* WDISP16 */
120 _RF_S|_RF_A|_RF_P| _RF_SZ(32) | _RF_RS(2), /* WDISP19 */
121 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(0), /* GLOB_JMP */
122 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(0), /* 7 */
123 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(0), /* 5 */
124 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(0), /* 6 */
125 _RF_S|_RF_A|_RF_P| _RF_SZ(64) | _RF_RS(0), /* DISP64 */
126 _RF_A| _RF_SZ(64) | _RF_RS(0), /* PLT64 */
127 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(10), /* HIX22 */
128 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(0), /* LOX10 */
129 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(22), /* H44 */
130 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(12), /* M44 */
131 _RF_S|_RF_A| _RF_SZ(32) | _RF_RS(0), /* L44 */
132 _RF_S|_RF_A| _RF_SZ(64) | _RF_RS(0), /* REGISTER */
133 _RF_S|_RF_A| _RF_U| _RF_SZ(64) | _RF_RS(0), /* UA64 */
134 _RF_S|_RF_A| _RF_U| _RF_SZ(16) | _RF_RS(0), /* UA16 */
135};
136
137#if 0
138static const char *reloc_names[] = {
139 "NONE", "RELOC_8", "RELOC_16", "RELOC_32", "DISP_8",
140 "DISP_16", "DISP_32", "WDISP_30", "WDISP_22", "HI22",
141 "22", "13", "LO10", "GOT10", "GOT13",
142 "GOT22", "PC10", "PC22", "WPLT30", "COPY",
143 "GLOB_DAT", "JMP_SLOT", "RELATIVE", "UA_32", "PLT32",
144 "HIPLT22", "LOPLT10", "LOPLT10", "PCPLT22", "PCPLT32",
145 "10", "11", "64", "OLO10", "HH22",
146 "HM10", "LM22", "PC_HH22", "PC_HM10", "PC_LM22",
147 "WDISP16", "WDISP19", "GLOB_JMP", "7", "5", "6",
148 "DISP64", "PLT64", "HIX22", "LOX10", "H44", "M44",
149 "L44", "REGISTER", "UA64", "UA16"
150};
151#endif
152
153#define RELOC_RESOLVE_SYMBOL(t) ((reloc_target_flags[t] & _RF_S) != 0)
154#define RELOC_PC_RELATIVE(t) ((reloc_target_flags[t] & _RF_P) != 0)
155#define RELOC_BASE_RELATIVE(t) ((reloc_target_flags[t] & _RF_B) != 0)
156#define RELOC_UNALIGNED(t) ((reloc_target_flags[t] & _RF_U) != 0)
157#define RELOC_USE_ADDEND(t) ((reloc_target_flags[t] & _RF_A) != 0)
158#define RELOC_TARGET_SIZE(t) ((reloc_target_flags[t] >> 8) & 0xff)
159#define RELOC_VALUE_RIGHTSHIFT(t) (reloc_target_flags[t] & 0xff)
160
161static const long reloc_target_bitmask[] = {
162#define _BM(x) (~(-(1ULL << (x))))
163 0, /* NONE */
164 _BM(8), _BM(16), _BM(32), /* RELOC_8, _16, _32 */
165 _BM(8), _BM(16), _BM(32), /* DISP8, DISP16, DISP32 */
166 _BM(30), _BM(22), /* WDISP30, WDISP22 */
167 _BM(22), _BM(22), /* HI22, _22 */
168 _BM(13), _BM(10), /* RELOC_13, _LO10 */
169 _BM(10), _BM(13), _BM(22), /* GOT10, GOT13, GOT22 */
170 _BM(10), _BM(22), /* _PC10, _PC22 */
171 _BM(30), 0, /* _WPLT30, _COPY */
172 _BM(32), _BM(32), _BM(32), /* _GLOB_DAT, JMP_SLOT, _RELATIVE */
173 _BM(32), _BM(32), /* _UA32, PLT32 */
174 _BM(22), _BM(10), /* _HIPLT22, LOPLT10 */
175 _BM(32), _BM(22), _BM(10), /* _PCPLT32, _PCPLT22, _PCPLT10 */
176 _BM(10), _BM(11), -1, /* _10, _11, _64 */
177 _BM(13), _BM(22), /* _OLO10, _HH22 */
178 _BM(10), _BM(22), /* _HM10, _LM22 */
179 _BM(22), _BM(10), _BM(22), /* _PC_HH22, _PC_HM10, _PC_LM22 */
180 _BM(16), _BM(19), /* _WDISP16, _WDISP19 */
181 -1, /* GLOB_JMP */
182 _BM(7), _BM(5), _BM(6), /* _7, _5, _6 */
183 -1, -1, /* DISP64, PLT64 */
184 _BM(22), _BM(13), /* HIX22, LOX10 */
185 _BM(22), _BM(10), _BM(13), /* H44, M44, L44 */
186 -1, -1, _BM(16), /* REGISTER, UA64, UA16 */
187#undef _BM
188};
189#define RELOC_VALUE_BITMASK(t) (reloc_target_bitmask[t])
190
191#undef flush
192#define flush(va, offs) \
193 __asm __volatile("flush %0 + %1" : : "r" (va), "I" (offs));
194
195static int reloc_nonplt_object(Obj_Entry *obj, const Elf_Rela *rela,
196 SymCache *cache);
197static void install_plt(Elf_Word *pltgot, Elf_Addr proc);
198
199extern char _rtld_bind_start_0[];
200extern char _rtld_bind_start_1[];
201
202int
203do_copy_relocations(Obj_Entry *dstobj)
204{
205 const Elf_Rela *relalim;
206 const Elf_Rela *rela;
207 const Elf_Sym *dstsym;
208 const Elf_Sym *srcsym;
209 const Ver_Entry *ve;
210 void *dstaddr;
211 const void *srcaddr;
212 Obj_Entry *srcobj;
213 unsigned long hash;
214 const char *name;
215 size_t size;
216
217 assert(dstobj->mainprog); /* COPY relocations are invalid elsewhere */
218
219 relalim = (const Elf_Rela *)((caddr_t)dstobj->rela + dstobj->relasize);
220 for (rela = dstobj->rela; rela < relalim; rela++) {
221 if (ELF_R_TYPE(rela->r_info) == R_SPARC_COPY) {
222 dstaddr = (void *)(dstobj->relocbase + rela->r_offset);
223 dstsym = dstobj->symtab + ELF_R_SYM(rela->r_info);
224 name = dstobj->strtab + dstsym->st_name;
225 hash = elf_hash(name);
226 size = dstsym->st_size;
227 ve = fetch_ventry(dstobj, ELF_R_SYM(rela->r_info));
228
229 for (srcobj = dstobj->next; srcobj != NULL;
230 srcobj = srcobj->next)
231 if ((srcsym = symlook_obj(name, hash, srcobj,
232 ve, 0)) != NULL)
233 break;
234
235 if (srcobj == NULL) {
236 _rtld_error("Undefined symbol \"%s\""
237 "referenced from COPY relocation"
238 "in %s", name, dstobj->path);
239 return (-1);
240 }
241
242 srcaddr = (const void *)(srcobj->relocbase +
243 srcsym->st_value);
244 memcpy(dstaddr, srcaddr, size);
245 }
246 }
247
248 return (0);
249}
250
251int
252reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld)
253{
254 const Elf_Rela *relalim;
255 const Elf_Rela *rela;
256 SymCache *cache;
257 int bytes = obj->nchains * sizeof(SymCache);
258 int r = -1;
259
260 /*
261 * The dynamic loader may be called from a thread, we have
262 * limited amounts of stack available so we cannot use alloca().
263 */
264 if (obj != obj_rtld) {
257 int r = -1;
258
259 /*
260 * The dynamic loader may be called from a thread, we have
261 * limited amounts of stack available so we cannot use alloca().
262 */
263 if (obj != obj_rtld) {
265 cache = mmap(NULL, bytes, PROT_READ|PROT_WRITE, MAP_ANON,
266 -1, 0);
267 if (cache == MAP_FAILED)
268 cache = NULL;
264 cache = calloc(obj->nchains, sizeof(SymCache));
265 /* No need to check for NULL here */
269 } else
270 cache = NULL;
271
272 relalim = (const Elf_Rela *)((caddr_t)obj->rela + obj->relasize);
273 for (rela = obj->rela; rela < relalim; rela++) {
274 if (reloc_nonplt_object(obj, rela, cache) < 0)
275 goto done;
276 }
277 r = 0;
278done:
266 } else
267 cache = NULL;
268
269 relalim = (const Elf_Rela *)((caddr_t)obj->rela + obj->relasize);
270 for (rela = obj->rela; rela < relalim; rela++) {
271 if (reloc_nonplt_object(obj, rela, cache) < 0)
272 goto done;
273 }
274 r = 0;
275done:
279 if (cache)
280 munmap(cache, bytes);
276 if (cache != NULL)
277 free(cache);
281 return (r);
282}
283
284static int
285reloc_nonplt_object(Obj_Entry *obj, const Elf_Rela *rela, SymCache *cache)
286{
287 const Obj_Entry *defobj;
288 const Elf_Sym *def;
289 Elf_Addr *where;
290 Elf_Word *where32;
291 Elf_Word type;
292 Elf_Addr value;
293 Elf_Addr mask;
294
295 where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
296 where32 = (Elf_Word *)where;
297 defobj = NULL;
298 def = NULL;
299
300 type = ELF64_R_TYPE_ID(rela->r_info);
301 if (type == R_SPARC_NONE)
302 return (0);
303
304 /* We do JMP_SLOTs below */
305 if (type == R_SPARC_JMP_SLOT)
306 return (0);
307
308 /* COPY relocs are also handled elsewhere */
309 if (type == R_SPARC_COPY)
310 return (0);
311
312 /*
313 * Note: R_SPARC_UA16 must be numerically largest relocation type.
314 */
315 if (type >= sizeof(reloc_target_bitmask) /
316 sizeof(*reloc_target_bitmask))
317 return (-1);
318
319 value = rela->r_addend;
320
321 /*
322 * Handle relative relocs here, because we might not
323 * be able to access globals yet.
324 */
325 if (type == R_SPARC_RELATIVE) {
326 /* XXXX -- apparently we ignore the preexisting value */
327 *where = (Elf_Addr)(obj->relocbase + value);
328 return (0);
329 }
330
331 /*
332 * If we get here while relocating rtld itself, we will crash because
333 * a non-local variable is accessed.
334 */
335 if (RELOC_RESOLVE_SYMBOL(type)) {
336
337 /* Find the symbol */
338 def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
339 false, cache);
340 if (def == NULL)
341 return (-1);
342
343 /* Add in the symbol's absolute address */
344 value += (Elf_Addr)(defobj->relocbase + def->st_value);
345 }
346
347 if (type == R_SPARC_OLO10)
348 value = (value & 0x3ff) + ELF64_R_TYPE_DATA(rela->r_info);
349
350 if (RELOC_PC_RELATIVE(type))
351 value -= (Elf_Addr)where;
352
353 if (RELOC_BASE_RELATIVE(type)) {
354 /*
355 * Note that even though sparcs use `Elf_rela' exclusively
356 * we still need the implicit memory addend in relocations
357 * referring to GOT entries. Undoubtedly, someone f*cked
358 * this up in the distant past, and now we're stuck with
359 * it in the name of compatibility for all eternity..
360 *
361 * In any case, the implicit and explicit should be mutually
362 * exclusive. We provide a check for that here.
363 */
364 /* XXXX -- apparently we ignore the preexisting value */
365 value += (Elf_Addr)(obj->relocbase);
366 }
367
368 mask = RELOC_VALUE_BITMASK(type);
369 value >>= RELOC_VALUE_RIGHTSHIFT(type);
370 value &= mask;
371
372 if (RELOC_UNALIGNED(type)) {
373 /* Handle unaligned relocations. */
374 Elf_Addr tmp;
375 char *ptr;
376 int size;
377 int i;
378
379 size = RELOC_TARGET_SIZE(type) / 8;
380 ptr = (char *)where;
381 tmp = 0;
382
383 /* Read it in one byte at a time. */
384 for (i = 0; i < size; i++)
385 tmp = (tmp << 8) | ptr[i];
386
387 tmp &= ~mask;
388 tmp |= value;
389
390 /* Write it back out. */
391 for (i = 0; i < size; i++)
392 ptr[i] = ((tmp >> ((size - i - 1) * 8)) & 0xff);
393 } else if (RELOC_TARGET_SIZE(type) > 32) {
394 *where &= ~mask;
395 *where |= value;
396 } else {
397 *where32 &= ~mask;
398 *where32 |= value;
399 }
400
401 return (0);
402}
403
404int
405reloc_plt(Obj_Entry *obj)
406{
407#if 0
408 const Obj_Entry *defobj;
409 const Elf_Rela *relalim;
410 const Elf_Rela *rela;
411 const Elf_Sym *def;
412 Elf_Addr *where;
413 Elf_Addr value;
414
415 relalim = (const Elf_Rela *)((char *)obj->pltrela + obj->pltrelasize);
416 for (rela = obj->pltrela; rela < relalim; rela++) {
417 if (rela->r_addend == 0)
418 continue;
419 assert(ELF64_R_TYPE_ID(rela->r_info) == R_SPARC_JMP_SLOT);
420 where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
421 def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
422 true, NULL);
423 value = (Elf_Addr)(defobj->relocbase + def->st_value);
424 *where = value;
425 }
426#endif
427 return (0);
428}
429
430/*
431 * Instruction templates:
432 */
433#define BAA 0x10400000 /* ba,a %xcc, 0 */
434#define SETHI 0x03000000 /* sethi %hi(0), %g1 */
435#define JMP 0x81c06000 /* jmpl %g1+%lo(0), %g0 */
436#define NOP 0x01000000 /* sethi %hi(0), %g0 */
437#define OR 0x82806000 /* or %g1, 0, %g1 */
438#define XOR 0x82c06000 /* xor %g1, 0, %g1 */
439#define MOV71 0x8283a000 /* or %o7, 0, %g1 */
440#define MOV17 0x9c806000 /* or %g1, 0, %o7 */
441#define CALL 0x40000000 /* call 0 */
442#define SLLX 0x8b407000 /* sllx %g1, 0, %g1 */
443#define SETHIG5 0x0b000000 /* sethi %hi(0), %g5 */
444#define ORG5 0x82804005 /* or %g1, %g5, %g1 */
445
446
447/* %hi(v) with variable shift */
448#define HIVAL(v, s) (((v) >> (s)) & 0x003fffff)
449#define LOVAL(v) ((v) & 0x000003ff)
450
451int
452reloc_jmpslots(Obj_Entry *obj)
453{
454 const Obj_Entry *defobj;
455 const Elf_Rela *relalim;
456 const Elf_Rela *rela;
457 const Elf_Sym *def;
458 Elf_Addr *where;
459 Elf_Addr target;
460
461 relalim = (const Elf_Rela *)((char *)obj->pltrela + obj->pltrelasize);
462 for (rela = obj->pltrela; rela < relalim; rela++) {
463 assert(ELF64_R_TYPE_ID(rela->r_info) == R_SPARC_JMP_SLOT);
464 where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
465 def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
466 true, NULL);
467 if (def == NULL)
468 return -1;
469 target = (Elf_Addr)(defobj->relocbase + def->st_value);
470 reloc_jmpslot(where, target, defobj, obj, (Elf_Rel *)rela);
471 }
472 obj->jmpslots_done = true;
473 return (0);
474}
475
476Elf_Addr
477reloc_jmpslot(Elf_Addr *wherep, Elf_Addr target, const Obj_Entry *obj,
478 const Obj_Entry *refobj, const Elf_Rel *rel)
479{
480 const Elf_Rela *rela = (const Elf_Rela *)rel;
481 Elf_Addr offset;
482 Elf_Word *where;
483
484 if (rela - refobj->pltrela < 32764) {
485 /*
486 * At the PLT entry pointed at by `where', we now construct
487 * a direct transfer to the now fully resolved function
488 * address.
489 *
490 * A PLT entry is supposed to start by looking like this:
491 *
492 * sethi (. - .PLT0), %g1
493 * ba,a %xcc, .PLT1
494 * nop
495 * nop
496 * nop
497 * nop
498 * nop
499 * nop
500 *
501 * When we replace these entries we start from the second
502 * entry and do it in reverse order so the last thing we
503 * do is replace the branch. That allows us to change this
504 * atomically.
505 *
506 * We now need to find out how far we need to jump. We
507 * have a choice of several different relocation techniques
508 * which are increasingly expensive.
509 */
510 where = (Elf_Word *)wherep;
511 offset = ((Elf_Addr)where) - target;
512 if (offset <= (1L<<20) && offset >= -(1L<<20)) {
513 /*
514 * We're within 1MB -- we can use a direct branch insn.
515 *
516 * We can generate this pattern:
517 *
518 * sethi %hi(. - .PLT0), %g1
519 * ba,a %xcc, addr
520 * nop
521 * nop
522 * nop
523 * nop
524 * nop
525 * nop
526 *
527 */
528 where[1] = BAA | ((offset >> 2) &0x3fffff);
529 flush(where, 4);
530 } else if (target >= 0 && target < (1L<<32)) {
531 /*
532 * We're withing 32-bits of address zero.
533 *
534 * The resulting code in the jump slot is:
535 *
536 * sethi %hi(. - .PLT0), %g1
537 * sethi %hi(addr), %g1
538 * jmp %g1+%lo(addr)
539 * nop
540 * nop
541 * nop
542 * nop
543 * nop
544 *
545 */
546 where[2] = JMP | LOVAL(target);
547 flush(where, 8);
548 where[1] = SETHI | HIVAL(target, 10);
549 flush(where, 4);
550 } else if (target <= 0 && target > -(1L<<32)) {
551 /*
552 * We're withing 32-bits of address -1.
553 *
554 * The resulting code in the jump slot is:
555 *
556 * sethi %hi(. - .PLT0), %g1
557 * sethi %hix(addr), %g1
558 * xor %g1, %lox(addr), %g1
559 * jmp %g1
560 * nop
561 * nop
562 * nop
563 * nop
564 *
565 */
566 where[3] = JMP;
567 flush(where, 12);
568 where[2] = XOR | ((~target) & 0x00001fff);
569 flush(where, 8);
570 where[1] = SETHI | HIVAL(~target, 10);
571 flush(where, 4);
572 } else if (offset <= (1L<<32) && offset >= -((1L<<32) - 4)) {
573 /*
574 * We're withing 32-bits -- we can use a direct call
575 * insn
576 *
577 * The resulting code in the jump slot is:
578 *
579 * sethi %hi(. - .PLT0), %g1
580 * mov %o7, %g1
581 * call (.+offset)
582 * mov %g1, %o7
583 * nop
584 * nop
585 * nop
586 * nop
587 *
588 */
589 where[3] = MOV17;
590 flush(where, 12);
591 where[2] = CALL | ((offset >> 4) & 0x3fffffff);
592 flush(where, 8);
593 where[1] = MOV71;
594 flush(where, 4);
595 } else if (offset >= 0 && offset < (1L<<44)) {
596 /*
597 * We're withing 44 bits. We can generate this pattern:
598 *
599 * The resulting code in the jump slot is:
600 *
601 * sethi %hi(. - .PLT0), %g1
602 * sethi %h44(addr), %g1
603 * or %g1, %m44(addr), %g1
604 * sllx %g1, 12, %g1
605 * jmp %g1+%l44(addr)
606 * nop
607 * nop
608 * nop
609 *
610 */
611 where[4] = JMP | LOVAL(offset);
612 flush(where, 16);
613 where[3] = SLLX | 12;
614 flush(where, 12);
615 where[2] = OR | (((offset) >> 12) & 0x00001fff);
616 flush(where, 8);
617 where[1] = SETHI | HIVAL(offset, 22);
618 flush(where, 4);
619 } else if (offset < 0 && offset > -(1L<<44)) {
620 /*
621 * We're withing 44 bits. We can generate this pattern:
622 *
623 * The resulting code in the jump slot is:
624 *
625 * sethi %hi(. - .PLT0), %g1
626 * sethi %h44(-addr), %g1
627 * xor %g1, %m44(-addr), %g1
628 * sllx %g1, 12, %g1
629 * jmp %g1+%l44(addr)
630 * nop
631 * nop
632 * nop
633 *
634 */
635 where[4] = JMP | LOVAL(offset);
636 flush(where, 16);
637 where[3] = SLLX | 12;
638 flush(where, 12);
639 where[2] = XOR | (((~offset) >> 12) & 0x00001fff);
640 flush(where, 8);
641 where[1] = SETHI | HIVAL(~offset, 22);
642 flush(where, 4);
643 } else {
644 /*
645 * We need to load all 64-bits
646 *
647 * The resulting code in the jump slot is:
648 *
649 * sethi %hi(. - .PLT0), %g1
650 * sethi %hh(addr), %g1
651 * sethi %lm(addr), %g5
652 * or %g1, %hm(addr), %g1
653 * sllx %g1, 32, %g1
654 * or %g1, %g5, %g1
655 * jmp %g1+%lo(addr)
656 * nop
657 *
658 */
659 where[6] = JMP | LOVAL(target);
660 flush(where, 24);
661 where[5] = ORG5;
662 flush(where, 20);
663 where[4] = SLLX | 32;
664 flush(where, 16);
665 where[3] = OR | LOVAL((target) >> 32);
666 flush(where, 12);
667 where[2] = SETHIG5 | HIVAL(target, 10);
668 flush(where, 8);
669 where[1] = SETHI | HIVAL(target, 42);
670 flush(where, 4);
671 }
672 } else {
673 /*
674 * This is a high PLT slot; the relocation offset specifies a
675 * pointer that needs to be frobbed; no actual code needs to
676 * be modified. The pointer to be calculated needs the addend
677 * added and the reference object relocation base subtraced.
678 */
679 *wherep = target + rela->r_addend -
680 (Elf_Addr)refobj->relocbase;
681 }
682
683 return (target);
684}
685
686/*
687 * Install rtld function call into this PLT slot.
688 */
689#define SAVE 0x9de3bf50
690#define SETHI_l0 0x21000000
691#define SETHI_l1 0x23000000
692#define OR_l0_l0 0xa0142000
693#define SLLX_l0_32_l0 0xa12c3020
694#define OR_l0_l1_l0 0xa0140011
695#define JMPL_l0_o1 0x93c42000
696#define MOV_g1_o0 0x90100001
697
698void
699init_pltgot(Obj_Entry *obj)
700{
701 Elf_Word *entry;
702
703 if (obj->pltgot != NULL) {
704 entry = (Elf_Word *)obj->pltgot;
705 install_plt(&entry[0], (Elf_Addr)_rtld_bind_start_0);
706 install_plt(&entry[8], (Elf_Addr)_rtld_bind_start_1);
707 obj->pltgot[8] = (Elf_Addr)obj;
708 }
709}
710
/*
 * Write one 8-instruction rtld trampoline at `pltgot' that transfers
 * control to `proc' (an _rtld_bind_start_* entry point).  The 64-bit
 * address is assembled piecewise in %l0/%l1 (sethi/sethi/or/sllx/or)
 * and then jumped through, with the incoming %g1 handed over in %o0.
 * Each store is followed by flush() so the instruction cache stays
 * coherent with the freshly written word.
 */
711 static void
712 install_plt(Elf_Word *pltgot, Elf_Addr proc)
713 {
714 	pltgot[0] = SAVE;
715 	flush(pltgot, 0);
	/* %l0 = bits 63..42 of proc. */
716 	pltgot[1] = SETHI_l0 | HIVAL(proc, 42);
717 	flush(pltgot, 4);
	/* %l1 = bits 31..10 of proc. */
718 	pltgot[2] = SETHI_l1 | HIVAL(proc, 10);
719 	flush(pltgot, 8);
	/* %l0 |= bits 41..32 of proc. */
720 	pltgot[3] = OR_l0_l0 | LOVAL((proc) >> 32);
721 	flush(pltgot, 12);
	/* Shift the high half into position. */
722 	pltgot[4] = SLLX_l0_32_l0;
723 	flush(pltgot, 16);
	/* Merge in the low half held in %l1. */
724 	pltgot[5] = OR_l0_l1_l0;
725 	flush(pltgot, 20);
	/* Jump, adding the final low 10 bits of proc. */
726 	pltgot[6] = JMPL_l0_o1 | LOVAL(proc);
727 	flush(pltgot, 24);
728 	pltgot[7] = MOV_g1_o0;
729 	flush(pltgot, 28);
730 }
731
/*
 * Allocate the static TLS block for the initially loaded objects and
 * install the resulting thread pointer in %g7, the sparc64 thread
 * register.  Three Elf_Addr words are reserved for the TCB in front of
 * the TLS data -- NOTE(review): presumably the runtime's TCB layout;
 * confirm against the sparc64 TLS ABI before relying on it.
 */
732 void
733 allocate_initial_tls(Obj_Entry *objs)
734 {
735 	Elf_Addr* tpval;
736 
737 	/*
738 	 * Fix the size of the static TLS block by using the maximum
739 	 * offset allocated so far and adding a bit for dynamic modules to
740 	 * use.
741 	 */
742 	tls_static_space = tls_last_offset + RTLD_STATIC_TLS_EXTRA;
743 	tpval = allocate_tls(objs, NULL, 3*sizeof(Elf_Addr), sizeof(Elf_Addr));
	/* Publish the thread pointer in the dedicated register. */
744 	__asm __volatile("mov %0, %%g7" : : "r" (tpval));
745 }
746
/*
 * TLS access entry point called from compiler-generated code.  Reads
 * the thread pointer straight out of %g7 via a register-bound variable
 * and delegates the module/offset lookup to tls_get_addr_common().
 */
747 void *__tls_get_addr(tls_index *ti)
748 {
749 	register Elf_Addr** tp __asm__("%g7");
750 
751 	return tls_get_addr_common(tp, ti->ti_module, ti->ti_offset);
752 }
278 return (r);
279}
280
281static int
282reloc_nonplt_object(Obj_Entry *obj, const Elf_Rela *rela, SymCache *cache)
283{
284 const Obj_Entry *defobj;
285 const Elf_Sym *def;
286 Elf_Addr *where;
287 Elf_Word *where32;
288 Elf_Word type;
289 Elf_Addr value;
290 Elf_Addr mask;
291
292 where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
293 where32 = (Elf_Word *)where;
294 defobj = NULL;
295 def = NULL;
296
297 type = ELF64_R_TYPE_ID(rela->r_info);
298 if (type == R_SPARC_NONE)
299 return (0);
300
301 /* We do JMP_SLOTs below */
302 if (type == R_SPARC_JMP_SLOT)
303 return (0);
304
305 /* COPY relocs are also handled elsewhere */
306 if (type == R_SPARC_COPY)
307 return (0);
308
309 /*
310 * Note: R_SPARC_UA16 must be numerically largest relocation type.
311 */
312 if (type >= sizeof(reloc_target_bitmask) /
313 sizeof(*reloc_target_bitmask))
314 return (-1);
315
316 value = rela->r_addend;
317
318 /*
319 * Handle relative relocs here, because we might not
320 * be able to access globals yet.
321 */
322 if (type == R_SPARC_RELATIVE) {
323 /* XXXX -- apparently we ignore the preexisting value */
324 *where = (Elf_Addr)(obj->relocbase + value);
325 return (0);
326 }
327
328 /*
329 * If we get here while relocating rtld itself, we will crash because
330 * a non-local variable is accessed.
331 */
332 if (RELOC_RESOLVE_SYMBOL(type)) {
333
334 /* Find the symbol */
335 def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
336 false, cache);
337 if (def == NULL)
338 return (-1);
339
340 /* Add in the symbol's absolute address */
341 value += (Elf_Addr)(defobj->relocbase + def->st_value);
342 }
343
344 if (type == R_SPARC_OLO10)
345 value = (value & 0x3ff) + ELF64_R_TYPE_DATA(rela->r_info);
346
347 if (RELOC_PC_RELATIVE(type))
348 value -= (Elf_Addr)where;
349
350 if (RELOC_BASE_RELATIVE(type)) {
351 /*
352 * Note that even though sparcs use `Elf_rela' exclusively
353 * we still need the implicit memory addend in relocations
354 * referring to GOT entries. Undoubtedly, someone f*cked
355 * this up in the distant past, and now we're stuck with
356 * it in the name of compatibility for all eternity..
357 *
358 * In any case, the implicit and explicit should be mutually
359 * exclusive. We provide a check for that here.
360 */
361 /* XXXX -- apparently we ignore the preexisting value */
362 value += (Elf_Addr)(obj->relocbase);
363 }
364
365 mask = RELOC_VALUE_BITMASK(type);
366 value >>= RELOC_VALUE_RIGHTSHIFT(type);
367 value &= mask;
368
369 if (RELOC_UNALIGNED(type)) {
370 /* Handle unaligned relocations. */
371 Elf_Addr tmp;
372 char *ptr;
373 int size;
374 int i;
375
376 size = RELOC_TARGET_SIZE(type) / 8;
377 ptr = (char *)where;
378 tmp = 0;
379
380 /* Read it in one byte at a time. */
381 for (i = 0; i < size; i++)
382 tmp = (tmp << 8) | ptr[i];
383
384 tmp &= ~mask;
385 tmp |= value;
386
387 /* Write it back out. */
388 for (i = 0; i < size; i++)
389 ptr[i] = ((tmp >> ((size - i - 1) * 8)) & 0xff);
390 } else if (RELOC_TARGET_SIZE(type) > 32) {
391 *where &= ~mask;
392 *where |= value;
393 } else {
394 *where32 &= ~mask;
395 *where32 |= value;
396 }
397
398 return (0);
399}
400
/*
 * Process the PLT relocations of `obj'.  On sparc64 nothing needs to
 * be done here: jump slots are bound either lazily through the PLT
 * header installed by init_pltgot() or eagerly via reloc_jmpslots().
 * The `#if 0' region below is apparently a disabled eager-binding
 * variant, retained for reference.
 */
401 int
402 reloc_plt(Obj_Entry *obj)
403 {
404 #if 0
405 	const Obj_Entry *defobj;
406 	const Elf_Rela *relalim;
407 	const Elf_Rela *rela;
408 	const Elf_Sym *def;
409 	Elf_Addr *where;
410 	Elf_Addr value;
411 
412 	relalim = (const Elf_Rela *)((char *)obj->pltrela + obj->pltrelasize);
413 	for (rela = obj->pltrela; rela < relalim; rela++) {
414 		if (rela->r_addend == 0)
415 			continue;
416 		assert(ELF64_R_TYPE_ID(rela->r_info) == R_SPARC_JMP_SLOT);
417 		where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
418 		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
419 		    true, NULL);
420 		value = (Elf_Addr)(defobj->relocbase + def->st_value);
421 		*where = value;
422 	}
423 #endif
424 	return (0);
425 }
426
427/*
428 * Instruction templates:
429 */
430#define BAA 0x10400000 /* ba,a %xcc, 0 */
431#define SETHI 0x03000000 /* sethi %hi(0), %g1 */
432#define JMP 0x81c06000 /* jmpl %g1+%lo(0), %g0 */
433#define NOP 0x01000000 /* sethi %hi(0), %g0 */
434#define OR 0x82806000 /* or %g1, 0, %g1 */
435#define XOR 0x82c06000 /* xor %g1, 0, %g1 */
436#define MOV71 0x8283a000 /* or %o7, 0, %g1 */
437#define MOV17 0x9c806000 /* or %g1, 0, %o7 */
438#define CALL 0x40000000 /* call 0 */
439#define SLLX 0x8b407000 /* sllx %g1, 0, %g1 */
440#define SETHIG5 0x0b000000 /* sethi %hi(0), %g5 */
441#define ORG5 0x82804005 /* or %g1, %g5, %g1 */
442
443
444/* %hi(v) with variable shift */
445#define HIVAL(v, s) (((v) >> (s)) & 0x003fffff)
446#define LOVAL(v) ((v) & 0x000003ff)
447
448int
449reloc_jmpslots(Obj_Entry *obj)
450{
451 const Obj_Entry *defobj;
452 const Elf_Rela *relalim;
453 const Elf_Rela *rela;
454 const Elf_Sym *def;
455 Elf_Addr *where;
456 Elf_Addr target;
457
458 relalim = (const Elf_Rela *)((char *)obj->pltrela + obj->pltrelasize);
459 for (rela = obj->pltrela; rela < relalim; rela++) {
460 assert(ELF64_R_TYPE_ID(rela->r_info) == R_SPARC_JMP_SLOT);
461 where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
462 def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
463 true, NULL);
464 if (def == NULL)
465 return -1;
466 target = (Elf_Addr)(defobj->relocbase + def->st_value);
467 reloc_jmpslot(where, target, defobj, obj, (Elf_Rel *)rela);
468 }
469 obj->jmpslots_done = true;
470 return (0);
471}
472
/*
 * Resolve one JMP_SLOT relocation: patch the PLT entry at `wherep' to
 * transfer directly to `target'.  For low PLT slots this rewrites the
 * slot's instructions in place, choosing a progressively more
 * expensive instruction sequence depending on the distance to the
 * target; the words are stored last-to-first, each followed by
 * flush(), so the initial "ba,a .PLT1" in word 1 is replaced only once
 * the rest of the sequence is in place, making the update atomic with
 * respect to concurrent execution of the slot.  High PLT slots hold a
 * plain pointer and are simply stored.  Returns `target'.
 */
473 Elf_Addr
474 reloc_jmpslot(Elf_Addr *wherep, Elf_Addr target, const Obj_Entry *obj,
475     const Obj_Entry *refobj, const Elf_Rel *rel)
476 {
477 	const Elf_Rela *rela = (const Elf_Rela *)rel;
478 	Elf_Addr offset;
479 	Elf_Word *where;
480 
481 	if (rela - refobj->pltrela < 32764) {
482 		/*
483 		 * At the PLT entry pointed at by `where', we now construct
484 		 * a direct transfer to the now fully resolved function
485 		 * address.
486 		 *
487 		 * A PLT entry is supposed to start by looking like this:
488 		 *
489 		 * sethi	(. - .PLT0), %g1
490 		 * ba,a	%xcc, .PLT1
491 		 * nop
492 		 * nop
493 		 * nop
494 		 * nop
495 		 * nop
496 		 * nop
497 		 *
498 		 * When we replace these entries we start from the second
499 		 * entry and do it in reverse order so the last thing we
500 		 * do is replace the branch. That allows us to change this
501 		 * atomically.
502 		 *
503 		 * We now need to find out how far we need to jump. We
504 		 * have a choice of several different relocation techniques
505 		 * which are increasingly expensive.
506 		 */
507 		where = (Elf_Word *)wherep;
		/*
		 * NOTE(review): `offset' is slot-address minus target (the
		 * negation of the usual branch displacement), and Elf_Addr
		 * is unsigned, so the signed-looking range tests below
		 * promote their negative bounds to huge unsigned values.
		 * As written, the 1MB-branch, direct-call and negative
		 * 44-bit cases appear unreachable, and `target >= 0' /
		 * `target <= 0' degenerate to always-true / target == 0.
		 * Confirm against a known-good revision before changing.
		 */
508 		offset = ((Elf_Addr)where) - target;
509 		if (offset <= (1L<<20) && offset >= -(1L<<20)) {
510 			/*
511 			 * We're within 1MB -- we can use a direct branch insn.
512 			 *
513 			 * We can generate this pattern:
514 			 *
515 			 * sethi	%hi(. - .PLT0), %g1
516 			 * ba,a	%xcc, addr
517 			 * nop
518 			 * nop
519 			 * nop
520 			 * nop
521 			 * nop
522 			 * nop
523 			 *
524 			 */
525 			where[1] = BAA | ((offset >> 2) &0x3fffff);
526 			flush(where, 4);
527 		} else if (target >= 0 && target < (1L<<32)) {
528 			/*
529 			 * We're withing 32-bits of address zero.
530 			 *
531 			 * The resulting code in the jump slot is:
532 			 *
533 			 * sethi	%hi(. - .PLT0), %g1
534 			 * sethi	%hi(addr), %g1
535 			 * jmp	%g1+%lo(addr)
536 			 * nop
537 			 * nop
538 			 * nop
539 			 * nop
540 			 * nop
541 			 *
542 			 */
543 			where[2] = JMP | LOVAL(target);
544 			flush(where, 8);
545 			where[1] = SETHI | HIVAL(target, 10);
546 			flush(where, 4);
547 		} else if (target <= 0 && target > -(1L<<32)) {
548 			/*
549 			 * We're withing 32-bits of address -1.
550 			 *
551 			 * The resulting code in the jump slot is:
552 			 *
553 			 * sethi	%hi(. - .PLT0), %g1
554 			 * sethi	%hix(addr), %g1
555 			 * xor	%g1, %lox(addr), %g1
556 			 * jmp	%g1
557 			 * nop
558 			 * nop
559 			 * nop
560 			 * nop
561 			 *
562 			 */
563 			where[3] = JMP;
564 			flush(where, 12);
565 			where[2] = XOR | ((~target) & 0x00001fff);
566 			flush(where, 8);
567 			where[1] = SETHI | HIVAL(~target, 10);
568 			flush(where, 4);
569 		} else if (offset <= (1L<<32) && offset >= -((1L<<32) - 4)) {
570 			/*
571 			 * We're withing 32-bits -- we can use a direct call
572 			 * insn
573 			 *
574 			 * The resulting code in the jump slot is:
575 			 *
576 			 * sethi	%hi(. - .PLT0), %g1
577 			 * mov	%o7, %g1
578 			 * call	(.+offset)
579 			 * mov	%g1, %o7
580 			 * nop
581 			 * nop
582 			 * nop
583 			 * nop
584 			 *
585 			 */
586 			where[3] = MOV17;
587 			flush(where, 12);
			/*
			 * NOTE(review): the SPARC call disp30 field encodes
			 * (target - PC) >> 2; the ">> 4" and the sign of
			 * `offset' here look suspect -- verify.
			 */
588 			where[2] = CALL | ((offset >> 4) & 0x3fffffff);
589 			flush(where, 8);
590 			where[1] = MOV71;
591 			flush(where, 4);
592 		} else if (offset >= 0 && offset < (1L<<44)) {
593 			/*
594 			 * We're withing 44 bits.  We can generate this pattern:
595 			 *
596 			 * The resulting code in the jump slot is:
597 			 *
598 			 * sethi	%hi(. - .PLT0), %g1
599 			 * sethi	%h44(addr), %g1
600 			 * or	%g1, %m44(addr), %g1
601 			 * sllx	%g1, 12, %g1
602 			 * jmp	%g1+%l44(addr)
603 			 * nop
604 			 * nop
605 			 * nop
606 			 *
607 			 */
608 			where[4] = JMP | LOVAL(offset);
609 			flush(where, 16);
610 			where[3] = SLLX | 12;
611 			flush(where, 12);
612 			where[2] = OR | (((offset) >> 12) & 0x00001fff);
613 			flush(where, 8);
614 			where[1] = SETHI | HIVAL(offset, 22);
615 			flush(where, 4);
616 		} else if (offset < 0 && offset > -(1L<<44)) {
617 			/*
618 			 * We're withing 44 bits.  We can generate this pattern:
619 			 *
620 			 * The resulting code in the jump slot is:
621 			 *
622 			 * sethi	%hi(. - .PLT0), %g1
623 			 * sethi	%h44(-addr), %g1
624 			 * xor	%g1, %m44(-addr), %g1
625 			 * sllx	%g1, 12, %g1
626 			 * jmp	%g1+%l44(addr)
627 			 * nop
628 			 * nop
629 			 * nop
630 			 *
631 			 */
632 			where[4] = JMP | LOVAL(offset);
633 			flush(where, 16);
634 			where[3] = SLLX | 12;
635 			flush(where, 12);
636 			where[2] = XOR | (((~offset) >> 12) & 0x00001fff);
637 			flush(where, 8);
638 			where[1] = SETHI | HIVAL(~offset, 22);
639 			flush(where, 4);
640 		} else {
641 			/*
642 			 * We need to load all 64-bits
643 			 *
644 			 * The resulting code in the jump slot is:
645 			 *
646 			 * sethi	%hi(. - .PLT0), %g1
647 			 * sethi	%hh(addr), %g1
648 			 * sethi	%lm(addr), %g5
649 			 * or	%g1, %hm(addr), %g1
650 			 * sllx	%g1, 32, %g1
651 			 * or	%g1, %g5, %g1
652 			 * jmp	%g1+%lo(addr)
653 			 * nop
654 			 *
655 			 */
656 			where[6] = JMP | LOVAL(target);
657 			flush(where, 24);
658 			where[5] = ORG5;
659 			flush(where, 20);
660 			where[4] = SLLX | 32;
661 			flush(where, 16);
662 			where[3] = OR | LOVAL((target) >> 32);
663 			flush(where, 12);
664 			where[2] = SETHIG5 | HIVAL(target, 10);
665 			flush(where, 8);
666 			where[1] = SETHI | HIVAL(target, 42);
667 			flush(where, 4);
668 		}
669 	} else {
670 		/*
671 		 * This is a high PLT slot; the relocation offset specifies a
672 		 * pointer that needs to be frobbed; no actual code needs to
673 		 * be modified. The pointer to be calculated needs the addend
674 		 * added and the reference object relocation base subtraced.
675 		 */
676 		*wherep = target + rela->r_addend -
677 		    (Elf_Addr)refobj->relocbase;
678 	}
679 
680 	return (target);
681 }
682
683/*
684 * Install rtld function call into this PLT slot.
685 */
686#define SAVE 0x9de3bf50
687#define SETHI_l0 0x21000000
688#define SETHI_l1 0x23000000
689#define OR_l0_l0 0xa0142000
690#define SLLX_l0_32_l0 0xa12c3020
691#define OR_l0_l1_l0 0xa0140011
692#define JMPL_l0_o1 0x93c42000
693#define MOV_g1_o0 0x90100001
694
695void
696init_pltgot(Obj_Entry *obj)
697{
698 Elf_Word *entry;
699
700 if (obj->pltgot != NULL) {
701 entry = (Elf_Word *)obj->pltgot;
702 install_plt(&entry[0], (Elf_Addr)_rtld_bind_start_0);
703 install_plt(&entry[8], (Elf_Addr)_rtld_bind_start_1);
704 obj->pltgot[8] = (Elf_Addr)obj;
705 }
706}
707
/*
 * Write one 8-instruction rtld trampoline at `pltgot' that transfers
 * control to `proc' (an _rtld_bind_start_* entry point).  The 64-bit
 * address is assembled piecewise in %l0/%l1 (sethi/sethi/or/sllx/or)
 * and then jumped through, with the incoming %g1 handed over in %o0.
 * Each store is followed by flush() so the instruction cache stays
 * coherent with the freshly written word.
 */
708 static void
709 install_plt(Elf_Word *pltgot, Elf_Addr proc)
710 {
711 	pltgot[0] = SAVE;
712 	flush(pltgot, 0);
	/* %l0 = bits 63..42 of proc. */
713 	pltgot[1] = SETHI_l0 | HIVAL(proc, 42);
714 	flush(pltgot, 4);
	/* %l1 = bits 31..10 of proc. */
715 	pltgot[2] = SETHI_l1 | HIVAL(proc, 10);
716 	flush(pltgot, 8);
	/* %l0 |= bits 41..32 of proc. */
717 	pltgot[3] = OR_l0_l0 | LOVAL((proc) >> 32);
718 	flush(pltgot, 12);
	/* Shift the high half into position. */
719 	pltgot[4] = SLLX_l0_32_l0;
720 	flush(pltgot, 16);
	/* Merge in the low half held in %l1. */
721 	pltgot[5] = OR_l0_l1_l0;
722 	flush(pltgot, 20);
	/* Jump, adding the final low 10 bits of proc. */
723 	pltgot[6] = JMPL_l0_o1 | LOVAL(proc);
724 	flush(pltgot, 24);
725 	pltgot[7] = MOV_g1_o0;
726 	flush(pltgot, 28);
727 }
728
/*
 * Allocate the static TLS block for the initially loaded objects and
 * install the resulting thread pointer in %g7, the sparc64 thread
 * register.  Three Elf_Addr words are reserved for the TCB in front of
 * the TLS data -- NOTE(review): presumably the runtime's TCB layout;
 * confirm against the sparc64 TLS ABI before relying on it.
 */
729 void
730 allocate_initial_tls(Obj_Entry *objs)
731 {
732 	Elf_Addr* tpval;
733 
734 	/*
735 	 * Fix the size of the static TLS block by using the maximum
736 	 * offset allocated so far and adding a bit for dynamic modules to
737 	 * use.
738 	 */
739 	tls_static_space = tls_last_offset + RTLD_STATIC_TLS_EXTRA;
740 	tpval = allocate_tls(objs, NULL, 3*sizeof(Elf_Addr), sizeof(Elf_Addr));
	/* Publish the thread pointer in the dedicated register. */
741 	__asm __volatile("mov %0, %%g7" : : "r" (tpval));
742 }
743
/*
 * TLS access entry point called from compiler-generated code.  Reads
 * the thread pointer straight out of %g7 via a register-bound variable
 * and delegates the module/offset lookup to tls_get_addr_common().
 */
744 void *__tls_get_addr(tls_index *ti)
745 {
746 	register Elf_Addr** tp __asm__("%g7");
747 
748 	return tls_get_addr_common(tp, ti->ti_module, ti->ti_offset);
749 }