amd64_elf.c revision 11827:d7ef53deac3f
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * amd64 machine dependent and ELF file class dependent functions.
 * Contains routines for performing function binding and symbol relocations.
 */

#include	<stdio.h>
#include	<sys/elf.h>
#include	<sys/elf_amd64.h>
#include	<sys/mman.h>
#include	<dlfcn.h>
#include	<synch.h>
#include	<string.h>
#include	<debug.h>
#include	<reloc.h>
#include	<conv.h>
#include	"_rtld.h"
#include	"_audit.h"
#include	"_elf.h"
#include	"_inline.h"
#include	"msg.h"

extern void	elf_rtbndr(Rt_map *, ulong_t, caddr_t);

int
elf_mach_flags_check(Rej_desc *rej, Ehdr *ehdr)
{
	/*
	 * Check machine type and flags.
	 */
	if (ehdr->e_flags != 0) {
		rej->rej_type = SGS_REJ_BADFLAG;
		rej->rej_info = (uint_t)ehdr->e_flags;
		return (0);
	}
	return (1);
}

void
ldso_plt_init(Rt_map *lmp)
{
	/*
	 * There is no need to analyze ld.so because we don't map in any of
	 * its dependencies.  However we may map these dependencies in later
	 * (as if ld.so had dlopened them), so initialize the plt and the
	 * permission information.
	 */
	if (PLTGOT(lmp))
		elf_plt_init((PLTGOT(lmp)), (caddr_t)lmp);
}

static const uchar_t dyn_plt_template[] = {
/* 0x00 */  0x55,			/* pushq %rbp */
/* 0x01 */  0x48, 0x89, 0xe5,		/* movq  %rsp, %rbp */
/* 0x04 */  0x48, 0x83, 0xec, 0x10,	/* subq  $0x10, %rsp */
/* 0x08 */  0x4c, 0x8d, 0x1d, 0x00,	/* leaq  trace_fields(%rip), %r11 */
		0x00, 0x00, 0x00,
/* 0x0f */  0x4c, 0x89, 0x5d, 0xf8,	/* movq  %r11, -0x8(%rbp) */
/* 0x13 */  0x49, 0xbb, 0x00, 0x00,	/* movq  $elf_plt_trace, %r11 */
		0x00, 0x00, 0x00,
		0x00, 0x00, 0x00,
/* 0x1d */  0x41, 0xff, 0xe3		/* jmp   *%r11 */
/* 0x20 */
};

/*
 * And the virtual outstanding relocations against the
 * above block are:
 *
 *	reloc		offset	Addend	symbol
 *	R_AMD64_PC32	0x0b	-4	trace_fields
 *	R_AMD64_64	0x15	0	elf_plt_trace
 */

#define	TRCREL1OFF	0x0b
#define	TRCREL2OFF	0x15
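
/*
 * As a sketch of how the R_AMD64_PC32 fixup above is applied (the psABI
 * formula S + A - P): the 4-byte displacement field sits at
 * dyn_plt + TRCREL1OFF, and the processor resolves it relative to the
 * next instruction, which begins 4 bytes past the field; hence the
 * addend of -4:
 *
 *	value = S + A - P
 *	      = (uintptr_t)dyndata - (uintptr_t)&dyn_plt[TRCREL1OFF] - 4
 *
 * which is exactly the symvalue computation in elf_plt_trace_write()
 * below.
 */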

int	dyn_plt_ent_size = sizeof (dyn_plt_template);

/*
 * the dynamic plt entry is:
 *
 *	pushq	%rbp
 *	movq	%rsp, %rbp
 *	subq	$0x10, %rsp
 *	leaq	trace_fields(%rip), %r11
 *	movq	%r11, -0x8(%rbp)
 *	movq	$elf_plt_trace, %r11
 *	jmp	*%r11
 * dyn_data:
 *	.align  8
 *	uintptr_t	reflmp
 *	uintptr_t	deflmp
 *	uint_t		symndx
 *	uint_t		sb_flags
 *	Sym		symdef
 */
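
/*
 * A minimal sketch (hypothetical; nothing in this file declares such a
 * struct) of the dyn_data area that follows each template.  The code
 * below writes the fields through a uintptr_t pointer instead, packing
 * symndx and sb_flags into a single 64-bit word:
 *
 *	typedef struct {
 *		uintptr_t	dd_reflmp;	-- referencing link-map
 *		uintptr_t	dd_deflmp;	-- defining link-map
 *		uint64_t	dd_info;	-- (sb_flags << 32) | symndx
 *		Sym		dd_symdef;	-- copy of the bound symbol
 *	} dyn_data_t;
 */
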
static caddr_t
elf_plt_trace_write(ulong_t roffset, Rt_map *rlmp, Rt_map *dlmp, Sym *sym,
    uint_t symndx, uint_t pltndx, caddr_t to, uint_t sb_flags, int *fail)
{
	extern int	elf_plt_trace();
	ulong_t		got_entry;
	uchar_t		*dyn_plt;
	uintptr_t	*dyndata;

	/*
	 * We only need to add the glue code if there is an auditing
	 * library that is interested in this binding.
	 */
	dyn_plt = (uchar_t *)((uintptr_t)AUDINFO(rlmp)->ai_dynplts +
	    (pltndx * dyn_plt_ent_size));

	/*
	 * Have we initialized this dynamic plt entry yet?  If we haven't, do
	 * it now.  Otherwise this function has been called before, but from a
	 * different plt (i.e. from another shared object).  In that case
	 * we just set the plt to point to the new dyn_plt.
	 */
	if (*dyn_plt == 0) {
		Sym	*symp;
		Xword	symvalue;
		Lm_list	*lml = LIST(rlmp);

		(void) memcpy((void *)dyn_plt, dyn_plt_template,
		    sizeof (dyn_plt_template));
		dyndata = (uintptr_t *)((uintptr_t)dyn_plt +
		    ROUND(sizeof (dyn_plt_template), M_WORD_ALIGN));

		/*
		 * relocate:
		 *	leaq	trace_fields(%rip), %r11
		 *	R_AMD64_PC32	0x0b	-4	trace_fields
		 */
		symvalue = (Xword)((uintptr_t)dyndata -
		    (uintptr_t)(&dyn_plt[TRCREL1OFF]) - 4);
		if (do_reloc_rtld(R_AMD64_PC32, &dyn_plt[TRCREL1OFF],
		    &symvalue, MSG_ORIG(MSG_SYM_LADYNDATA),
		    MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
			*fail = 1;
			return (0);
		}

		/*
		 * relocating:
		 *	movq	$elf_plt_trace, %r11
		 *	R_AMD64_64	0x15	0	elf_plt_trace
		 */
		symvalue = (Xword)elf_plt_trace;
		if (do_reloc_rtld(R_AMD64_64, &dyn_plt[TRCREL2OFF],
		    &symvalue, MSG_ORIG(MSG_SYM_ELFPLTTRACE),
		    MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
			*fail = 1;
			return (0);
		}

		*dyndata++ = (uintptr_t)rlmp;
		*dyndata++ = (uintptr_t)dlmp;
		*dyndata = (uintptr_t)(((uint64_t)sb_flags << 32) | symndx);
		dyndata++;
		symp = (Sym *)dyndata;
		*symp = *sym;
		symp->st_value = (Addr)to;
	}

	got_entry = (ulong_t)roffset;
	*(ulong_t *)got_entry = (ulong_t)dyn_plt;
	return ((caddr_t)dyn_plt);
}
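
/*
 * Once the GOT entry is redirected above, the call path for an audited
 * binding is roughly (a sketch; elf_plt_trace() itself is machine
 * assembly and is not shown here):
 *
 *	caller -> PLT entry -> *GOT[n] -> dyn_plt glue -> elf_plt_trace()
 *
 * elf_plt_trace() reads the dyn_data block to drive the la_pltenter()/
 * la_pltexit() audit hooks around the real destination function.
 */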

/*
 * Function binding routine - invoked on the first call to a function through
 * the procedure linkage table; the call passes first through an assembly
 * language interface.
 *
 * Takes the offset into the relocation table of the associated
 * relocation entry and the address of the link map (rt_private_map struct)
 * for the entry.
 *
 * Returns the address of the function referenced after re-writing the PLT
 * entry to invoke the function directly.
 *
 * On error, causes process to terminate with a signal.
 */
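
/*
 * A rough sketch of the lazy-binding path that reaches this routine,
 * assuming the standard AMD64 psABI PLT layout (the register save and
 * restore performed by elf_rtbndr is machine assembly and is omitted):
 *
 *	callq	foo@plt
 *	foo@plt:	jmp	*GOT[n]		(initially points back here)
 *			pushq	$pltndx		(relocation index)
 *			jmp	.plt0
 *	.plt0:		pushq	GOT[1]		(link-map address)
 *			jmp	*GOT[2]		(elf_rtbndr)
 *
 * elf_rtbndr() calls elf_bndr(lmp, pltndx, from); the value returned is
 * the resolved address, to which control is finally transferred.
 */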
ulong_t
elf_bndr(Rt_map *lmp, ulong_t pltndx, caddr_t from)
{
	Rt_map		*nlmp, *llmp;
	ulong_t		addr, reloff, symval, rsymndx;
	char		*name;
	Rela		*rptr;
	Sym		*rsym, *nsym;
	uint_t		binfo, sb_flags = 0, dbg_class;
	Slookup		sl;
	Sresult		sr;
	int		entry, lmflags;
	Lm_list		*lml;

	/*
	 * For compatibility with libthread (TI_VERSION 1) we track the entry
	 * value.  A zero value indicates we have recursed into ld.so.1 to
	 * further process a locking request.  Under this recursion we disable
	 * tsort and cleanup activities.
	 */
	entry = enter(0);

	lml = LIST(lmp);
	if ((lmflags = lml->lm_flags) & LML_FLG_RTLDLM) {
		dbg_class = dbg_desc->d_class;
		dbg_desc->d_class = 0;
	}

	/*
	 * Perform some basic sanity checks.  If we didn't get a load map, or
	 * the relocation offset is invalid, then it's possible someone has
	 * walked over the .got entries or jumped to plt0 out of the blue.
	 */
	if ((lmp == NULL) || (pltndx >=
	    (ulong_t)PLTRELSZ(lmp) / (ulong_t)RELENT(lmp))) {
		Conv_inv_buf_t inv_buf;

		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_PLTREF),
		    conv_reloc_amd64_type(R_AMD64_JUMP_SLOT, 0, &inv_buf),
		    EC_NATPTR(lmp), EC_XWORD(pltndx), EC_NATPTR(from));
		rtldexit(lml, 1);
	}
	reloff = pltndx * (ulong_t)RELENT(lmp);

	/*
	 * Use relocation entry to get symbol table entry and symbol name.
	 */
	addr = (ulong_t)JMPREL(lmp);
	rptr = (Rela *)(addr + reloff);
	rsymndx = ELF_R_SYM(rptr->r_info);
	rsym = (Sym *)((ulong_t)SYMTAB(lmp) + (rsymndx * SYMENT(lmp)));
	name = (char *)(STRTAB(lmp) + rsym->st_name);

	/*
	 * Determine the last link-map of this list; this'll be the starting
	 * point for any tsort() processing.
	 */
	llmp = lml->lm_tail;

	/*
	 * Find definition for symbol.  Initialize the symbol lookup, and
	 * symbol result, data structures.
	 */
	SLOOKUP_INIT(sl, name, lmp, lml->lm_head, ld_entry_cnt, 0,
	    rsymndx, rsym, 0, LKUP_DEFT);
	SRESULT_INIT(sr, name);

	if (lookup_sym(&sl, &sr, &binfo, NULL) == 0) {
		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_NOSYM), NAME(lmp),
		    demangle(name));
		rtldexit(lml, 1);
	}

	name = (char *)sr.sr_name;
	nlmp = sr.sr_dmap;
	nsym = sr.sr_sym;

	symval = nsym->st_value;

	if (!(FLAGS(nlmp) & FLG_RT_FIXED) &&
	    (nsym->st_shndx != SHN_ABS))
		symval += ADDR(nlmp);
	if ((lmp != nlmp) && ((FLAGS1(nlmp) & FL1_RT_NOINIFIN) == 0)) {
		/*
		 * Record that this new link map is now bound to the caller.
		 */
		if (bind_one(lmp, nlmp, BND_REFER) == 0)
			rtldexit(lml, 1);
	}

	if ((lml->lm_tflags | AFLAGS(lmp)) & LML_TFLG_AUD_SYMBIND) {
		uint_t	symndx = (((uintptr_t)nsym -
		    (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));
		symval = audit_symbind(lmp, nlmp, nsym, symndx, symval,
		    &sb_flags);
	}

	if (!(rtld_flags & RT_FL_NOBIND)) {
		addr = rptr->r_offset;
		if (!(FLAGS(lmp) & FLG_RT_FIXED))
			addr += ADDR(lmp);
		if (((lml->lm_tflags | AFLAGS(lmp)) &
		    (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
		    AUDINFO(lmp)->ai_dynplts) {
			int	fail = 0;
			uint_t	pltndx = reloff / sizeof (Rela);
			uint_t	symndx = (((uintptr_t)nsym -
			    (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));

			symval = (ulong_t)elf_plt_trace_write(addr, lmp, nlmp,
			    nsym, symndx, pltndx, (caddr_t)symval, sb_flags,
			    &fail);
			if (fail)
				rtldexit(lml, 1);
		} else {
			/*
			 * Write standard PLT entry to jump directly
			 * to newly bound function.
			 */
			*(ulong_t *)addr = symval;
		}
	}

	/*
	 * Print binding information and rebuild PLT entry.
	 */
	DBG_CALL(Dbg_bind_global(lmp, (Addr)from, (Off)(from - ADDR(lmp)),
	    (Xword)(reloff / sizeof (Rela)), PLT_T_FULL, nlmp, (Addr)symval,
	    nsym->st_value, name, binfo));

	/*
	 * Complete any processing for newly loaded objects.  Note we don't
	 * know exactly where any new objects are loaded (we know the object
	 * that supplied the symbol, but others may have been loaded lazily as
	 * we searched for the symbol), so sorting starts from the last
	 * link-map known on entry to this routine.
	 */
	if (entry)
		load_completion(llmp);

	/*
	 * Some operations like dldump() or dlopen()'ing a relocatable object
	 * result in objects being loaded on rtld's link-map; make sure these
	 * objects are initialized also.
	 */
	if ((LIST(nlmp)->lm_flags & LML_FLG_RTLDLM) && LIST(nlmp)->lm_init)
		load_completion(nlmp);

	/*
	 * Make sure the object to which we've bound has had its .init fired.
	 * Cleanup before return to user code.
	 */
	if (entry) {
		is_dep_init(nlmp, lmp);
		leave(lml, 0);
	}

	if (lmflags & LML_FLG_RTLDLM)
		dbg_desc->d_class = dbg_class;

	return (symval);
}

/*
 * Read and process the relocations for one link object; we assume all
 * relocation sections for loadable segments are stored contiguously in
 * the file.
 */
int
elf_reloc(Rt_map *lmp, uint_t plt, int *in_nfavl, APlist **textrel)
{
	ulong_t		relbgn, relend, relsiz, basebgn;
	ulong_t		pltbgn, pltend, _pltbgn, _pltend;
	ulong_t		roffset, rsymndx, psymndx = 0;
	ulong_t		dsymndx;
	uchar_t		rtype;
	long		reladd, value, pvalue;
	Sym		*symref, *psymref, *symdef, *psymdef;
	char		*name, *pname;
	Rt_map		*_lmp, *plmp;
	int		ret = 1, noplt = 0;
	int		relacount = RELACOUNT(lmp), plthint = 0;
	Rela		*rel;
	uint_t		binfo, pbinfo;
	APlist		*bound = NULL;

	/*
	 * Although only necessary for lazy binding, initialize the first
	 * global offset entry to go to elf_rtbndr().  dbx(1) seems
	 * to find this useful.
	 */
	if ((plt == 0) && PLTGOT(lmp)) {
		mmapobj_result_t	*mpp;

		/*
		 * Make sure the segment is writable.
		 */
		if ((((mpp =
		    find_segment((caddr_t)PLTGOT(lmp), lmp)) != NULL) &&
		    ((mpp->mr_prot & PROT_WRITE) == 0)) &&
		    ((set_prot(lmp, mpp, 1) == 0) ||
		    (aplist_append(textrel, mpp, AL_CNT_TEXTREL) == NULL)))
			return (0);

		elf_plt_init(PLTGOT(lmp), (caddr_t)lmp);
	}

	/*
	 * Initialize the plt start and end addresses.
	 */
	if ((pltbgn = (ulong_t)JMPREL(lmp)) != 0)
		pltend = pltbgn + (ulong_t)(PLTRELSZ(lmp));

	relsiz = (ulong_t)(RELENT(lmp));
	basebgn = ADDR(lmp);

	if (PLTRELSZ(lmp))
		plthint = PLTRELSZ(lmp) / relsiz;

	/*
	 * If we've been called upon to promote an RTLD_LAZY object to an
	 * RTLD_NOW, then we're only interested in scanning the .plt table.
	 * An uninitialized .plt is the case where the associated got entry
	 * points back to the plt itself.  Determine the range of the real .plt
	 * entries using the _PROCEDURE_LINKAGE_TABLE_ symbol.
	 */
	if (plt) {
		Slookup	sl;
		Sresult	sr;

		relbgn = pltbgn;
		relend = pltend;
		if (!relbgn || (relbgn == relend))
			return (1);

		/*
		 * Initialize the symbol lookup, and symbol result, data
		 * structures.
		 */
		SLOOKUP_INIT(sl, MSG_ORIG(MSG_SYM_PLT), lmp, lmp, ld_entry_cnt,
		    elf_hash(MSG_ORIG(MSG_SYM_PLT)), 0, 0, 0, LKUP_DEFT);
		SRESULT_INIT(sr, MSG_ORIG(MSG_SYM_PLT));

		if (elf_find_sym(&sl, &sr, &binfo, NULL) == 0)
			return (1);

		symdef = sr.sr_sym;
		_pltbgn = symdef->st_value;
		if (!(FLAGS(lmp) & FLG_RT_FIXED) &&
		    (symdef->st_shndx != SHN_ABS))
			_pltbgn += basebgn;
		_pltend = _pltbgn + (((PLTRELSZ(lmp) / relsiz)) *
		    M_PLT_ENTSIZE) + M_PLT_RESERVSZ;

	} else {
		/*
		 * The relocation sections appear to the run-time linker as a
		 * single table.  Determine the address of the beginning and end
		 * of this table.  There are two different interpretations of
		 * the ABI at this point:
		 *
		 *   o	The REL table and its associated RELSZ indicate the
		 *	concatenation of *all* relocation sections (this is the
		 *	model our link-editor constructs).
		 *
		 *   o	The REL table and its associated RELSZ indicate the
		 *	concatenation of all *but* the .plt relocations.  These
		 *	relocations are specified individually by the JMPREL and
		 *	PLTRELSZ entries.
		 *
		 * Determine from our knowledge of the relocation range and
		 * .plt range, the range of the total relocation table.  Note
		 * that one other ABI assumption seems to be that the .plt
		 * relocations always follow any other relocations; the
		 * following range checking drops that assumption.
		 */
		relbgn = (ulong_t)(REL(lmp));
		relend = relbgn + (ulong_t)(RELSZ(lmp));
		if (pltbgn) {
			if (!relbgn || (relbgn > pltbgn))
				relbgn = pltbgn;
			if (!relbgn || (relend < pltend))
				relend = pltend;
		}
	}
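
	/*
	 * For example (illustrative figures only): with REL = 0x400,
	 * JMPREL = 0x700 and PLTRELSZ = 0x180, the first interpretation
	 * implies RELSZ = 0x480, so [relbgn, relend) = [0x400, 0x880)
	 * already spans the .plt relocations, while the second implies
	 * RELSZ = 0x300, and the checks above extend relend from 0x700
	 * to pltend = 0x880.  Either way the loop below walks the same
	 * total range.
	 */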
	if (!relbgn || (relbgn == relend)) {
		DBG_CALL(Dbg_reloc_run(lmp, 0, plt, DBG_REL_NONE));
		return (1);
	}
	DBG_CALL(Dbg_reloc_run(lmp, M_REL_SHT_TYPE, plt, DBG_REL_START));

	/*
	 * If we're processing a dynamic executable in lazy mode there is no
	 * need to scan the .rela.plt table; however, if we're processing a
	 * shared object in lazy mode the .got addresses associated with each
	 * .plt must be relocated to reflect the location of the shared object.
	 */
	if (pltbgn && ((MODE(lmp) & RTLD_NOW) == 0) &&
	    (FLAGS(lmp) & FLG_RT_FIXED))
		noplt = 1;

	/*
	 * Loop through relocations.
	 */
	while (relbgn < relend) {
		mmapobj_result_t	*mpp;
		uint_t			sb_flags = 0;

		rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info, M_MACH);

		/*
		 * If this is a RELATIVE relocation in a shared object (the
		 * common case), and if we are not debugging, then jump into a
		 * tighter relocation loop (elf_reloc_relative).
		 */
		if ((rtype == R_AMD64_RELATIVE) &&
		    ((FLAGS(lmp) & FLG_RT_FIXED) == 0) && (DBG_ENABLED == 0)) {
			if (relacount) {
				relbgn = elf_reloc_relative_count(relbgn,
				    relacount, relsiz, basebgn, lmp, textrel);
				relacount = 0;
			} else {
				relbgn = elf_reloc_relative(relbgn, relend,
				    relsiz, basebgn, lmp, textrel);
			}
			if (relbgn >= relend)
				break;
			rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info, M_MACH);
		}

		roffset = ((Rela *)relbgn)->r_offset;

		/*
		 * If this is a shared object, add the base address to offset.
		 */
		if (!(FLAGS(lmp) & FLG_RT_FIXED)) {
			/*
			 * If we're processing lazy bindings, we have to step
			 * through the plt entries and add the base address
			 * to the corresponding got entry.
			 */
			if (plthint && (plt == 0) &&
			    (rtype == R_AMD64_JUMP_SLOT) &&
			    ((MODE(lmp) & RTLD_NOW) == 0)) {
				/*
				 * The PLT relocations (for lazy bindings)
				 * are additive to what's already in the GOT.
				 * This differs from what happens in
				 * elf_reloc_relative_count(), which is why
				 * we just do it inline here.
				 */
				for (roffset = ((Rela *)relbgn)->r_offset;
				    plthint; plthint--) {
					roffset += basebgn;

					/*
					 * Perform the actual relocation.
					 */
					*((ulong_t *)roffset) += basebgn;

					relbgn += relsiz;
					roffset = ((Rela *)relbgn)->r_offset;
				}
				continue;
			}
			roffset += basebgn;
		}

		reladd = (long)(((Rela *)relbgn)->r_addend);
		rsymndx = ELF_R_SYM(((Rela *)relbgn)->r_info);
		rel = (Rela *)relbgn;
		relbgn += relsiz;

		/*
		 * Optimizations.
		 */
		if (rtype == R_AMD64_NONE)
			continue;
		if (noplt && ((ulong_t)rel >= pltbgn) &&
		    ((ulong_t)rel < pltend)) {
			relbgn = pltend;
			continue;
		}

		/*
		 * If we're promoting plts, determine if this one has already
		 * been written.
		 */
		if (plt && ((*(ulong_t *)roffset < _pltbgn) ||
		    (*(ulong_t *)roffset > _pltend)))
			continue;

		/*
		 * If this relocation is not against part of the image
		 * mapped into memory we skip it.
		 */
		if ((mpp = find_segment((caddr_t)roffset, lmp)) == NULL) {
			elf_reloc_bad(lmp, (void *)rel, rtype, roffset,
			    rsymndx);
			continue;
		}

		binfo = 0;
		/*
		 * If a symbol index is specified then get the symbol table
		 * entry, locate the symbol definition, and determine its
		 * address.
		 */
		if (rsymndx) {
			/*
			 * Get the local symbol table entry.
			 */
			symref = (Sym *)((ulong_t)SYMTAB(lmp) +
			    (rsymndx * SYMENT(lmp)));

			/*
			 * If this is a local symbol, just use the base address.
			 * (we should have no local relocations in the
			 * executable).
			 */
			if (ELF_ST_BIND(symref->st_info) == STB_LOCAL) {
				value = basebgn;
				name = NULL;

				/*
				 * Special case TLS relocations.
				 */
				if (rtype == R_AMD64_DTPMOD64) {
					/*
					 * Use the TLS modid.
					 */
					value = TLSMODID(lmp);

				} else if ((rtype == R_AMD64_TPOFF64) ||
				    (rtype == R_AMD64_TPOFF32)) {
					if ((value = elf_static_tls(lmp, symref,
					    rel, rtype, 0, roffset, 0)) == 0) {
						ret = 0;
						break;
					}
				}
			} else {
				/*
				 * If the symbol index is the same as that of
				 * the previously processed relocation, reuse
				 * the previous values.  (Note that there have
				 * been cases where a relocation exists against
				 * a copy relocation symbol; our ld(1) should
				 * optimize this away, but make sure we don't
				 * use the same symbol information should this
				 * case exist).
				 */
				if ((rsymndx == psymndx) &&
				    (rtype != R_AMD64_COPY)) {
					/* LINTED */
					if (psymdef == 0) {
						DBG_CALL(Dbg_bind_weak(lmp,
						    (Addr)roffset, (Addr)
						    (roffset - basebgn), name));
						continue;
					}
					/* LINTED */
					value = pvalue;
					/* LINTED */
					name = pname;
					/* LINTED */
					symdef = psymdef;
					/* LINTED */
					symref = psymref;
					/* LINTED */
					_lmp = plmp;
					/* LINTED */
					binfo = pbinfo;

					if ((LIST(_lmp)->lm_tflags |
					    AFLAGS(_lmp)) &
					    LML_TFLG_AUD_SYMBIND) {
						value = audit_symbind(lmp, _lmp,
						    /* LINTED */
						    symdef, dsymndx, value,
						    &sb_flags);
					}
				} else {
					Slookup		sl;
					Sresult		sr;

					/*
					 * Lookup the symbol definition.
					 * Initialize the symbol lookup, and
					 * symbol result, data structure.
					 */
					name = (char *)(STRTAB(lmp) +
					    symref->st_name);

					SLOOKUP_INIT(sl, name, lmp, 0,
					    ld_entry_cnt, 0, rsymndx, symref,
					    rtype, LKUP_STDRELOC);
					SRESULT_INIT(sr, name);
					symdef = NULL;

					if (lookup_sym(&sl, &sr, &binfo,
					    in_nfavl)) {
						name = (char *)sr.sr_name;
						_lmp = sr.sr_dmap;
						symdef = sr.sr_sym;
					}

					/*
					 * If the symbol is not found and the
					 * reference was not to a weak symbol,
					 * report an error.  Weak references
					 * may be unresolved.
					 */
					/* BEGIN CSTYLED */
					if (symdef == 0) {
					    if (sl.sl_bind != STB_WEAK) {
						if (elf_reloc_error(lmp, name,
						    rel, binfo))
							continue;

						ret = 0;
						break;

					    } else {
						psymndx = rsymndx;
						psymdef = 0;

						DBG_CALL(Dbg_bind_weak(lmp,
						    (Addr)roffset, (Addr)
						    (roffset - basebgn), name));
						continue;
					    }
					}
					/* END CSTYLED */

					/*
					 * If symbol was found in an object
					 * other than the referencing object
					 * then record the binding.
					 */
					if ((lmp != _lmp) && ((FLAGS1(_lmp) &
					    FL1_RT_NOINIFIN) == 0)) {
						if (aplist_test(&bound, _lmp,
						    AL_CNT_RELBIND) == 0) {
							ret = 0;
							break;
						}
					}

					/*
					 * Calculate the location of definition;
					 * symbol value plus base address of
					 * containing shared object.
					 */
					if (IS_SIZE(rtype))
						value = symdef->st_size;
					else
						value = symdef->st_value;

					if (!(FLAGS(_lmp) & FLG_RT_FIXED) &&
					    !(IS_SIZE(rtype)) &&
					    (symdef->st_shndx != SHN_ABS) &&
					    (ELF_ST_TYPE(symdef->st_info) !=
					    STT_TLS))
						value += ADDR(_lmp);

					/*
					 * Retain this symbol index and the
					 * value in case it can be used for the
					 * subsequent relocations.
					 */
					if (rtype != R_AMD64_COPY) {
						psymndx = rsymndx;
						pvalue = value;
						pname = name;
						psymdef = symdef;
						psymref = symref;
						plmp = _lmp;
						pbinfo = binfo;
					}
					if ((LIST(_lmp)->lm_tflags |
					    AFLAGS(_lmp)) &
					    LML_TFLG_AUD_SYMBIND) {
						dsymndx = (((uintptr_t)symdef -
						    (uintptr_t)SYMTAB(_lmp)) /
						    SYMENT(_lmp));
						value = audit_symbind(lmp, _lmp,
						    symdef, dsymndx, value,
						    &sb_flags);
					}
				}

				/*
				 * If relocation is PC-relative, subtract
				 * offset address.
				 */
				if (IS_PC_RELATIVE(rtype))
					value -= roffset;

				/*
				 * Special case TLS relocations.
				 */
				if (rtype == R_AMD64_DTPMOD64) {
					/*
					 * Relocation value is the TLS modid.
					 */
					value = TLSMODID(_lmp);

				} else if ((rtype == R_AMD64_TPOFF64) ||
				    (rtype == R_AMD64_TPOFF32)) {
					if ((value = elf_static_tls(_lmp,
					    symdef, rel, rtype, name, roffset,
					    value)) == 0) {
						ret = 0;
						break;
					}
				}
			}
		} else {
			/*
			 * Special cases.
			 */
			if (rtype == R_AMD64_DTPMOD64) {
				/*
				 * TLS relocation value is the TLS modid.
				 */
				value = TLSMODID(lmp);
			} else
				value = basebgn;

			name = NULL;
		}

		DBG_CALL(Dbg_reloc_in(LIST(lmp), ELF_DBG_RTLD, M_MACH,
		    M_REL_SHT_TYPE, rel, NULL, 0, name));

		/*
		 * Make sure the segment is writable.
		 */
		if (((mpp->mr_prot & PROT_WRITE) == 0) &&
		    ((set_prot(lmp, mpp, 1) == 0) ||
		    (aplist_append(textrel, mpp, AL_CNT_TEXTREL) == NULL))) {
			ret = 0;
			break;
		}

		/*
		 * Call relocation routine to perform required relocation.
		 */
		switch (rtype) {
		case R_AMD64_COPY:
			if (elf_copy_reloc(name, symref, lmp, (void *)roffset,
			    symdef, _lmp, (const void *)value) == 0)
				ret = 0;
			break;
		case R_AMD64_JUMP_SLOT:
			if (((LIST(lmp)->lm_tflags | AFLAGS(lmp)) &
			    (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
			    AUDINFO(lmp)->ai_dynplts) {
				int	fail = 0;
				int	pltndx = (((ulong_t)rel -
				    (uintptr_t)JMPREL(lmp)) / relsiz);
				int	symndx = (((uintptr_t)symdef -
				    (uintptr_t)SYMTAB(_lmp)) / SYMENT(_lmp));

				(void) elf_plt_trace_write(roffset, lmp, _lmp,
				    symdef, symndx, pltndx, (caddr_t)value,
				    sb_flags, &fail);
				if (fail)
					ret = 0;
			} else {
				/*
				 * Write standard PLT entry to jump directly
				 * to newly bound function.
				 */
				DBG_CALL(Dbg_reloc_apply_val(LIST(lmp),
				    ELF_DBG_RTLD, (Xword)roffset,
				    (Xword)value));
				*(ulong_t *)roffset = value;
			}
			break;
		default:
			value += reladd;
			/*
			 * Write the relocation out.
			 */
			if (do_reloc_rtld(rtype, (uchar_t *)roffset,
			    (Xword *)&value, name, NAME(lmp), LIST(lmp)) == 0)
				ret = 0;

			DBG_CALL(Dbg_reloc_apply_val(LIST(lmp), ELF_DBG_RTLD,
			    (Xword)roffset, (Xword)value));
		}

		if ((ret == 0) &&
		    ((LIST(lmp)->lm_flags & LML_FLG_TRC_WARN) == 0))
			break;

		if (binfo) {
			DBG_CALL(Dbg_bind_global(lmp, (Addr)roffset,
			    (Off)(roffset - basebgn), (Xword)(-1), PLT_T_FULL,
			    _lmp, (Addr)value, symdef->st_value, name, binfo));
		}
	}

	return (relocate_finish(lmp, bound, ret));
}

/*
 * Initialize the first few got entries so that function calls go to
 * elf_rtbndr:
 *
 *	GOT[GOT_XLINKMAP] =	the address of the link map
 *	GOT[GOT_XRTLD] =	the address of rtbinder
 */
void
elf_plt_init(void *got, caddr_t l)
{
	uint64_t	*_got;
	/* LINTED */
	Rt_map		*lmp = (Rt_map *)l;

	_got = (uint64_t *)got + M_GOT_XLINKMAP;
	*_got = (uint64_t)lmp;
	_got = (uint64_t *)got + M_GOT_XRTLD;
	*_got = (uint64_t)elf_rtbndr;
}
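
/*
 * For reference, a sketch of the reserved GOT entries this leaves in
 * place, assuming the usual amd64 values M_GOT_XLINKMAP = 1 and
 * M_GOT_XRTLD = 2 (GOT[0] holds the address of _DYNAMIC and is filled
 * in by the link-editor):
 *
 *	GOT[0]	&_DYNAMIC
 *	GOT[1]	address of this object's link map
 *	GOT[2]	address of elf_rtbndr
 *
 * This is the layout the PLT0 sketch before elf_bndr() relies on.
 */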

/*
 * Plt writing interface to allow debugging initialization to be generic.
 */
Pltbindtype
/* ARGSUSED1 */
elf_plt_write(uintptr_t addr, uintptr_t vaddr, void *rptr, uintptr_t symval,
	Xword pltndx)
{
	Rela		*rel = (Rela *)rptr;
	uintptr_t	pltaddr;

	pltaddr = addr + rel->r_offset;
	*(ulong_t *)pltaddr = (ulong_t)symval + rel->r_addend;
	DBG_CALL(pltcntfull++);
	return (PLT_T_FULL);
}
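
/*
 * Note that on amd64 a "full" PLT binding amounts to the single 64-bit
 * GOT store above; the PLT instruction sequence itself is never
 * rewritten (unlike sparc, for example, where the PLT instructions are
 * patched in place).
 */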

/*
 * Provide a machine specific interface to the conversion routine.  By calling
 * the machine specific version, rather than the generic version, we ensure
 * that the data tables/strings for all known machine versions aren't dragged
 * into ld.so.1.
 */
const char *
_conv_reloc_type(uint_t rel)
{
	static Conv_inv_buf_t	inv_buf;

	return (conv_reloc_amd64_type(rel, 0, &inv_buf));
}
970