sparc_elf.c revision 11827:d7ef53deac3f
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
24 * Use is subject to license terms.
25 */
26
27/*
28 * SPARC V9 machine dependent and ELF file class dependent functions.
29 * Contains routines for performing function binding and symbol relocations.
30 */
31
32#include	<stdio.h>
33#include	<sys/elf.h>
34#include	<sys/elf_SPARC.h>
35#include	<sys/mman.h>
36#include	<dlfcn.h>
37#include	<synch.h>
38#include	<string.h>
39#include	<debug.h>
40#include	<reloc.h>
41#include	<conv.h>
42#include	"_rtld.h"
43#include	"_audit.h"
44#include	"_elf.h"
45#include	"_inline.h"
46#include	"msg.h"
47
48extern void	iflush_range(caddr_t, size_t);
49extern void	plt_upper_32(uintptr_t, uintptr_t);
50extern void	plt_upper_44(uintptr_t, uintptr_t);
51extern void	plt_full_range(uintptr_t, uintptr_t);
52extern void	elf_rtbndr(Rt_map *, ulong_t, caddr_t);
53extern void	elf_rtbndr_far(Rt_map *, ulong_t, caddr_t);
54
55int
56elf_mach_flags_check(Rej_desc *rej, Ehdr *ehdr)
57{
58	/*
59	 * Check machine type and flags.
60	 */
61	if (ehdr->e_flags & EF_SPARC_EXT_MASK) {
62		/*
63		 * Check vendor-specific extensions.
64		 */
65		if (ehdr->e_flags & EF_SPARC_HAL_R1) {
66			rej->rej_type = SGS_REJ_HAL;
67			rej->rej_info = (uint_t)ehdr->e_flags;
68			return (0);
69		}
70		if ((ehdr->e_flags & EF_SPARC_SUN_US3) & ~at_flags) {
71			rej->rej_type = SGS_REJ_US3;
72			rej->rej_info = (uint_t)ehdr->e_flags;
73			return (0);
74		}
75
76		/*
77		 * Generic check.
78		 * All of our 64-bit SPARCs support the US1 (UltraSPARC 1)
79		 * instructions, so that bit isn't worth checking for explicitly.
80		 */
81		if ((ehdr->e_flags & EF_SPARC_EXT_MASK) & ~at_flags) {
82			rej->rej_type = SGS_REJ_BADFLAG;
83			rej->rej_info = (uint_t)ehdr->e_flags;
84			return (0);
85		}
86	} else if ((ehdr->e_flags & ~EF_SPARCV9_MM) != 0) {
87		rej->rej_type = SGS_REJ_BADFLAG;
88		rej->rej_info = (uint_t)ehdr->e_flags;
89		return (0);
90	}
91	return (1);
92}
93
94
95void
96ldso_plt_init(Rt_map *lmp)
97{
98	/*
99	 * There is no need to analyze ld.so because we don't map in any of
100	 * its dependencies.  However, we may map in these dependencies later
101	 * (as if ld.so had dlopened them), so initialize the plt and the
102	 * permission information.
103	 */
104	if (PLTGOT(lmp)) {
105		Xword pltoff;
106
107		/*
108		 * Install the lm pointer in .PLT2 as per the ABI.
109		 */
110		pltoff = (2 * M_PLT_ENTSIZE) / M_PLT_INSSIZE;
111		elf_plt2_init(PLTGOT(lmp) + pltoff, lmp);
112
113		/*
114		 * The V9 ABI states that the first 32k PLT entries
115		 * use .PLT1, with .PLT0 used by the "latter" entries.
116		 * We don't currently implement the extended format,
117		 * so install an error handler in .PLT0 to catch anyone
118		 * trying to use it.
119		 */
120		elf_plt_init(PLTGOT(lmp), (caddr_t)elf_rtbndr_far);
121
122		/*
123		 * Initialize .PLT1
124		 */
125		pltoff = M_PLT_ENTSIZE / M_PLT_INSSIZE;
126		elf_plt_init(PLTGOT(lmp) + pltoff, (caddr_t)elf_rtbndr);
127	}
128}
129
130/*
131 * elf_plt_write() will test to see how far away our destination
132 *	address lies.  If it is close enough that a branch can
133 *	be used instead of a jmpl, we will fill the plt in with a
134 * 	single branch.  The branches are much quicker than
135 *	a jmpl instruction - see bug#4356879 for further
136 *	details.
137 *
138 *	NOTE: we pass in both a 'pltaddr' and a 'vpltaddr' since
139 *		librtld/dldump update PLTs whose physical
140 *		address is not the same as the 'virtual' runtime
141 *		address.
142 */
143Pltbindtype
144elf_plt_write(uintptr_t addr, uintptr_t vaddr, void *rptr, uintptr_t symval,
145	Xword pltndx)
146{
147	Rela		*rel = (Rela *)rptr;
148	uintptr_t	nsym = ~symval;
149	uintptr_t	vpltaddr, pltaddr;
150	long		disp;
151
152
153	pltaddr = addr + rel->r_offset;
154	vpltaddr = vaddr + rel->r_offset;
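	/*
	 * A branch written into this entry goes in its second word
	 * (vpltaddr + 4), so the displacement is measured from that
	 * instruction.
	 */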
155	disp = symval - vpltaddr - 4;
156
157	if (pltndx >= (M64_PLT_NEARPLTS - M_PLT_XNumber)) {
158		*((Sxword *)pltaddr) = (uintptr_t)symval +
159		    (uintptr_t)rel->r_addend - vaddr;
160		DBG_CALL(pltcntfar++);
161		return (PLT_T_FAR);
162	}
163
164	/*
165	 * Test if the destination address is close enough to use
166	 * a ba,a... instruction to reach it.
167	 */
168	if (S_INRANGE(disp, 23) && !(rtld_flags & RT_FL_NOBAPLT)) {
169		uint_t		*pltent, bainstr;
170		Pltbindtype	rc;
171
172		pltent = (uint_t *)pltaddr;
173
174		/*
175		 * The
176		 *
177		 *	ba,a,pt %icc, <dest>
178		 *
179		 * is the most efficient of the PLT formats.  If the
180		 * displacement is within +-2^20 bytes, use that branch.
181		 */
182		if (S_INRANGE(disp, 20)) {
183			bainstr = M_BA_A_PT;	/* ba,a,pt %icc,<dest> */
184			/* LINTED */
185			bainstr |= (uint_t)(S_MASK(19) & (disp >> 2));
186			rc = PLT_T_21D;
187			DBG_CALL(pltcnt21d++);
188		} else {
189			/*
190			 * Otherwise - we fall back to the good old
191			 *
192			 *	ba,a	<dest>
193			 *
194			 * Which still beats a jmpl instruction.
195			 */
196			bainstr = M_BA_A;		/* ba,a <dest> */
197			/* LINTED */
198			bainstr |= (uint_t)(S_MASK(22) & (disp >> 2));
199			rc = PLT_T_24D;
200			DBG_CALL(pltcnt24d++);
201		}
202
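		/*
		 * Update the entry back to front:  the branch in the second
		 * word is written and flushed before the first word is
		 * replaced with a nop, so a thread racing through the old
		 * PLT sequence never executes a half-updated entry.
		 */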
203		pltent[2] = M_NOP;		/* nop instr */
204		pltent[1] = bainstr;
205
206		iflush_range((char *)(&pltent[1]), 4);
207		pltent[0] = M_NOP;		/* nop instr */
208		iflush_range((char *)(&pltent[0]), 4);
209		return (rc);
210	}
211
212	if ((nsym >> 32) == 0) {
213		plt_upper_32(pltaddr, symval);
214		DBG_CALL(pltcntu32++);
215		return (PLT_T_U32);
216	}
217
218	if ((nsym >> 44) == 0) {
219		plt_upper_44(pltaddr, symval);
220		DBG_CALL(pltcntu44++);
221		return (PLT_T_U44);
222	}
223
224	/*
225	 * The PLT destination is not in reach of
226	 * a branch instruction - so we fall back
227	 * to a 'jmpl' sequence.
228	 */
229	plt_full_range(pltaddr, symval);
230	DBG_CALL(pltcntfull++);
231	return (PLT_T_FULL);
232}
233
234/*
235 * Once relocated, the following 6-instruction sequence moves
236 * a 64-bit immediate value into register %g1.
237 */
238#define	VAL64_TO_G1 \
239/* 0x00 */	0x0b, 0x00, 0x00, 0x00,	/* sethi %hh(value), %g5 */ \
240/* 0x04 */	0x8a, 0x11, 0x60, 0x00,	/* or %g5, %hm(value), %g5 */ \
241/* 0x08 */	0x8b, 0x29, 0x70, 0x20,	/* sllx %g5, 32, %g5 */ \
242/* 0x0c */	0x03, 0x00, 0x00, 0x00,	/* sethi %lm(value), %g1 */ \
243/* 0x10 */	0x82, 0x10, 0x60, 0x00,	/* or %g1, %lo(value), %g1 */ \
244/* 0x14 */	0x82, 0x10, 0x40, 0x05	/* or %g1, %g5, %g1 */
245
246/*
247 * Local storage space created on the stack for this glue
248 * code includes space for:
249 *		0x8	pointer to dyn_data
250 *		0x8	size prev stack frame
251 */
252static const Byte dyn_plt_template[] = {
253/* 0x0 */	0x2a, 0xcf, 0x80, 0x03,	/* brnz,a,pt %fp, 0xc	*/
254/* 0x4 */	0x82, 0x27, 0x80, 0x0e,	/* sub %fp, %sp, %g1 */
255/* 0x8 */	0x82, 0x10, 0x20, 0xb0,	/* mov 176, %g1	*/
256/* 0xc */	0x9d, 0xe3, 0xbf, 0x40,	/* save %sp, -192, %sp	*/
257/* 0x10 */	0xc2, 0x77, 0xa7, 0xef,	/* stx %g1, [%fp + 2031] */
258
259					/* store prev stack size */
260/* 0x14 */	VAL64_TO_G1,		/* dyn_data to g1 */
261/* 0x2c */	0xc2, 0x77, 0xa7, 0xf7,	/* stx %g1, [%fp + 2039] */
262
263/* 0x30 */	VAL64_TO_G1,		/* elf_plt_trace() addr to g1 */
264
265					/* Call to elf_plt_trace() via g1 */
266/* 0x48 */	0x9f, 0xc0, 0x60, 0x00,	/* jmpl ! link r[15] to addr in g1 */
267/* 0x4c */	0x01, 0x00, 0x00, 0x00	/* nop ! for jmpl delay slot *AND* */
268					/*	to get 8-byte alignment */
269};
270
271int	dyn_plt_ent_size = sizeof (dyn_plt_template) +
272		sizeof (Addr) +		/* reflmp */
273		sizeof (Addr) +		/* deflmp */
274		sizeof (Word) +		/* symndx */
275		sizeof (Word) +		/* sb_flags */
276		sizeof (Sym);		/* symdef */
277
278/*
279 * the dynamic plt entry is:
280 *
281 *	brnz,a,pt	%fp, 1f
282 *	sub     	%fp, %sp, %g1
283 *	mov     	SA(MINFRAME), %g1
284 * 1:
285 *	save    	%sp, -(SA(MINFRAME) + (2 * CLONGSIZE)), %sp
286 *
287 *	! store prev stack size
288 *	stx     	%g1, [%fp + STACK_BIAS - (2 * CLONGSIZE)]
289 *
290 * 2:
291 *	! move dyn_data to %g1
292 *	sethi   	%hh(dyn_data), %g5
293 *	or      	%g5, %hm(dyn_data), %g5
294 *	sllx    	%g5, 32, %g5
295 *	sethi   	%lm(dyn_data), %g1
296 *	or      	%g1, %lo(dyn_data), %g1
297 *	or      	%g1, %g5, %g1
298 *
299 *	! store dyn_data ptr on frame (from %g1)
300 *	 stx     	%g1, [%fp + STACK_BIAS - CLONGSIZE]
301 *
302 *	! Move address of elf_plt_trace() into %g1
303 *	[Uses same 6 instructions as shown at label 2: above. Not shown.]
304 *
305 *	! Use JMPL to make the call. The CALL instruction is limited to
306 *	! 30 bits of displacement.
307 *	jmpl		%g1, %o7
308 *
309 *	! JMPL has a delay slot that must be filled. And, the sequence
310 *	! of instructions needs to have 8-byte alignment. This NOP does both.
311 *	! The alignment is needed for the data we put following the
312 *	! instruction.
313 *	nop
314 *
315 * dyn data:
316 *	Addr		reflmp
317 *	Addr		deflmp
318 *	Word		symndx
319 *	Word		sb_flags
320 *	Sym		symdef  (Elf64_Sym = 24-bytes)
321 */
322
323/*
324 * Relocate the instructions given by the VAL64_TO_G1 macro above.
325 * The arguments parallel those of do_reloc_rtld().
326 *
327 * entry:
328 *	off - Address of 1st instruction in sequence.
329 *	value - Value being relocated (addend)
330 *	sym - Name of value being relocated.
331 *	lml - link map list
332 *
333 * exit:
334 *	Returns TRUE for success, FALSE for failure.
335 */
336static int
337reloc_val64_to_g1(uchar_t *off, Addr *value, const char *sym, Lm_list *lml)
338{
339	Xword	tmp_value;
340
341	/*
342	 * relocating:
343	 *	sethi	%hh(value), %g5
344	 */
345	tmp_value = (Xword)value;
346	if (do_reloc_rtld(R_SPARC_HH22, off, &tmp_value, sym,
347	    MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
348		return (0);
349	}
350
351	/*
352	 * relocating:
353	 *	or	%g5, %hm(value), %g5
354	 */
355	tmp_value = (Xword)value;
356	if (do_reloc_rtld(R_SPARC_HM10, off + 4, &tmp_value, sym,
357	    MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
358		return (0);
359	}
360
361	/*
362	 * relocating:
363	 *	sethi	%lm(value), %g1
364	 */
365	tmp_value = (Xword)value;
366	if (do_reloc_rtld(R_SPARC_LM22, off + 12, &tmp_value, sym,
367	    MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
368		return (0);
369	}
370
371	/*
372	 * relocating:
373	 *	or	%g1, %lo(value), %g1
374	 */
375	tmp_value = (Xword)value;
376	if (do_reloc_rtld(R_SPARC_LO10, off + 16, &tmp_value, sym,
377	    MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
378		return (0);
379	}
380
381	return (1);
382}
383
384static caddr_t
385elf_plt_trace_write(caddr_t addr, Rela *rptr, Rt_map *rlmp, Rt_map *dlmp,
386    Sym *sym, uint_t symndx, ulong_t pltndx, caddr_t to, uint_t sb_flags,
387    int *fail)
388{
389	extern ulong_t	elf_plt_trace();
390	uchar_t		*dyn_plt;
391	uintptr_t	*dyndata;
392
393	/*
394	 * If both pltenter & pltexit have been disabled, there
395	 * is no reason to even create the glue code.
396	 */
397	if ((sb_flags & (LA_SYMB_NOPLTENTER | LA_SYMB_NOPLTEXIT)) ==
398	    (LA_SYMB_NOPLTENTER | LA_SYMB_NOPLTEXIT)) {
399		(void) elf_plt_write((uintptr_t)addr, (uintptr_t)addr,
400		    rptr, (uintptr_t)to, pltndx);
401		return (to);
402	}
403
404	/*
405	 * We only need to add the glue code if there is an auditing
406	 * library that is interested in this binding.
407	 */
408	dyn_plt = (uchar_t *)((uintptr_t)AUDINFO(rlmp)->ai_dynplts +
409	    (pltndx * dyn_plt_ent_size));
410
411	/*
412	 * Have we initialized this dynamic plt entry yet?  If we haven't, do it
413	 * now.  Otherwise this function has been called before, but from a
414	 * different plt (i.e., from another shared object).  In that case
415	 * we just set the plt to point to the new dyn_plt.
416	 */
417	if (*dyn_plt == 0) {
418		Sym	*symp;
419		Lm_list	*lml = LIST(rlmp);
420
421		(void) memcpy((void *)dyn_plt, dyn_plt_template,
422		    sizeof (dyn_plt_template));
423		dyndata = (uintptr_t *)((uintptr_t)dyn_plt +
424		    sizeof (dyn_plt_template));
425
426		/*
427		 * relocating:
428		 *	VAL64_TO_G1(dyndata)
429		 *	VAL64_TO_G1(&elf_plt_trace)
430		 */
431		if (!(reloc_val64_to_g1((dyn_plt + 0x14), dyndata,
432		    MSG_ORIG(MSG_SYM_LADYNDATA), lml) &&
433		    reloc_val64_to_g1((dyn_plt + 0x30), (Addr *)&elf_plt_trace,
434		    MSG_ORIG(MSG_SYM_ELFPLTTRACE), lml))) {
435			*fail = 1;
436			return (0);
437		}
438
439		*dyndata++ = (Addr)rlmp;
440		*dyndata++ = (Addr)dlmp;
441
442		/*
443		 * symndx in the high word, sb_flags in the low:  the Word store
		 * of symndx overwrites the big-endian upper half of the Addr
		 * holding sb_flags.
444		 */
445		*dyndata = (Addr)sb_flags;
446		*(Word *)dyndata = symndx;
447		dyndata++;
448
449		symp = (Sym *)dyndata;
450		*symp = *sym;
451		symp->st_value = (Addr)to;
452		iflush_range((void *)dyn_plt, sizeof (dyn_plt_template));
453	}
454
455	(void) elf_plt_write((uintptr_t)addr, (uintptr_t)addr, rptr,
456	    (uintptr_t)dyn_plt, pltndx);
457	return ((caddr_t)dyn_plt);
458}
459
460/*
461 * Function binding routine - invoked on the first call to a function through
462 * the procedure linkage table;
463 * passes first through an assembly language interface.
464 *
465 * Takes the address of the PLT entry where the call originated,
466 * the offset into the relocation table of the associated
467 * relocation entry and the address of the link map (rt_private_map struct)
468 * for the entry.
469 *
470 * Returns the address of the function referenced after re-writing the PLT
471 * entry to invoke the function directly.
472 *
473 * On error, causes process to terminate with a signal.
474 */
475ulong_t
476elf_bndr(Rt_map *lmp, ulong_t pltoff, caddr_t from)
477{
478	Rt_map		*nlmp, *llmp;
479	Addr		addr, vaddr, reloff, symval;
480	char		*name;
481	Rela		*rptr;
482	Sym		*rsym, *nsym;
483	Xword		pltndx;
484	uint_t		binfo, sb_flags = 0, dbg_class;
485	ulong_t		rsymndx;
486	Slookup		sl;
487	Sresult		sr;
488	Pltbindtype	pbtype;
489	int		entry, lmflags, farplt = 0;
490	Lm_list		*lml;
491
492	/*
493	 * For compatibility with libthread (TI_VERSION 1) we track the entry
494	 * value.  A zero value indicates we have recursed into ld.so.1 to
495	 * further process a locking request.  Under this recursion we disable
496	 * tsort and cleanup activities.
497	 */
498	entry = enter(0);
499
500	lml = LIST(lmp);
501	if ((lmflags = lml->lm_flags) & LML_FLG_RTLDLM) {
502		dbg_class = dbg_desc->d_class;
503		dbg_desc->d_class = 0;
504	}
505
506	/*
507	 * Must calculate true plt relocation address from reloc.
508	 * Take the offset, subtract the reserved PLT size, and divide
509	 * by PLT entry size, which should give the index of the plt
510	 * entry (and relocation entry since they have been defined to be
511	 * in the same order).  Then we must multiply by the size of
512	 * a relocation entry, which will give us the offset of the
513	 * plt relocation entry from the start of them given by JMPREL(lm).
514	 */
515	addr = pltoff - M_PLT_RESERVSZ;
516
517	if (pltoff < (M64_PLT_NEARPLTS * M_PLT_ENTSIZE)) {
518		pltndx = addr / M_PLT_ENTSIZE;
519	} else {
520		ulong_t	pltblockoff;
521
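		/*
		 * Far PLT entries are grouped into fixed-size blocks that
		 * follow the near PLT region.  Derive the index from the
		 * block number and this entry's position within its block,
		 * then subtract the reserved entries.
		 */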
522		pltblockoff = pltoff - (M64_PLT_NEARPLTS * M_PLT_ENTSIZE);
523		pltndx = M64_PLT_NEARPLTS +
524		    ((pltblockoff / M64_PLT_FBLOCKSZ) * M64_PLT_FBLKCNTS) +
525		    ((pltblockoff % M64_PLT_FBLOCKSZ) / M64_PLT_FENTSIZE) -
526		    M_PLT_XNumber;
527		farplt = 1;
528	}
529
530	/*
531	 * Perform some basic sanity checks.  If we didn't get a load map
532	 * or the plt offset is invalid then it's possible someone has walked
533	 * over the plt entries or jumped to plt[01] out of the blue.
534	 */
535	if (!lmp || (!farplt && (addr % M_PLT_ENTSIZE) != 0) ||
536	    (farplt && (addr % M_PLT_INSSIZE))) {
537		Conv_inv_buf_t	inv_buf;
538
539		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_PLTREF),
540		    conv_reloc_SPARC_type(R_SPARC_JMP_SLOT, 0, &inv_buf),
541		    EC_NATPTR(lmp), EC_XWORD(pltoff), EC_NATPTR(from));
542		rtldexit(lml, 1);
543	}
544	reloff = pltndx * sizeof (Rela);
545
546	/*
547	 * Use relocation entry to get symbol table entry and symbol name.
548	 */
549	addr = (ulong_t)JMPREL(lmp);
550	rptr = (Rela *)(addr + reloff);
551	rsymndx = ELF_R_SYM(rptr->r_info);
552	rsym = (Sym *)((ulong_t)SYMTAB(lmp) + (rsymndx * SYMENT(lmp)));
553	name = (char *)(STRTAB(lmp) + rsym->st_name);
554
555	/*
556	 * Determine the last link-map of this list; this will be the starting
557	 * point for any tsort() processing.
558	 */
559	llmp = lml->lm_tail;
560
561	/*
562	 * Find the definition for the symbol.  Initialize the symbol lookup
563	 * and symbol result data structures.
564	 */
565	SLOOKUP_INIT(sl, name, lmp, lml->lm_head, ld_entry_cnt, 0,
566	    rsymndx, rsym, 0, LKUP_DEFT);
567	SRESULT_INIT(sr, name);
568
569	if (lookup_sym(&sl, &sr, &binfo, NULL) == 0) {
570		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_NOSYM), NAME(lmp),
571		    demangle(name));
572		rtldexit(lml, 1);
573	}
574
575	name = (char *)sr.sr_name;
576	nlmp = sr.sr_dmap;
577	nsym = sr.sr_sym;
578
579	symval = nsym->st_value;
580
581	if (!(FLAGS(nlmp) & FLG_RT_FIXED) &&
582	    (nsym->st_shndx != SHN_ABS))
583		symval += ADDR(nlmp);
584	if ((lmp != nlmp) && ((FLAGS1(nlmp) & FL1_RT_NOINIFIN) == 0)) {
585		/*
586		 * Record that this new link map is now bound to the caller.
587		 */
588		if (bind_one(lmp, nlmp, BND_REFER) == 0)
589			rtldexit(lml, 1);
590	}
591
592	if ((lml->lm_tflags | AFLAGS(lmp)) & LML_TFLG_AUD_SYMBIND) {
593		/* LINTED */
594		uint_t	symndx = (uint_t)(((uintptr_t)nsym -
595		    (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));
596
597		symval = audit_symbind(lmp, nlmp, nsym, symndx, symval,
598		    &sb_flags);
599	}
600
601	if (FLAGS(lmp) & FLG_RT_FIXED)
602		vaddr = 0;
603	else
604		vaddr = ADDR(lmp);
605
606	pbtype = PLT_T_NONE;
607	if (!(rtld_flags & RT_FL_NOBIND)) {
608		if (((lml->lm_tflags | AFLAGS(lmp)) &
609		    (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
610		    AUDINFO(lmp)->ai_dynplts) {
611			int	fail = 0;
612			/* LINTED */
613			uint_t	symndx = (uint_t)(((uintptr_t)nsym -
614			    (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));
615
616			symval = (ulong_t)elf_plt_trace_write((caddr_t)vaddr,
617			    rptr, lmp, nlmp, nsym, symndx, pltndx,
618			    (caddr_t)symval, sb_flags, &fail);
619			if (fail)
620				rtldexit(lml, 1);
621		} else {
622			/*
623			 * Write standard PLT entry to jump directly
624			 * to newly bound function.
625			 */
626			pbtype = elf_plt_write((uintptr_t)vaddr,
627			    (uintptr_t)vaddr, rptr, symval, pltndx);
628		}
629	}
630
631	/*
632	 * Print binding information and rebuild PLT entry.
633	 */
634	DBG_CALL(Dbg_bind_global(lmp, (Addr)from, (Off)(from - ADDR(lmp)),
635	    (Xword)pltndx, pbtype, nlmp, (Addr)symval, nsym->st_value,
636	    name, binfo));
637
638	/*
639	 * Complete any processing for newly loaded objects.  Note we don't
640	 * know exactly where any new objects are loaded (we know the object
641	 * that supplied the symbol, but others may have been loaded lazily as
642	 * we searched for the symbol), so sorting starts from the last
643	 * link-map known on entry to this routine.
644	 */
645	if (entry)
646		load_completion(llmp);
647
648	/*
649	 * Some operations like dldump() or dlopen()'ing a relocatable object
650	 * result in objects being loaded on rtld's link-map; make sure these
651	 * objects are initialized also.
652	 */
653	if ((LIST(nlmp)->lm_flags & LML_FLG_RTLDLM) && LIST(nlmp)->lm_init)
654		load_completion(nlmp);
655
656	/*
657	 * Make sure the object to which we've bound has had its .init fired.
658	 * Clean up before returning to user code.
659	 */
660	if (entry) {
661		is_dep_init(nlmp, lmp);
662		leave(lml, 0);
663	}
664
665	if (lmflags & LML_FLG_RTLDLM)
666		dbg_desc->d_class = dbg_class;
667
668	return (symval);
669}
670
671static int
672bindpltpad(Rt_map *lmp, Alist **padlist, Addr value, void **pltaddr,
673    const char *fname, const char *sname)
674{
675	Aliste		idx = 0;
676	Pltpadinfo	ppi, *ppip;
677	void		*plt;
678	uintptr_t	pltoff;
679	Rela		rel;
680	int		i;
681
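	/*
	 * The pad list is kept sorted by target address; reuse an existing
	 * pad entry if one already targets this value, otherwise remember
	 * the insertion point.
	 */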
682	for (ALIST_TRAVERSE(*padlist, idx, ppip)) {
683		if (ppip->pp_addr == value) {
684			*pltaddr = ppip->pp_plt;
685			DBG_CALL(Dbg_bind_pltpad_from(lmp, (Addr)*pltaddr,
686			    sname));
687			return (1);
688		}
689		if (ppip->pp_addr > value)
690			break;
691	}
692
693	plt = PLTPAD(lmp);
694	pltoff = (uintptr_t)plt - (uintptr_t)ADDR(lmp);
695
696	PLTPAD(lmp) = (void *)((uintptr_t)PLTPAD(lmp) + M_PLT_ENTSIZE);
697
698	if (PLTPAD(lmp) > PLTPADEND(lmp)) {
699		/*
700		 * Just fail in the usual relocation way.
701		 */
702		*pltaddr = (void *)value;
703		return (1);
704	}
705	rel.r_offset = pltoff;
706	rel.r_info = 0;
707	rel.r_addend = 0;
708
709	/*
710	 * elf_plt_write assumes the plt was previously filled
711	 * with NOPs, so fill it in now.
712	 */
713	for (i = 0; i < (M_PLT_ENTSIZE / sizeof (uint_t)); i++) {
714		((uint_t *)plt)[i] = M_NOP;
715	}
716	iflush_range((caddr_t)plt, M_PLT_ENTSIZE);
717
718	(void) elf_plt_write(ADDR(lmp), ADDR(lmp), &rel, value, 0);
719
720	ppi.pp_addr = value;
721	ppi.pp_plt = plt;
722
723	if (alist_insert(padlist, &ppi, sizeof (Pltpadinfo),
724	    AL_CNT_PLTPAD, idx) == NULL)
725		return (0);
726
727	*pltaddr = plt;
728	DBG_CALL(Dbg_bind_pltpad_to(lmp, (Addr)*pltaddr, fname, sname));
729	return (1);
730}
731
732/*
733 * Read and process the relocations for one link object; we assume all
734 * relocation sections for loadable segments are stored contiguously in
735 * the file.
736 */
737int
738elf_reloc(Rt_map *lmp, uint_t plt, int *in_nfavl, APlist **textrel)
739{
740	ulong_t		relbgn, relend, relsiz, basebgn, pltbgn, pltend;
741	ulong_t		pltndx, roffset, rsymndx, psymndx = 0;
742	uint_t		dsymndx, binfo, pbinfo;
743	Byte		rtype;
744	long		reladd;
745	Addr		value, pvalue;
746	Sym		*symref, *psymref, *symdef, *psymdef;
747	char		*name, *pname;
748	Rt_map		*_lmp, *plmp;
749	int		ret = 1, noplt = 0;
750	long		relacount = RELACOUNT(lmp);
751	Rela		*rel;
752	Pltbindtype	pbtype;
753	Alist		*pltpadlist = NULL;
754	APlist		*bound = NULL;
755
756	/*
757	 * If an object has any DT_REGISTER entries associated with
758	 * it, they are processed now.
759	 */
760	if ((plt == 0) && (FLAGS(lmp) & FLG_RT_REGSYMS)) {
761		if (elf_regsyms(lmp) == 0)
762			return (0);
763	}
764
765	/*
766	 * Although only necessary for lazy binding, initialize the first
767	 * procedure linkage table entry to go to elf_rtbndr().  dbx(1) seems
768	 * to find this useful.
769	 */
770	if ((plt == 0) && PLTGOT(lmp)) {
771		mmapobj_result_t	*mpp;
772		Xword			pltoff;
773
774		/*
775		 * Make sure the segment is writable.
776		 */
777		if ((((mpp =
778		    find_segment((caddr_t)PLTGOT(lmp), lmp)) != NULL) &&
779		    ((mpp->mr_prot & PROT_WRITE) == 0)) &&
780		    ((set_prot(lmp, mpp, 1) == 0) ||
781		    (aplist_append(textrel, mpp, AL_CNT_TEXTREL) == NULL)))
782			return (0);
783
784		/*
785		 * Install the lm pointer in .PLT2 as per the ABI.
786		 */
787		pltoff = (2 * M_PLT_ENTSIZE) / M_PLT_INSSIZE;
788		elf_plt2_init(PLTGOT(lmp) + pltoff, lmp);
789
790		/*
791		 * The V9 ABI states that the first 32k PLT entries
792		 * use .PLT1, with .PLT0 used by the "latter" entries.
793		 * We don't currently implement the extended format,
794		 * so install an error handler in .PLT0 to catch anyone
795		 * trying to use it.
796		 */
797		elf_plt_init(PLTGOT(lmp), (caddr_t)elf_rtbndr_far);
798
799		/*
800		 * Initialize .PLT1
801		 */
802		pltoff = M_PLT_ENTSIZE / M_PLT_INSSIZE;
803		elf_plt_init(PLTGOT(lmp) + pltoff, (caddr_t)elf_rtbndr);
804	}
805
806	/*
807	 * Initialize the plt start and end addresses.
808	 */
809	if ((pltbgn = (ulong_t)JMPREL(lmp)) != 0)
810		pltend = pltbgn + (ulong_t)(PLTRELSZ(lmp));
811
812	/*
813	 * If we've been called upon to promote an RTLD_LAZY object to
814	 * RTLD_NOW then we're only interested in scanning the .plt table.
815	 */
816	if (plt) {
817		relbgn = pltbgn;
818		relend = pltend;
819	} else {
820		/*
821		 * The relocation sections appear to the run-time linker as a
822		 * single table.  Determine the address of the beginning and end
823		 * of this table.  There are two different interpretations of
824		 * the ABI at this point:
825		 *
826		 *   o	The REL table and its associated RELSZ indicate the
827		 *	concatenation of *all* relocation sections (this is the
828		 *	model our link-editor constructs).
829		 *
830		 *   o	The REL table and its associated RELSZ indicate the
831		 *	concatenation of all *but* the .plt relocations.  These
832		 *	relocations are specified individually by the JMPREL and
833		 *	PLTRELSZ entries.
834		 *
835		 * Determine from our knowledge of the relocation range and
836		 * .plt range, the range of the total relocation table.  Note
837		 * that one other ABI assumption seems to be that the .plt
838		 * relocations always follow any other relocations; the
839		 * following range checking drops that assumption.
840		 */
841		relbgn = (ulong_t)(REL(lmp));
842		relend = relbgn + (ulong_t)(RELSZ(lmp));
843		if (pltbgn) {
844			if (!relbgn || (relbgn > pltbgn))
845				relbgn = pltbgn;
846			if (!relbgn || (relend < pltend))
847				relend = pltend;
848		}
849	}
850	if (!relbgn || (relbgn == relend)) {
851		DBG_CALL(Dbg_reloc_run(lmp, 0, plt, DBG_REL_NONE));
852		return (1);
853	}
854
855	relsiz = (ulong_t)(RELENT(lmp));
856	basebgn = ADDR(lmp);
857
858	DBG_CALL(Dbg_reloc_run(lmp, M_REL_SHT_TYPE, plt, DBG_REL_START));
859
860	/*
861	 * If we're processing in lazy mode there is no need to scan the
862	 * .rela.plt table.
863	 */
864	if (pltbgn && ((MODE(lmp) & RTLD_NOW) == 0))
865		noplt = 1;
866
867	/*
868	 * Loop through relocations.
869	 */
870	while (relbgn < relend) {
871		mmapobj_result_t	*mpp;
872		uint_t			sb_flags = 0;
873		Addr			vaddr;
874
875		rtype = (Byte)ELF_R_TYPE(((Rela *)relbgn)->r_info, M_MACH);
876
877		/*
878		 * If this is a RELATIVE relocation in a shared object
879		 * (the common case), and if we are not debugging, then
880		 * jump into a tighter relocation loop (elf_reloc_relative_count()).
881		 * Only make the jump if we've been given a hint on the
882		 * number of relocations.
883		 */
884		if ((rtype == R_SPARC_RELATIVE) &&
885		    ((FLAGS(lmp) & FLG_RT_FIXED) == 0) && (DBG_ENABLED == 0)) {
886			if (relacount) {
887				relbgn = elf_reloc_relative_count(relbgn,
888				    relacount, relsiz, basebgn, lmp, textrel);
889				relacount = 0;
890			} else {
891				relbgn = elf_reloc_relative(relbgn, relend,
892				    relsiz, basebgn, lmp, textrel);
893			}
894			if (relbgn >= relend)
895				break;
896			rtype = (Byte)ELF_R_TYPE(((Rela *)relbgn)->r_info,
897			    M_MACH);
898		}
899
900		roffset = ((Rela *)relbgn)->r_offset;
901
902		reladd = (long)(((Rela *)relbgn)->r_addend);
903		rsymndx = ELF_R_SYM(((Rela *)relbgn)->r_info);
904
905		rel = (Rela *)relbgn;
906		relbgn += relsiz;
907
908		/*
909		 * Optimizations.
910		 */
911		if (rtype == R_SPARC_NONE)
912			continue;
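		/*
		 * Under lazy binding (noplt), skip the entire block of .plt
		 * relocations as soon as it is reached.
		 */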
913		if (noplt && ((ulong_t)rel >= pltbgn) &&
914		    ((ulong_t)rel < pltend)) {
915			relbgn = pltend;
916			continue;
917		}
918
919		if (rtype != R_SPARC_REGISTER) {
920			/*
921			 * If this is a shared object, add the base address
922			 * to offset.
923			 */
924			if (!(FLAGS(lmp) & FLG_RT_FIXED))
925				roffset += basebgn;
926
927			/*
928			 * If this relocation is not against part of the image
929			 * mapped into memory we skip it.
930			 */
931			if ((mpp = find_segment((caddr_t)roffset,
932			    lmp)) == NULL) {
933				elf_reloc_bad(lmp, (void *)rel, rtype, roffset,
934				    rsymndx);
935				continue;
936			}
937		}
938
939		/*
940		 * If we're promoting plts, determine if this one has already
941		 * been written. An uninitialized plt's second instruction is a
942		 * branch.
943		 */
944		if (plt) {
945			uchar_t	*_roffset = (uchar_t *)roffset;
946
947			_roffset += M_PLT_INSSIZE;
948			/* LINTED */
949			if ((*(uint_t *)_roffset &
950			    (~(S_MASK(19)))) != M_BA_A_XCC)
951				continue;
952		}
953
954		binfo = 0;
955		pltndx = (ulong_t)-1;
956		pbtype = PLT_T_NONE;
957
958		/*
959		 * If a symbol index is specified then get the symbol table
960		 * entry, locate the symbol definition, and determine its
961		 * address.
962		 */
963		if (rsymndx) {
964			/*
965			 * Get the local symbol table entry.
966			 */
967			symref = (Sym *)((ulong_t)SYMTAB(lmp) +
968			    (rsymndx * SYMENT(lmp)));
969
970			/*
971			 * If this is a local symbol, just use the base address.
972			 * (we should have no local relocations in the
973			 * executable).
974			 */
975			if (ELF_ST_BIND(symref->st_info) == STB_LOCAL) {
976				value = basebgn;
977				name = NULL;
978
979				/*
980				 * Special case TLS relocations.
981				 */
982				if ((rtype == R_SPARC_TLS_DTPMOD32) ||
983				    (rtype == R_SPARC_TLS_DTPMOD64)) {
984					/*
985					 * Use the TLS modid.
986					 */
987					value = TLSMODID(lmp);
988
989				} else if ((rtype == R_SPARC_TLS_TPOFF32) ||
990				    (rtype == R_SPARC_TLS_TPOFF64)) {
991					if ((value = elf_static_tls(lmp, symref,
992					    rel, rtype, 0, roffset, 0)) == 0) {
993						ret = 0;
994						break;
995					}
996				}
997			} else {
998				/*
999				 * If the symbol index is the same as that of
1000				 * the previous relocation we processed, then
1001				 * reuse the previous values. (Note that there
1002				 * have been cases where a relocation exists
1003				 * against a copy relocation symbol; our ld(1)
1004				 * should optimize this away, but make sure we
1005				 * don't use the same symbol information should
1006				 * this case exist).
1007				 */
1008				if ((rsymndx == psymndx) &&
1009				    (rtype != R_SPARC_COPY)) {
1010					/* LINTED */
1011					if (psymdef == 0) {
1012						DBG_CALL(Dbg_bind_weak(lmp,
1013						    (Addr)roffset, (Addr)
1014						    (roffset - basebgn), name));
1015						continue;
1016					}
1017					/* LINTED */
1018					value = pvalue;
1019					/* LINTED */
1020					name = pname;
1021					symdef = psymdef;
1022					/* LINTED */
1023					symref = psymref;
1024					/* LINTED */
1025					_lmp = plmp;
1026					/* LINTED */
1027					binfo = pbinfo;
1028
1029					if ((LIST(_lmp)->lm_tflags |
1030					    AFLAGS(_lmp)) &
1031					    LML_TFLG_AUD_SYMBIND) {
1032						value = audit_symbind(lmp, _lmp,
1033						    /* LINTED */
1034						    symdef, dsymndx, value,
1035						    &sb_flags);
1036					}
1037				} else {
1038					Slookup		sl;
1039					Sresult		sr;
1040
1041					/*
1042					 * Look up the symbol definition.
1043					 * Initialize the symbol lookup and
1044					 * symbol result data structures.
1045					 */
1046					name = (char *)(STRTAB(lmp) +
1047					    symref->st_name);
1048
1049					SLOOKUP_INIT(sl, name, lmp, 0,
1050					    ld_entry_cnt, 0, rsymndx, symref,
1051					    rtype, LKUP_STDRELOC);
1052					SRESULT_INIT(sr, name);
1053					symdef = NULL;
1054
1055					if (lookup_sym(&sl, &sr, &binfo,
1056					    in_nfavl)) {
1057						name = (char *)sr.sr_name;
1058						_lmp = sr.sr_dmap;
1059						symdef = sr.sr_sym;
1060					}
1061
1062					/*
1063					 * If the symbol is not found and the
1064					 * reference was not to a weak symbol,
1065					 * report an error.  Weak references
1066					 * may be unresolved.
1067					 */
1068					/* BEGIN CSTYLED */
1069					if (symdef == 0) {
1070					    if (sl.sl_bind != STB_WEAK) {
1071						if (elf_reloc_error(lmp, name,
1072						    rel, binfo))
1073							continue;
1074
1075						ret = 0;
1076						break;
1077
1078					    } else {
1079						psymndx = rsymndx;
1080						psymdef = 0;
1081
1082						DBG_CALL(Dbg_bind_weak(lmp,
1083						    (Addr)roffset, (Addr)
1084						    (roffset - basebgn), name));
1085						continue;
1086					    }
1087					}
1088					/* END CSTYLED */
1089
1090					/*
1091					 * If symbol was found in an object
1092					 * other than the referencing object
1093					 * then record the binding.
1094					 */
1095					if ((lmp != _lmp) && ((FLAGS1(_lmp) &
1096					    FL1_RT_NOINIFIN) == 0)) {
1097						if (aplist_test(&bound, _lmp,
1098						    AL_CNT_RELBIND) == 0) {
1099							ret = 0;
1100							break;
1101						}
1102					}
1103
1104					/*
1105					 * Calculate the location of definition;
1106					 * symbol value plus base address of
1107					 * containing shared object.
1108					 */
1109					if (IS_SIZE(rtype))
1110						value = symdef->st_size;
1111					else
1112						value = symdef->st_value;
1113
1114					if (!(FLAGS(_lmp) & FLG_RT_FIXED) &&
1115					    !(IS_SIZE(rtype)) &&
1116					    (symdef->st_shndx != SHN_ABS) &&
1117					    (ELF_ST_TYPE(symdef->st_info) !=
1118					    STT_TLS))
1119						value += ADDR(_lmp);
1120
1121					/*
1122					 * Retain this symbol index and the
1123					 * value in case they can be used for
1124					 * subsequent relocations.
1125					 */
1126					if (rtype != R_SPARC_COPY) {
1127						psymndx = rsymndx;
1128						pvalue = value;
1129						pname = name;
1130						psymdef = symdef;
1131						psymref = symref;
1132						plmp = _lmp;
1133						pbinfo = binfo;
1134					}
1135					if ((LIST(_lmp)->lm_tflags |
1136					    AFLAGS(_lmp)) &
1137					    LML_TFLG_AUD_SYMBIND) {
1138						/* LINTED */
1139						dsymndx = (((uintptr_t)symdef -
1140						    (uintptr_t)SYMTAB(_lmp)) /
1141						    SYMENT(_lmp));
1142						value = audit_symbind(lmp, _lmp,
1143						    symdef, dsymndx, value,
1144						    &sb_flags);
1145					}
1146				}
1147
1148				/*
1149				 * If relocation is PC-relative, subtract
1150				 * offset address.
1151				 */
1152				if (IS_PC_RELATIVE(rtype))
1153					value -= roffset;
1154
1155				/*
1156				 * Special case TLS relocations.
1157				 */
1158				if ((rtype == R_SPARC_TLS_DTPMOD32) ||
1159				    (rtype == R_SPARC_TLS_DTPMOD64)) {
1160					/*
1161					 * Relocation value is the TLS modid.
1162					 */
1163					value = TLSMODID(_lmp);
1164
1165				} else if ((rtype == R_SPARC_TLS_TPOFF64) ||
1166				    (rtype == R_SPARC_TLS_TPOFF32)) {
1167					if ((value = elf_static_tls(_lmp,
1168					    symdef, rel, rtype, name, roffset,
1169					    value)) == 0) {
1170						ret = 0;
1171						break;
1172					}
1173				}
1174			}
1175		} else {
1176			/*
1177			 * Special cases.
1178			 */
1179			if (rtype == R_SPARC_REGISTER) {
1180				/*
1181				 * A register symbol associated with symbol
1182				 * index 0 is initialized (i.e. relocated) to
1183				 * a constant in the r_addend field rather than
1184				 * to a symbol value.
1185				 */
1186				value = 0;
1187
1188			} else if ((rtype == R_SPARC_TLS_DTPMOD32) ||
1189			    (rtype == R_SPARC_TLS_DTPMOD64)) {
1190				/*
1191				 * TLS relocation value is the TLS modid.
1192				 */
1193				value = TLSMODID(lmp);
1194			} else
1195				value = basebgn;
1196
1197			name = NULL;
1198		}
1199
1200		DBG_CALL(Dbg_reloc_in(LIST(lmp), ELF_DBG_RTLD, M_MACH,
1201		    M_REL_SHT_TYPE, rel, NULL, 0, name));
1202
1203		/*
1204		 * Make sure the segment is writable.
1205		 */
1206		if ((rtype != R_SPARC_REGISTER) &&
1207		    ((mpp->mr_prot & PROT_WRITE) == 0) &&
1208		    ((set_prot(lmp, mpp, 1) == 0) ||
1209		    (aplist_append(textrel, mpp, AL_CNT_TEXTREL) == NULL))) {
1210			ret = 0;
1211			break;
1212		}
1213
1214		/*
1215		 * Call relocation routine to perform required relocation.
1216		 */
1217		switch (rtype) {
1218		case R_SPARC_REGISTER:
1219			/*
1220			 * The v9 ABI 4.2.4 says that system objects may,
1221			 * but are not required to, use register symbols
1222			 * to indicate how they use global registers. Thus
1223			 * at least %g6, %g7 must be allowed in addition
1224			 * to %g2 and %g3.
1225			 */
1226			value += reladd;
1227			if (roffset == STO_SPARC_REGISTER_G1) {
1228				set_sparc_g1(value);
1229			} else if (roffset == STO_SPARC_REGISTER_G2) {
1230				set_sparc_g2(value);
1231			} else if (roffset == STO_SPARC_REGISTER_G3) {
1232				set_sparc_g3(value);
1233			} else if (roffset == STO_SPARC_REGISTER_G4) {
1234				set_sparc_g4(value);
1235			} else if (roffset == STO_SPARC_REGISTER_G5) {
1236				set_sparc_g5(value);
1237			} else if (roffset == STO_SPARC_REGISTER_G6) {
1238				set_sparc_g6(value);
1239			} else if (roffset == STO_SPARC_REGISTER_G7) {
1240				set_sparc_g7(value);
1241			} else {
1242				eprintf(LIST(lmp), ERR_FATAL,
1243				    MSG_INTL(MSG_REL_BADREG), NAME(lmp),
1244				    EC_ADDR(roffset));
1245				ret = 0;
1246				break;
1247			}
1248
1249			DBG_CALL(Dbg_reloc_apply_reg(LIST(lmp), ELF_DBG_RTLD,
1250			    M_MACH, (Xword)roffset, (Xword)value));
1251			break;
1252		case R_SPARC_COPY:
1253			if (elf_copy_reloc(name, symref, lmp, (void *)roffset,
1254			    symdef, _lmp, (const void *)value) == 0)
1255				ret = 0;
1256			break;
1257		case R_SPARC_JMP_SLOT:
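			/*
			 * The index of this relocation within the .rela.plt
			 * section identifies the PLT entry being bound.
			 */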
1258			pltndx = ((uintptr_t)rel -
1259			    (uintptr_t)JMPREL(lmp)) / relsiz;
1260
1261			if (FLAGS(lmp) & FLG_RT_FIXED)
1262				vaddr = 0;
1263			else
1264				vaddr = ADDR(lmp);
1265
1266			if (((LIST(lmp)->lm_tflags | AFLAGS(lmp)) &
1267			    (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
1268			    AUDINFO(lmp)->ai_dynplts) {
1269				int	fail = 0;
1270				/* LINTED */
1271				uint_t	symndx = (uint_t)(((uintptr_t)symdef -
1272				    (uintptr_t)SYMTAB(_lmp)) / SYMENT(_lmp));
1273
1274				(void) elf_plt_trace_write((caddr_t)vaddr,
1275				    (Rela *)rel, lmp, _lmp, symdef, symndx,
1276				    pltndx, (caddr_t)value, sb_flags, &fail);
1277				if (fail)
1278					ret = 0;
1279			} else {
1280				/*
1281				 * Write standard PLT entry to jump directly
1282				 * to newly bound function.
1283				 */
1284				DBG_CALL(Dbg_reloc_apply_val(LIST(lmp),
1285				    ELF_DBG_RTLD, (Xword)roffset,
1286				    (Xword)value));
1287				pbtype = elf_plt_write((uintptr_t)vaddr,
1288				    (uintptr_t)vaddr, (void *)rel, value,
1289				    pltndx);
1290			}
1291			break;
1292		case R_SPARC_WDISP30:
1293			if (PLTPAD(lmp) &&
1294			    (S_INRANGE((Sxword)value, 29) == 0)) {
1295				void *	plt = 0;
1296
1297				if (bindpltpad(lmp, &pltpadlist,
1298				    value + roffset, &plt,
1299				    NAME(_lmp), name) == 0) {
1300					ret = 0;
1301					break;
1302				}
1303				value = (Addr)((Addr)plt - roffset);
1304			}
1305			/* FALLTHROUGH */
1306		default:
1307			value += reladd;
1308			if (IS_EXTOFFSET(rtype))
1309				value += (Word)ELF_R_TYPE_DATA(rel->r_info);
1310
1311			/*
1312			 * Write the relocation out.  If this relocation is a
1313			 * common basic write, skip the do_reloc_rtld() engine.
1314			 */
1315			if ((rtype == R_SPARC_GLOB_DAT) ||
1316			    (rtype == R_SPARC_64)) {
1317				if (roffset & 0x7) {
1318					Conv_inv_buf_t	inv_buf;
1319
1320					eprintf(LIST(lmp), ERR_FATAL,
1321					    MSG_INTL(MSG_REL_NONALIGN),
1322					    conv_reloc_SPARC_type(rtype,
1323					    0, &inv_buf),
1324					    NAME(lmp), demangle(name),
1325					    EC_OFF(roffset));
1326					ret = 0;
1327				} else
1328					*(ulong_t *)roffset += value;
1329			} else {
1330				if (do_reloc_rtld(rtype, (uchar_t *)roffset,
1331				    (Xword *)&value, name,
1332				    NAME(lmp), LIST(lmp)) == 0)
1333					ret = 0;
1334			}
1335
1336			/*
1337			 * The value now contains the 'bit-shifted' value that
1338			 * was or'ed into memory (this was set by
1339			 * do_reloc_rtld()).
1340			 */
1341			DBG_CALL(Dbg_reloc_apply_val(LIST(lmp), ELF_DBG_RTLD,
1342			    (Xword)roffset, (Xword)value));
1343
1344			/*
1345			 * If this relocation is against a text segment, make
1346			 * sure that the instruction cache is flushed.
1347			 */
1348			if (textrel)
1349				iflush_range((caddr_t)roffset, 0x4);
1350		}
1351
1352		if ((ret == 0) &&
1353		    ((LIST(lmp)->lm_flags & LML_FLG_TRC_WARN) == 0))
1354			break;
1355
1356		if (binfo) {
1357			DBG_CALL(Dbg_bind_global(lmp, (Addr)roffset,
1358			    (Off)(roffset - basebgn), pltndx, pbtype,
1359			    _lmp, (Addr)value, symdef->st_value, name, binfo));
1360		}
1361	}
1362
1363	/*
1364	 * Free up any items on the pltpadlist if it was allocated.
1365	 */
1366	if (pltpadlist)
1367		free(pltpadlist);
1368
1369	return (relocate_finish(lmp, bound, ret));
1370}
1371
1372/*
1373 * Provide a machine specific interface to the conversion routine.  By calling
1374 * the machine specific version, rather than the generic version, we ensure that
1375 * the data tables/strings for all known machine versions aren't dragged into
1376 * ld.so.1.
1377 */
1378const char *
1379_conv_reloc_type(uint_t rel)
1380{
1381	static Conv_inv_buf_t	inv_buf;
1382
1383	return (conv_reloc_SPARC_type(rel, 0, &inv_buf));
1384}
1385