/*-
 * Copyright 1996, 1997, 1998, 1999 John D. Polstra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Dynamic linker for ELF.
 *
 * John Polstra <jdp@polstra.com>.
 */

#include <sys/param.h>
#include <sys/mman.h>
#include <machine/ia64_cpu.h>

#include <dlfcn.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "debug.h"
#include "rtld.h"

extern Elf_Dyn _DYNAMIC;

/*
 * Macros for loading/storing unaligned 64-bit values. These are
 * needed because relocations can point to unaligned data. This
 * occurs in the DWARF2 exception frame tables generated by the
 * compiler, for instance.
 *
 * We don't use these when relocating jump slots and GOT entries,
 * since they are guaranteed to be aligned.
 *
 * XXX dfr stub for now.
 */
#define load64(p)	(*(u_int64_t *) (p))
#define store64(p, v)	(*(u_int64_t *) (p) = (v))

/* Allocate an @fptr. */

#define FPTR_CHUNK_SIZE		64

struct fptr_chunk {
	struct fptr fptrs[FPTR_CHUNK_SIZE];
};

static struct fptr_chunk first_chunk;
static struct fptr_chunk *current_chunk = &first_chunk;
static struct fptr *next_fptr = &first_chunk.fptrs[0];
static struct fptr *last_fptr = &first_chunk.fptrs[FPTR_CHUNK_SIZE];

/*
 * We use static storage initially so that we don't have to call
 * malloc during init_rtld().
 */
static struct fptr *
alloc_fptr(Elf_Addr target, Elf_Addr gp)
{
	struct fptr* fptr;

	if (next_fptr == last_fptr) {
		current_chunk = xmalloc(sizeof(struct fptr_chunk));
		next_fptr = &current_chunk->fptrs[0];
		last_fptr = &current_chunk->fptrs[FPTR_CHUNK_SIZE];
	}
	fptr = next_fptr;
	next_fptr++;
	fptr->target = target;
	fptr->gp = gp;
	return fptr;
}

static struct fptr **
alloc_fptrs(Obj_Entry *obj, bool mapped)
{
	struct fptr **fptrs;
	size_t fbytes;

	fbytes = obj->dynsymcount * sizeof(struct fptr *);

	/*
	 * Avoid malloc, if requested. Happens when relocating
	 * rtld itself on startup.
	 */
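	/*
	 * The mapped case takes its pages straight from mmap() because
	 * malloc is not usable while rtld relocates itself;
	 * reloc_non_plt() releases the table again via free_fptrs()
	 * once startup relocation is done.
	 */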
	if (mapped) {
		fptrs = mmap(NULL, fbytes, PROT_READ|PROT_WRITE,
		    MAP_ANON, -1, 0);
		if (fptrs == MAP_FAILED)
			fptrs = NULL;
	} else {
		fptrs = xcalloc(1, fbytes);
	}

	/*
	 * This assertion is necessary to guarantee function pointer
	 * uniqueness.
	 */
	assert(fptrs != NULL);

	return (obj->priv = fptrs);
}

static void
free_fptrs(Obj_Entry *obj, bool mapped)
{
	struct fptr **fptrs;
	size_t fbytes;

	fptrs = obj->priv;
	if (fptrs == NULL)
		return;

	fbytes = obj->dynsymcount * sizeof(struct fptr *);
	if (mapped)
		munmap(fptrs, fbytes);
	else
		free(fptrs);
	obj->priv = NULL;
}

/* Relocate a non-PLT object with addend. */
static int
reloc_non_plt_obj(Obj_Entry *obj_rtld, Obj_Entry *obj, const Elf_Rela *rela,
    SymCache *cache, int flags, RtldLockState *lockstate)
{
	struct fptr **fptrs;
	Elf_Addr *where = (Elf_Addr *) (obj->relocbase + rela->r_offset);

	switch (ELF_R_TYPE(rela->r_info)) {
	case R_IA_64_REL64LSB:
		/*
		 * We handle rtld's relocations in rtld_start.S
		 */
		if (obj != obj_rtld)
			store64(where,
			    load64(where) + (Elf_Addr) obj->relocbase);
		break;

	case R_IA_64_DIR64LSB: {
		const Elf_Sym *def;
		const Obj_Entry *defobj;
		Elf_Addr target;

		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    flags, cache, lockstate);
		if (def == NULL)
			return -1;

		target = (def->st_shndx != SHN_UNDEF)
		    ? (Elf_Addr)(defobj->relocbase + def->st_value) : 0;
		store64(where, target + rela->r_addend);
		break;
	}

	case R_IA_64_FPTR64LSB: {
		/*
		 * We have to make sure that all @fptr references to
		 * the same function are identical so that code can
		 * compare function pointers.
		 */
		const Elf_Sym *def;
		const Obj_Entry *defobj;
		struct fptr *fptr = 0;
		Elf_Addr target, gp;
		int sym_index;

		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    SYMLOOK_IN_PLT | flags, cache, lockstate);
		if (def == NULL) {
			/*
			 * XXX r_debug_state is problematic and find_symdef()
			 * returns NULL for it. This probably has something to
			 * do with symbol versioning (r_debug_state is in the
			 * symbol map). If we return -1 in that case we abort
			 * relocating rtld, which typically is fatal. So, for
			 * now just skip the symbol when we're relocating
			 * rtld. We don't care about r_debug_state unless we
			 * are being debugged.
			 */
			if (obj != obj_rtld)
				return -1;
			break;
		}

		if (def->st_shndx != SHN_UNDEF) {
			target = (Elf_Addr)(defobj->relocbase + def->st_value);
			gp = (Elf_Addr)defobj->pltgot;

			/* rtld is allowed to reference itself only */
			assert(!obj->rtld || obj == defobj);
			fptrs = defobj->priv;
			if (fptrs == NULL)
				fptrs = alloc_fptrs((Obj_Entry *) defobj,
				    obj->rtld);

			sym_index = def - defobj->symtab;

			/*
			 * Find the @fptr, using fptrs as a helper.
			 */
			if (fptrs)
				fptr = fptrs[sym_index];
			if (!fptr) {
				fptr = alloc_fptr(target, gp);
				if (fptrs)
					fptrs[sym_index] = fptr;
			}
		} else
			fptr = NULL;

		store64(where, (Elf_Addr)fptr);
		break;
	}

	case R_IA_64_IPLTLSB: {
		/*
		 * Relocation typically used to populate C++ virtual function
		 * tables. It creates a 128-bit function descriptor at the
		 * specified memory address.
		 */
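		/*
		 * The two doublewords written below form the descriptor
		 * (both little-endian, as the LSB suffix indicates):
		 * word 0 is the function's entry point and word 1 is the
		 * gp of the defining object. Unlike R_IA_64_FPTR64LSB
		 * there is no uniqueness requirement here; the descriptor
		 * is built in place rather than canonicalized.
		 */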
		const Elf_Sym *def;
		const Obj_Entry *defobj;
		struct fptr *fptr;
		Elf_Addr target, gp;

		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    flags, cache, lockstate);
		if (def == NULL)
			return -1;

		if (def->st_shndx != SHN_UNDEF) {
			target = (Elf_Addr)(defobj->relocbase + def->st_value);
			gp = (Elf_Addr)defobj->pltgot;
		} else {
			target = 0;
			gp = 0;
		}

		fptr = (void*)where;
		store64(&fptr->target, target);
		store64(&fptr->gp, gp);
		break;
	}

	case R_IA_64_DTPMOD64LSB: {
		const Elf_Sym *def;
		const Obj_Entry *defobj;

		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    flags, cache, lockstate);
		if (def == NULL)
			return -1;

		store64(where, defobj->tlsindex);
		break;
	}

	case R_IA_64_DTPREL64LSB: {
		const Elf_Sym *def;
		const Obj_Entry *defobj;

		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    flags, cache, lockstate);
		if (def == NULL)
			return -1;

		store64(where, def->st_value + rela->r_addend);
		break;
	}

	case R_IA_64_TPREL64LSB: {
		const Elf_Sym *def;
		const Obj_Entry *defobj;

		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    flags, cache, lockstate);
		if (def == NULL)
			return -1;

		/*
		 * We lazily allocate offsets for static TLS as we
		 * see the first relocation that references the
		 * TLS block. This allows us to support (small
		 * amounts of) static TLS in dynamically loaded
		 * modules. If we run out of space, we generate an
		 * error.
		 */
		if (!defobj->tls_done) {
			if (!allocate_tls_offset((Obj_Entry*) defobj)) {
				_rtld_error("%s: No space available for static "
				    "Thread Local Storage", obj->path);
				return -1;
			}
		}

		store64(where, defobj->tlsoffset + def->st_value + rela->r_addend);
		break;
	}

	case R_IA_64_NONE:
		break;

	default:
		_rtld_error("%s: Unsupported relocation type %u"
		    " in non-PLT relocations\n", obj->path,
		    (unsigned int)ELF_R_TYPE(rela->r_info));
		return -1;
	}

	return (0);
}

/* Process the non-PLT relocations. */
int
reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
    RtldLockState *lockstate)
{
	const Elf_Rel *rellim;
	const Elf_Rel *rel;
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	SymCache *cache;
	int bytes = obj->dynsymcount * sizeof(SymCache);
	int r = -1;

	/*
	 * The dynamic loader may be called from a thread; we have
	 * only limited amounts of stack available, so we cannot use
	 * alloca().
	 */
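	/*
	 * A NULL cache is fine: find_symdef() simply skips caching in
	 * that case (the jump slot code below passes NULL explicitly),
	 * so a failure of this mmap() only costs lookup speed.
	 */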
	cache = mmap(NULL, bytes, PROT_READ|PROT_WRITE, MAP_ANON, -1, 0);
	if (cache == MAP_FAILED)
		cache = NULL;

	/* Perform relocations without addend if there are any: */
	rellim = (const Elf_Rel *) ((caddr_t) obj->rel + obj->relsize);
	for (rel = obj->rel; obj->rel != NULL && rel < rellim; rel++) {
		Elf_Rela locrela;

		locrela.r_info = rel->r_info;
		locrela.r_offset = rel->r_offset;
		locrela.r_addend = 0;
		if (reloc_non_plt_obj(obj_rtld, obj, &locrela, cache, flags,
		    lockstate))
			goto done;
	}

	/* Perform relocations with addend if there are any: */
	relalim = (const Elf_Rela *) ((caddr_t) obj->rela + obj->relasize);
	for (rela = obj->rela; obj->rela != NULL && rela < relalim; rela++) {
		if (reloc_non_plt_obj(obj_rtld, obj, rela, cache, flags,
		    lockstate))
			goto done;
	}

	r = 0;
done:
	if (cache)
		munmap(cache, bytes);

	/*
	 * Release temporarily mapped fptrs if relocating
	 * rtld object itself. A new table will be created
	 * in make_function_pointer using malloc when needed.
	 */
	if (obj->rtld && obj->priv)
		free_fptrs(obj, true);

	return (r);
}

/* Process the PLT relocations. */
int
reloc_plt(Obj_Entry *obj)
{
	/* All PLT relocations are the same kind: Elf_Rel or Elf_Rela. */
	if (obj->pltrelsize != 0) {
		const Elf_Rel *rellim;
		const Elf_Rel *rel;

		rellim = (const Elf_Rel *)
		    ((char *)obj->pltrel + obj->pltrelsize);
		for (rel = obj->pltrel; rel < rellim; rel++) {
			Elf_Addr *where;

			assert(ELF_R_TYPE(rel->r_info) == R_IA_64_IPLTLSB);

			/* Relocate the @fptr pointing into the PLT. */
			where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
			*where += (Elf_Addr)obj->relocbase;
		}
	} else {
		const Elf_Rela *relalim;
		const Elf_Rela *rela;

		relalim = (const Elf_Rela *)
		    ((char *)obj->pltrela + obj->pltrelasize);
		for (rela = obj->pltrela; rela < relalim; rela++) {
			Elf_Addr *where;

			assert(ELF_R_TYPE(rela->r_info) == R_IA_64_IPLTLSB);

			/* Relocate the @fptr pointing into the PLT. */
			where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
			*where += (Elf_Addr)obj->relocbase;
		}
	}
	return 0;
}

int
reloc_iresolve(Obj_Entry *obj, struct Struct_RtldLockState *lockstate)
{

	/* XXX not implemented */
	return (0);
}

int
reloc_gnu_ifunc(Obj_Entry *obj, int flags,
    struct Struct_RtldLockState *lockstate)
{

	/* XXX not implemented */
	return (0);
}

/* Relocate the jump slots in an object. */
int
reloc_jmpslots(Obj_Entry *obj, int flags, RtldLockState *lockstate)
{
	if (obj->jmpslots_done)
		return 0;
	/* All PLT relocations are the same kind: Elf_Rel or Elf_Rela. */
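	/*
	 * DT_PLTREL declares whether the PLT relocations are Elf_Rel
	 * or Elf_Rela entries, so only one of pltrelsize and
	 * pltrelasize can be nonzero for a given object; testing
	 * pltrelsize selects the matching loop.
	 */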
	if (obj->pltrelsize != 0) {
		const Elf_Rel *rellim;
		const Elf_Rel *rel;

		rellim = (const Elf_Rel *)
		    ((char *)obj->pltrel + obj->pltrelsize);
		for (rel = obj->pltrel; rel < rellim; rel++) {
			Elf_Addr *where;
			const Elf_Sym *def;
			const Obj_Entry *defobj;

			assert(ELF_R_TYPE(rel->r_info) == R_IA_64_IPLTLSB);
			where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
			def = find_symdef(ELF_R_SYM(rel->r_info), obj,
			    &defobj, SYMLOOK_IN_PLT | flags, NULL, lockstate);
			if (def == NULL)
				return -1;
			reloc_jmpslot(where,
			    (Elf_Addr)(defobj->relocbase
				+ def->st_value),
			    defobj, obj, rel);
		}
	} else {
		const Elf_Rela *relalim;
		const Elf_Rela *rela;

		relalim = (const Elf_Rela *)
		    ((char *)obj->pltrela + obj->pltrelasize);
		for (rela = obj->pltrela; rela < relalim; rela++) {
			Elf_Addr *where;
			const Elf_Sym *def;
			const Obj_Entry *defobj;

			where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
			def = find_symdef(ELF_R_SYM(rela->r_info), obj,
			    &defobj, SYMLOOK_IN_PLT | flags, NULL, lockstate);
			if (def == NULL)
				return -1;
			reloc_jmpslot(where,
			    (Elf_Addr)(defobj->relocbase
				+ def->st_value),
			    defobj, obj, (Elf_Rel *)rela);
		}
	}
	obj->jmpslots_done = true;
	return 0;
}

/* Fix up the jump slot at "where" to transfer control to "target". */
Elf_Addr
reloc_jmpslot(Elf_Addr *where, Elf_Addr target, const Obj_Entry *obj,
    const Obj_Entry *refobj, const Elf_Rel *rel)
{
	Elf_Addr stubaddr;

	dbg(" reloc_jmpslot: where=%p, target=%p, gp=%p",
	    (void *)where, (void *)target, (void *)obj->pltgot);
	stubaddr = *where;
	if (stubaddr != target) {

		/*
		 * Point this @fptr directly at the target. Update the
		 * gp value first so that we don't break another cpu
		 * which is currently executing the PLT entry.
		 */
		where[1] = (Elf_Addr) obj->pltgot;
		ia64_mf();
		where[0] = target;
		ia64_mf();
	}

	/*
	 * The caller needs an @fptr for the adjusted entry. The PLT
	 * entry serves this purpose nicely.
	 */
	return (Elf_Addr) where;
}

/*
 * XXX ia64 doesn't seem to have copy relocations.
 *
 * Returns 0 on success, -1 on failure.
 */
int
do_copy_relocations(Obj_Entry *dstobj)
{

	return 0;
}

/*
 * Return the @fptr representing a given function symbol.
 */
void *
make_function_pointer(const Elf_Sym *sym, const Obj_Entry *obj)
{
	struct fptr **fptrs = obj->priv;
	int index = sym - obj->symtab;

	if (!fptrs) {
		/*
		 * This should only happen for something like
		 * dlsym("dlopen"). Actually, I'm not sure it can ever
		 * happen.
		 */
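		/*
		 * Note that "mapped" is false here: this path runs
		 * long after startup, so alloc_fptrs() can safely take
		 * the table from xcalloc() instead of mmap().
		 */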
		fptrs = alloc_fptrs((Obj_Entry *) obj, false);
	}
	if (!fptrs[index]) {
		Elf_Addr target, gp;
		target = (Elf_Addr) (obj->relocbase + sym->st_value);
		gp = (Elf_Addr) obj->pltgot;
		fptrs[index] = alloc_fptr(target, gp);
	}
	return fptrs[index];
}

void
call_initfini_pointer(const Obj_Entry *obj, Elf_Addr target)
{
	struct fptr fptr;

	fptr.gp = (Elf_Addr) obj->pltgot;
	fptr.target = target;
	dbg(" initfini: target=%p, gp=%p",
	    (void *) fptr.target, (void *) fptr.gp);
	((InitFunc) &fptr)();
}

void
call_init_pointer(const Obj_Entry *obj, Elf_Addr target)
{
	struct fptr fptr;

	fptr.gp = (Elf_Addr) obj->pltgot;
	fptr.target = target;
	dbg(" initfini: target=%p, gp=%p",
	    (void *) fptr.target, (void *) fptr.gp);
	((InitArrFunc) &fptr)(main_argc, main_argv, environ);
}

/* Initialize the special PLT entries. */
void
init_pltgot(Obj_Entry *obj)
{
	const Elf_Dyn *dynp;
	Elf_Addr *pltres = 0;

	/*
	 * When there are no PLT relocations, the DT_IA_64_PLT_RESERVE
	 * entry is bogus. Do not set up the BOR pointers in that case.
	 * An example of where this happens is /usr/lib/libxpg4.so.3.
	 */
	if (obj->pltrelasize == 0 && obj->pltrelsize == 0)
		return;

	/*
	 * Find the PLT RESERVE section.
	 */
	for (dynp = obj->dynamic; dynp->d_tag != DT_NULL; dynp++) {
		if (dynp->d_tag == DT_IA_64_PLT_RESERVE)
			pltres = (u_int64_t *)
			    (obj->relocbase + dynp->d_un.d_ptr);
	}
	if (!pltres)
		errx(1, "Can't find DT_IA_64_PLT_RESERVE entry");

	/*
	 * The PLT RESERVE section is used to get values to pass to
	 * _rtld_bind when lazy binding.
	 */
	pltres[0] = (Elf_Addr) obj;
	pltres[1] = FPTR_TARGET(_rtld_bind_start);
	pltres[2] = FPTR_GP(_rtld_bind_start);
}

void
allocate_initial_tls(Obj_Entry *list)
{
	void *tpval;

	/*
	 * Fix the size of the static TLS block by using the maximum
	 * offset allocated so far and adding a bit for dynamic modules
	 * to use.
	 */
	tls_static_space = tls_last_offset + tls_last_size +
	    RTLD_STATIC_TLS_EXTRA;

	tpval = allocate_tls(list, NULL, TLS_TCB_SIZE, 16);
	__asm __volatile("mov r13 = %0" :: "r"(tpval));
}

void *
__tls_get_addr(unsigned long module, unsigned long offset)
{
	register Elf_Addr** tp __asm__("r13");

	return tls_get_addr_common(tp, module, offset);
}
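/*
 * Background on the @fptr machinery above: an ia64 function pointer
 * is not a code address but the address of a 16-byte function
 * descriptor, roughly the struct fptr used throughout this file:
 *
 *	struct fptr {
 *		Elf_Addr target;	(code entry point)
 *		Elf_Addr gp;		(global pointer of the module)
 *	};
 *
 * An indirect call loads gp from the descriptor and then branches to
 * target. Because comparing two function pointers compares descriptor
 * addresses, rtld must hand out at most one descriptor per function,
 * which is what the per-object fptrs tables, alloc_fptr() and
 * make_function_pointer() arrange; reloc_jmpslot() can then rebind a
 * PLT descriptor in place without breaking pointer equality.
 */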