1/* Handle SVR4 shared libraries for GDB, the GNU Debugger.
2
3   Copyright (C) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000,
4   2001, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
5   Free Software Foundation, Inc.
6
7   This file is part of GDB.
8
9   This program is free software; you can redistribute it and/or modify
10   it under the terms of the GNU General Public License as published by
11   the Free Software Foundation; either version 3 of the License, or
12   (at your option) any later version.
13
14   This program is distributed in the hope that it will be useful,
15   but WITHOUT ANY WARRANTY; without even the implied warranty of
16   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17   GNU General Public License for more details.
18
19   You should have received a copy of the GNU General Public License
20   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
21
22#include "defs.h"
23
24#include "elf/external.h"
25#include "elf/common.h"
26#include "elf/mips.h"
27
28#include "symtab.h"
29#include "bfd.h"
30#include "symfile.h"
31#include "objfiles.h"
32#include "gdbcore.h"
33#include "target.h"
34#include "inferior.h"
35#include "regcache.h"
36#include "gdbthread.h"
37#include "observer.h"
38
39#include "gdb_assert.h"
40
41#include "solist.h"
42#include "solib.h"
43#include "solib-svr4.h"
44
45#include "bfd-target.h"
46#include "elf-bfd.h"
47#include "exec.h"
48#include "auxv.h"
49#include "exceptions.h"
50
51static struct link_map_offsets *svr4_fetch_link_map_offsets (void);
52static int svr4_have_link_map_offsets (void);
53static void svr4_relocate_main_executable (void);
54
55/* Link map info to include in an allocated so_list entry.  */
56
57struct lm_info
58  {
59    /* Pointer to a copy of the link map from the inferior.  The type is
60       gdb_byte * rather than void *, so that we may use byte offsets to
61       find the various fields without the need for a cast.  */
62    gdb_byte *lm;
63
64    /* Amount by which addresses in the binary should be relocated to
65       match the inferior.  This could most often be taken directly
66       from lm, but when prelinking is involved and the prelink base
67       address changes, we may need a different offset; we want to
68       warn about the difference and compute it only once.  */
69    CORE_ADDR l_addr;
70
71    /* The target location of lm.  */
72    CORE_ADDR lm_addr;
73  };
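
/* For reference, the offsets in `struct link_map_offsets' index into the
   SVR4 dynamic linker's `struct link_map'.  On GNU/Linux (glibc's <link.h>)
   that structure looks roughly like the sketch below; this is illustrative
   only, since the real offsets always come from svr4_fetch_link_map_offsets:

     struct link_map
     {
       ElfW(Addr) l_addr;          // Difference between the addresses in
                                   // the ELF file and the addresses in memory.
       char *l_name;               // Absolute path name of the object.
       ElfW(Dyn) *l_ld;            // The object's dynamic section.
       struct link_map *l_next, *l_prev;  // Chain of loaded objects.
     };  */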
74
75/* On SVR4 systems, a list of symbols in the dynamic linker where
76   GDB can try to place a breakpoint to monitor shared library
77   events.
78
79   If none of these symbols are found, or other errors occur, then
80   SVR4 systems will fall back to using a symbol as the "startup
81   mapping complete" breakpoint address.  */
82
83static const char * const solib_break_names[] =
84{
85  "r_debug_state",
86  "_r_debug_state",
87  "_dl_debug_state",
88  "rtld_db_dlactivity",
89  "__dl_rtld_db_dlactivity",
90  "_rtld_debug_state",
91
92  NULL
93};
94
95static const char * const bkpt_names[] =
96{
97  "_start",
98  "__start",
99  "main",
100  NULL
101};
102
103static const char * const main_name_list[] =
104{
105  "main_$main",
106  NULL
107};
108
109/* Return non-zero if GDB_SO_NAME and INFERIOR_SO_NAME represent
110   the same shared library.  */
111
112static int
113svr4_same_1 (const char *gdb_so_name, const char *inferior_so_name)
114{
115  if (strcmp (gdb_so_name, inferior_so_name) == 0)
116    return 1;
117
118  /* On Solaris, when starting the inferior we think that the dynamic
119     linker is /usr/lib/ld.so.1, but later on, the table of loaded shared
120     libraries contains /lib/ld.so.1.  Sometimes one file is a link to the
121     other, and sometimes they merely have identical content without being
122     linked to each other.  We don't restrict this check to Solaris, but
123     the chances of running into this situation elsewhere are very low.  */
124  if (strcmp (gdb_so_name, "/usr/lib/ld.so.1") == 0
125      && strcmp (inferior_so_name, "/lib/ld.so.1") == 0)
126    return 1;
127
128  /* Similarly, we observed the same issue with sparc64, but with
129     different locations.  */
130  if (strcmp (gdb_so_name, "/usr/lib/sparcv9/ld.so.1") == 0
131      && strcmp (inferior_so_name, "/lib/sparcv9/ld.so.1") == 0)
132    return 1;
133
134  return 0;
135}
136
137static int
138svr4_same (struct so_list *gdb, struct so_list *inferior)
139{
140  return (svr4_same_1 (gdb->so_original_name, inferior->so_original_name));
141}
142
143/* link map access functions.  */
144
145static CORE_ADDR
146LM_ADDR_FROM_LINK_MAP (struct so_list *so)
147{
148  struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
149  struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
150
151  return extract_typed_address (so->lm_info->lm + lmo->l_addr_offset,
152				ptr_type);
153}
154
155static int
156HAS_LM_DYNAMIC_FROM_LINK_MAP (void)
157{
158  struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
159
160  return lmo->l_ld_offset >= 0;
161}
162
163static CORE_ADDR
164LM_DYNAMIC_FROM_LINK_MAP (struct so_list *so)
165{
166  struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
167  struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
168
169  return extract_typed_address (so->lm_info->lm + lmo->l_ld_offset,
170				ptr_type);
171}
172
173static CORE_ADDR
174LM_ADDR_CHECK (struct so_list *so, bfd *abfd)
175{
176  if (so->lm_info->l_addr == (CORE_ADDR)-1)
177    {
178      struct bfd_section *dyninfo_sect;
179      CORE_ADDR l_addr, l_dynaddr, dynaddr;
180
181      l_addr = LM_ADDR_FROM_LINK_MAP (so);
182
183      if (! abfd || ! HAS_LM_DYNAMIC_FROM_LINK_MAP ())
184	goto set_addr;
185
186      l_dynaddr = LM_DYNAMIC_FROM_LINK_MAP (so);
187
188      dyninfo_sect = bfd_get_section_by_name (abfd, ".dynamic");
189      if (dyninfo_sect == NULL)
190	goto set_addr;
191
192      dynaddr = bfd_section_vma (abfd, dyninfo_sect);
193
194      if (dynaddr + l_addr != l_dynaddr)
195	{
196	  CORE_ADDR align = 0x1000;
197	  CORE_ADDR minpagesize = align;
198
199	  if (bfd_get_flavour (abfd) == bfd_target_elf_flavour)
200	    {
201	      Elf_Internal_Ehdr *ehdr = elf_tdata (abfd)->elf_header;
202	      Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
203	      int i;
204
205	      align = 1;
206
207	      for (i = 0; i < ehdr->e_phnum; i++)
208		if (phdr[i].p_type == PT_LOAD && phdr[i].p_align > align)
209		  align = phdr[i].p_align;
210
211	      minpagesize = get_elf_backend_data (abfd)->minpagesize;
212	    }
213
214	  /* Turn it into a mask.  */
215	  align--;
216
217	  /* If the changes match the alignment requirements, we
218	     assume we're using a core file that was generated by the
219	     same binary, just prelinked with a different base offset.
220	     If it doesn't match, we may have a different binary, the
221	     same binary with the dynamic table loaded at an unrelated
222	     location, or anything, really.  To avoid regressions,
223	     don't adjust the base offset in the latter case, although
224	     odds are that, if things really changed, debugging won't
225	     quite work.
226
227	     One might rather expect the condition
228	       ((l_addr & align) == 0 && ((l_dynaddr - dynaddr) & align) == 0)
229	     but the one below is relaxed for PPC.  The PPC kernel supports
230	     either 4k or 64k page sizes.  To be prepared for 64k pages,
231	     PPC ELF files are built using an alignment requirement of 64k.
232	     However, when running on a kernel supporting 4k pages, the memory
233	     mapping of the library may not actually happen on a 64k boundary!
234
235	     (In the usual case where (l_addr & align) == 0, this check is
236	     equivalent to the possibly expected check above.)
237
238	     Even on PPC it must be zero-aligned at least for MINPAGESIZE.  */
239
240	  if ((l_addr & (minpagesize - 1)) == 0
241	      && (l_addr & align) == ((l_dynaddr - dynaddr) & align))
242	    {
243	      l_addr = l_dynaddr - dynaddr;
244
245	      if (info_verbose)
246		printf_unfiltered (_("Using PIC (Position Independent Code) "
247				     "prelink displacement %s for \"%s\".\n"),
248				   paddress (target_gdbarch, l_addr),
249				   so->so_name);
250	    }
251	  else
252	    warning (_(".dynamic section for \"%s\" "
253		       "is not at the expected address "
254		       "(wrong library or version mismatch?)"), so->so_name);
255	}
256
257    set_addr:
258      so->lm_info->l_addr = l_addr;
259    }
260
261  return so->lm_info->l_addr;
262}
263
264static CORE_ADDR
265LM_NEXT (struct so_list *so)
266{
267  struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
268  struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
269
270  return extract_typed_address (so->lm_info->lm + lmo->l_next_offset,
271				ptr_type);
272}
273
274static CORE_ADDR
275LM_PREV (struct so_list *so)
276{
277  struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
278  struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
279
280  return extract_typed_address (so->lm_info->lm + lmo->l_prev_offset,
281				ptr_type);
282}
283
284static CORE_ADDR
285LM_NAME (struct so_list *so)
286{
287  struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
288  struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
289
290  return extract_typed_address (so->lm_info->lm + lmo->l_name_offset,
291				ptr_type);
292}
293
294static int
295IGNORE_FIRST_LINK_MAP_ENTRY (struct so_list *so)
296{
297  /* Assume that everything is a library if the dynamic loader was loaded
298     late by a static executable.  */
299  if (exec_bfd && bfd_get_section_by_name (exec_bfd, ".dynamic") == NULL)
300    return 0;
301
302  return LM_PREV (so) == 0;
303}
304
305/* Per pspace SVR4 specific data.  */
306
307struct svr4_info
308{
309  CORE_ADDR debug_base;	/* Base of dynamic linker structures.  */
310
311  /* Validity flag for debug_loader_offset.  */
312  int debug_loader_offset_p;
313
314  /* Load address for the dynamic linker, inferred.  */
315  CORE_ADDR debug_loader_offset;
316
317  /* Name of the dynamic linker, valid if debug_loader_offset_p.  */
318  char *debug_loader_name;
319
320  /* Load map address for the main executable.  */
321  CORE_ADDR main_lm_addr;
322
323  CORE_ADDR interp_text_sect_low;
324  CORE_ADDR interp_text_sect_high;
325  CORE_ADDR interp_plt_sect_low;
326  CORE_ADDR interp_plt_sect_high;
327};
328
329/* Per-program-space data key.  */
330static const struct program_space_data *solib_svr4_pspace_data;
331
332static void
333svr4_pspace_data_cleanup (struct program_space *pspace, void *arg)
334{
335  struct svr4_info *info;
336
337  info = program_space_data (pspace, solib_svr4_pspace_data);
338  xfree (info);
339}
340
341/* Get the current svr4 data.  If none is found yet, add it now.  This
342   function always returns a valid object.  */
343
344static struct svr4_info *
345get_svr4_info (void)
346{
347  struct svr4_info *info;
348
349  info = program_space_data (current_program_space, solib_svr4_pspace_data);
350  if (info != NULL)
351    return info;
352
353  info = XZALLOC (struct svr4_info);
354  set_program_space_data (current_program_space, solib_svr4_pspace_data, info);
355  return info;
356}
357
358/* Local function prototypes */
359
360static int match_main (const char *);
361
362/*
363
364   LOCAL FUNCTION
365
366   bfd_lookup_symbol -- look up the value of a specific symbol
367
368   SYNOPSIS
369
370   CORE_ADDR bfd_lookup_symbol (bfd *abfd, const char *symname)
371
372   DESCRIPTION
373
374   An expensive way to look up the value of a single symbol for
375   BFDs that are only temporary anyway.  This is used by the
376   shared library support to find the address of the debugger
377   notification routine in the shared library.
378
379   The returned symbol may be in a code or data section; functions
380   will normally be in a code section, but may be in a data section
381   if this architecture uses function descriptors.
382
383   Note that 0 is specifically allowed as an error return (no
384   such symbol).
385 */
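
/* For instance, enable_break below uses this routine on a freshly opened
   BFD for the dynamic linker, iterating over the candidate names in
   solib_break_names, e.g.:

     sym_addr = bfd_lookup_symbol (tmp_bfd, "_dl_debug_state");  */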
386
387static CORE_ADDR
388bfd_lookup_symbol (bfd *abfd, const char *symname)
389{
390  long storage_needed;
391  asymbol *sym;
392  asymbol **symbol_table;
393  unsigned int number_of_symbols;
394  unsigned int i;
395  struct cleanup *back_to;
396  CORE_ADDR symaddr = 0;
397
398  storage_needed = bfd_get_symtab_upper_bound (abfd);
399
400  if (storage_needed > 0)
401    {
402      symbol_table = (asymbol **) xmalloc (storage_needed);
403      back_to = make_cleanup (xfree, symbol_table);
404      number_of_symbols = bfd_canonicalize_symtab (abfd, symbol_table);
405
406      for (i = 0; i < number_of_symbols; i++)
407	{
408	  sym = *symbol_table++;
409	  if (strcmp (sym->name, symname) == 0
410              && (sym->section->flags & (SEC_CODE | SEC_DATA)) != 0)
411	    {
412	      /* BFD symbols are section relative.  */
413	      symaddr = sym->value + sym->section->vma;
414	      break;
415	    }
416	}
417      do_cleanups (back_to);
418    }
419
420  if (symaddr)
421    return symaddr;
422
423  /* On FreeBSD, the dynamic linker is stripped by default.  So we'll
424     have to check the dynamic symbol table too.  */
425
426  storage_needed = bfd_get_dynamic_symtab_upper_bound (abfd);
427
428  if (storage_needed > 0)
429    {
430      symbol_table = (asymbol **) xmalloc (storage_needed);
431      back_to = make_cleanup (xfree, symbol_table);
432      number_of_symbols = bfd_canonicalize_dynamic_symtab (abfd, symbol_table);
433
434      for (i = 0; i < number_of_symbols; i++)
435	{
436	  sym = *symbol_table++;
437
438	  if (strcmp (sym->name, symname) == 0
439              && (sym->section->flags & (SEC_CODE | SEC_DATA)) != 0)
440	    {
441	      /* BFD symbols are section relative.  */
442	      symaddr = sym->value + sym->section->vma;
443	      break;
444	    }
445	}
446      do_cleanups (back_to);
447    }
448
449  return symaddr;
450}
451
452
453/* Read program header TYPE from inferior memory.  The header is found
454   by scanning the OS auxiliary vector.
455
456   If TYPE == -1, return the program headers instead of the contents of
457   one program header.
458
459   Return a pointer to allocated memory holding the program header contents,
460   or NULL on failure.  If successful, and unless P_SECT_SIZE is NULL, the
461   size of those contents is returned to P_SECT_SIZE.  Likewise, the target
462   architecture size (32-bit or 64-bit) is returned to P_ARCH_SIZE.  */
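
/* Conceptually this mirrors what a live process could do for itself with
   getauxval (assuming a glibc-based system; illustrative sketch only):

     #include <link.h>
     #include <sys/auxv.h>

     ElfW(Phdr) *phdr = (ElfW(Phdr) *) getauxval (AT_PHDR);
     unsigned long phent = getauxval (AT_PHENT);
     unsigned long phnum = getauxval (AT_PHNUM);

   except that here the values come from the *inferior's* auxiliary vector
   via target_auxv_search, and the headers themselves are fetched with
   target_read_memory.  */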
463
464static gdb_byte *
465read_program_header (int type, int *p_sect_size, int *p_arch_size)
466{
467  enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
468  CORE_ADDR at_phdr, at_phent, at_phnum;
469  int arch_size, sect_size;
470  CORE_ADDR sect_addr;
471  gdb_byte *buf;
472
473  /* Get required auxv elements from target.  */
474  if (target_auxv_search (&current_target, AT_PHDR, &at_phdr) <= 0)
475    return 0;
476  if (target_auxv_search (&current_target, AT_PHENT, &at_phent) <= 0)
477    return 0;
478  if (target_auxv_search (&current_target, AT_PHNUM, &at_phnum) <= 0)
479    return 0;
480  if (!at_phdr || !at_phnum)
481    return 0;
482
483  /* Determine ELF architecture type.  */
484  if (at_phent == sizeof (Elf32_External_Phdr))
485    arch_size = 32;
486  else if (at_phent == sizeof (Elf64_External_Phdr))
487    arch_size = 64;
488  else
489    return 0;
490
491  /* Find the requested segment.  */
492  if (type == -1)
493    {
494      sect_addr = at_phdr;
495      sect_size = at_phent * at_phnum;
496    }
497  else if (arch_size == 32)
498    {
499      Elf32_External_Phdr phdr;
500      int i;
501
502      /* Search for requested PHDR.  */
503      for (i = 0; i < at_phnum; i++)
504	{
505	  if (target_read_memory (at_phdr + i * sizeof (phdr),
506				  (gdb_byte *)&phdr, sizeof (phdr)))
507	    return 0;
508
509	  if (extract_unsigned_integer ((gdb_byte *)phdr.p_type,
510					4, byte_order) == type)
511	    break;
512	}
513
514      if (i == at_phnum)
515	return 0;
516
517      /* Retrieve address and size.  */
518      sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
519					    4, byte_order);
520      sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
521					    4, byte_order);
522    }
523  else
524    {
525      Elf64_External_Phdr phdr;
526      int i;
527
528      /* Search for requested PHDR.  */
529      for (i = 0; i < at_phnum; i++)
530	{
531	  if (target_read_memory (at_phdr + i * sizeof (phdr),
532				  (gdb_byte *)&phdr, sizeof (phdr)))
533	    return 0;
534
535	  if (extract_unsigned_integer ((gdb_byte *)phdr.p_type,
536					4, byte_order) == type)
537	    break;
538	}
539
540      if (i == at_phnum)
541	return 0;
542
543      /* Retrieve address and size.  */
544      sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
545					    8, byte_order);
546      sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
547					    8, byte_order);
548    }
549
550  /* Read in requested program header.  */
551  buf = xmalloc (sect_size);
552  if (target_read_memory (sect_addr, buf, sect_size))
553    {
554      xfree (buf);
555      return NULL;
556    }
557
558  if (p_arch_size)
559    *p_arch_size = arch_size;
560  if (p_sect_size)
561    *p_sect_size = sect_size;
562
563  return buf;
564}
565
566
567/* Return program interpreter string.  */
568static gdb_byte *
569find_program_interpreter (void)
570{
571  gdb_byte *buf = NULL;
572
573  /* If we have an exec_bfd, use its section table.  */
574  if (exec_bfd
575      && bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
576   {
577     struct bfd_section *interp_sect;
578
579     interp_sect = bfd_get_section_by_name (exec_bfd, ".interp");
580     if (interp_sect != NULL)
581      {
582	int sect_size = bfd_section_size (exec_bfd, interp_sect);
583
584	buf = xmalloc (sect_size);
585	bfd_get_section_contents (exec_bfd, interp_sect, buf, 0, sect_size);
586      }
587   }
588
589  /* If we didn't find it, use the target auxiliary vector.  */
590  if (!buf)
591    buf = read_program_header (PT_INTERP, NULL, NULL);
592
593  return buf;
594}
595
596
597/* Scan for DYNTAG in the .dynamic section of ABFD.  If DYNTAG is found,
598   1 is returned and the corresponding PTR is set.  */
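
/* Each entry walked over below is an Elf32_Dyn or Elf64_Dyn record; per the
   ELF specification the 64-bit internal form is roughly:

     typedef struct
     {
       Elf64_Sxword d_tag;        // DT_DEBUG, DT_MIPS_RLD_MAP, DT_NULL, ...
       union
       {
         Elf64_Xword d_val;       // Integer value.
         Elf64_Addr d_ptr;        // Program virtual address.
       } d_un;
     } Elf64_Dyn;

   The loop reads the external (on-disk byte order) form of these records
   and stops at the DT_NULL terminator.  */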
599
600static int
601scan_dyntag (int dyntag, bfd *abfd, CORE_ADDR *ptr)
602{
603  int arch_size, step, sect_size;
604  long dyn_tag;
605  CORE_ADDR dyn_ptr, dyn_addr;
606  gdb_byte *bufend, *bufstart, *buf;
607  Elf32_External_Dyn *x_dynp_32;
608  Elf64_External_Dyn *x_dynp_64;
609  struct bfd_section *sect;
610  struct target_section *target_section;
611
612  if (abfd == NULL)
613    return 0;
614
615  if (bfd_get_flavour (abfd) != bfd_target_elf_flavour)
616    return 0;
617
618  arch_size = bfd_get_arch_size (abfd);
619  if (arch_size == -1)
620    return 0;
621
622  /* Find the start address of the .dynamic section.  */
623  sect = bfd_get_section_by_name (abfd, ".dynamic");
624  if (sect == NULL)
625    return 0;
626
627  for (target_section = current_target_sections->sections;
628       target_section < current_target_sections->sections_end;
629       target_section++)
630    if (sect == target_section->the_bfd_section)
631      break;
632  if (target_section < current_target_sections->sections_end)
633    dyn_addr = target_section->addr;
634  else
635    {
636      /* ABFD may come from OBJFILE acting only as a symbol file without being
637	 loaded into the target (see add_symbol_file_command).  In that case,
638	 fall back to the file VMA address, without the possibility of
639	 having the section relocated to its actual in-memory address.  */
640
641      dyn_addr = bfd_section_vma (abfd, sect);
642    }
643
644  /* Read in .dynamic from the BFD.  We will get the actual value
645     from memory later.  */
646  sect_size = bfd_section_size (abfd, sect);
647  buf = bufstart = alloca (sect_size);
648  if (!bfd_get_section_contents (abfd, sect,
649				 buf, 0, sect_size))
650    return 0;
651
652  /* Iterate over BUF and scan for DYNTAG.  If found, set PTR and return.  */
653  step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
654			   : sizeof (Elf64_External_Dyn);
655  for (bufend = buf + sect_size;
656       buf < bufend;
657       buf += step)
658  {
659    if (arch_size == 32)
660      {
661	x_dynp_32 = (Elf32_External_Dyn *) buf;
662	dyn_tag = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_tag);
663	dyn_ptr = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_un.d_ptr);
664      }
665    else
666      {
667	x_dynp_64 = (Elf64_External_Dyn *) buf;
668	dyn_tag = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_tag);
669	dyn_ptr = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_un.d_ptr);
670      }
671     if (dyn_tag == DT_NULL)
672       return 0;
673     if (dyn_tag == dyntag)
674       {
675	 /* If requested, try to read the runtime value of this .dynamic
676	    entry.  */
677	 if (ptr)
678	   {
679	     struct type *ptr_type;
680	     gdb_byte ptr_buf[8];
681	     CORE_ADDR ptr_addr;
682
683	     ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
684	     ptr_addr = dyn_addr + (buf - bufstart) + arch_size / 8;
685	     if (target_read_memory (ptr_addr, ptr_buf, arch_size / 8) == 0)
686	       dyn_ptr = extract_typed_address (ptr_buf, ptr_type);
687	     *ptr = dyn_ptr;
688	   }
689	 return 1;
690       }
691  }
692
693  return 0;
694}
695
696/* Scan for DYNTAG in the .dynamic section of the target's main executable,
697   found by consulting the OS auxiliary vector.  If DYNTAG is found, 1 is
698   returned and the corresponding PTR is set.  */
699
700static int
701scan_dyntag_auxv (int dyntag, CORE_ADDR *ptr)
702{
703  enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
704  int sect_size, arch_size, step;
705  long dyn_tag;
706  CORE_ADDR dyn_ptr;
707  gdb_byte *bufend, *bufstart, *buf;
708
709  /* Read in .dynamic section.  */
710  buf = bufstart = read_program_header (PT_DYNAMIC, &sect_size, &arch_size);
711  if (!buf)
712    return 0;
713
714  /* Iterate over BUF and scan for DYNTAG.  If found, set PTR and return.  */
715  step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
716			   : sizeof (Elf64_External_Dyn);
717  for (bufend = buf + sect_size;
718       buf < bufend;
719       buf += step)
720  {
721    if (arch_size == 32)
722      {
723	Elf32_External_Dyn *dynp = (Elf32_External_Dyn *) buf;
724
725	dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
726					    4, byte_order);
727	dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
728					    4, byte_order);
729      }
730    else
731      {
732	Elf64_External_Dyn *dynp = (Elf64_External_Dyn *) buf;
733
734	dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
735					    8, byte_order);
736	dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
737					    8, byte_order);
738      }
739    if (dyn_tag == DT_NULL)
740      break;
741
742    if (dyn_tag == dyntag)
743      {
744	if (ptr)
745	  *ptr = dyn_ptr;
746
747	xfree (bufstart);
748	return 1;
749      }
750  }
751
752  xfree (bufstart);
753  return 0;
754}
755
756
757/*
758
759   LOCAL FUNCTION
760
761   elf_locate_base -- locate the base address of dynamic linker structs
762   for SVR4 ELF targets.
763
764   SYNOPSIS
765
766   CORE_ADDR elf_locate_base (void)
767
768   DESCRIPTION
769
770   For SVR4 ELF targets the address of the dynamic linker's runtime
771   structure is contained within the dynamic info section in the
772   executable file.  The dynamic section is also mapped into the
773   inferior address space.  Because the runtime loader fills in the
774   real address before starting the inferior, we have to read in the
775   dynamic info section from the inferior address space.
776   If there are any errors while trying to find the address, we
777   silently return 0; otherwise the found address is returned.
778
779 */
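
/* For example, in the on-disk executable the relevant entry typically reads

     DT_DEBUG   0x0

   while in the running inferior the dynamic linker has overwritten it with
   the address of its `struct r_debug', e.g. (hypothetical address)

     DT_DEBUG   0x7ffff7ffe150

   which is why the entry's value is re-read from target memory instead of
   being taken from the file image.  */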
780
781static CORE_ADDR
782elf_locate_base (void)
783{
784  struct minimal_symbol *msymbol;
785  CORE_ADDR dyn_ptr;
786
787  /* Look for DT_MIPS_RLD_MAP first.  MIPS executables use this
788     instead of DT_DEBUG, although they sometimes contain an unused
789     DT_DEBUG.  */
790  if (scan_dyntag (DT_MIPS_RLD_MAP, exec_bfd, &dyn_ptr)
791      || scan_dyntag_auxv (DT_MIPS_RLD_MAP, &dyn_ptr))
792    {
793      struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
794      gdb_byte *pbuf;
795      int pbuf_size = TYPE_LENGTH (ptr_type);
796
797      pbuf = alloca (pbuf_size);
798      /* DT_MIPS_RLD_MAP contains a pointer to the address
799	 of the dynamic link structure.  */
800      if (target_read_memory (dyn_ptr, pbuf, pbuf_size))
801	return 0;
802      return extract_typed_address (pbuf, ptr_type);
803    }
804
805  /* Find DT_DEBUG.  */
806  if (scan_dyntag (DT_DEBUG, exec_bfd, &dyn_ptr)
807      || scan_dyntag_auxv (DT_DEBUG, &dyn_ptr))
808    return dyn_ptr;
809
810  /* This may be a static executable.  Look for the symbol
811     conventionally named _r_debug, as a last resort.  */
812  msymbol = lookup_minimal_symbol ("_r_debug", NULL, symfile_objfile);
813  if (msymbol != NULL)
814    return SYMBOL_VALUE_ADDRESS (msymbol);
815
816  /* DT_DEBUG entry not found.  */
817  return 0;
818}
819
820/*
821
822   LOCAL FUNCTION
823
824   locate_base -- locate the base address of dynamic linker structs
825
826   SYNOPSIS
827
828   CORE_ADDR locate_base (struct svr4_info *)
829
830   DESCRIPTION
831
832   For both the SunOS and SVR4 shared library implementations, if the
833   inferior executable has been linked dynamically, there is a single
834   address somewhere in the inferior's data space which is the key to
835   locating all of the dynamic linker's runtime structures.  This
836   address is the value of the debug base symbol.  The job of this
837   function is to find and return that address, or to return 0 if there
838   is no such address (the executable is statically linked for example).
839
840   For SunOS, the job is almost trivial, since the dynamic linker and
841   all of its structures are statically linked to the executable at
842   link time.  Thus the symbol for the address we are looking for has
843   already been added to the minimal symbol table for the executable's
844   objfile at the time the symbol file's symbols were read, and all we
845   have to do is look it up there.  Note that we explicitly do NOT want
846   to find the copies in the shared library.
847
848   The SVR4 version is a bit more complicated because the address
849   is contained somewhere in the dynamic info section.  We have to go
850   to a lot more work to discover the address of the debug base symbol.
851   Because of this complexity, we cache the value we find and return that
852   value on subsequent invocations.  Note there is no copy in the
853   executable symbol tables.
854
855 */
856
857static CORE_ADDR
858locate_base (struct svr4_info *info)
859{
860  /* Check to see if we have a currently valid address, and if so, avoid
861     doing all this work again and just return the cached address.  If
862     we have no cached address, try to locate it in the dynamic info
863     section for ELF executables.  There's no point in doing any of this
864     though if we don't have some link map offsets to work with.  */
865
866  if (info->debug_base == 0 && svr4_have_link_map_offsets ())
867    info->debug_base = elf_locate_base ();
868  return info->debug_base;
869}
870
871/* Find the first element in the inferior's dynamic link map, and
872   return its address in the inferior.  Return zero if the address
873   could not be determined.
874
875   FIXME: Perhaps we should validate the info somehow, perhaps by
876   checking r_version for a known version number, or r_state for
877   RT_CONSISTENT.  */
878
879static CORE_ADDR
880solib_svr4_r_map (struct svr4_info *info)
881{
882  struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
883  struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
884  CORE_ADDR addr = 0;
885  volatile struct gdb_exception ex;
886
887  TRY_CATCH (ex, RETURN_MASK_ERROR)
888    {
889      addr = read_memory_typed_address (info->debug_base + lmo->r_map_offset,
890                                        ptr_type);
891    }
892  exception_print (gdb_stderr, ex);
893  return addr;
894}
895
896/* Find r_brk from the inferior's debug base.  */
897
898static CORE_ADDR
899solib_svr4_r_brk (struct svr4_info *info)
900{
901  struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
902  struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
903
904  return read_memory_typed_address (info->debug_base + lmo->r_brk_offset,
905				    ptr_type);
906}
907
908/* Find the link map for the dynamic linker (if it is not in the
909   normal list of loaded shared objects).  */
910
911static CORE_ADDR
912solib_svr4_r_ldsomap (struct svr4_info *info)
913{
914  struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
915  struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
916  enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
917  ULONGEST version;
918
919  /* Check version, and return zero if `struct r_debug' doesn't have
920     the r_ldsomap member.  */
921  version
922    = read_memory_unsigned_integer (info->debug_base + lmo->r_version_offset,
923				    lmo->r_version_size, byte_order);
924  if (version < 2 || lmo->r_ldsomap_offset == -1)
925    return 0;
926
927  return read_memory_typed_address (info->debug_base + lmo->r_ldsomap_offset,
928				    ptr_type);
929}
930
931/* On Solaris systems with some versions of the dynamic linker,
932   ld.so's l_name pointer points to the SONAME in the string table
933   rather than into writable memory.  So that GDB can find shared
934   libraries when loading a core file generated by gcore, ensure that
935   memory areas containing the l_name string are saved in the core
936   file.  */
937
938static int
939svr4_keep_data_in_core (CORE_ADDR vaddr, unsigned long size)
940{
941  struct svr4_info *info;
942  CORE_ADDR ldsomap;
943  struct so_list *new;
944  struct cleanup *old_chain;
945  struct link_map_offsets *lmo;
946  CORE_ADDR lm_name;
947
948  info = get_svr4_info ();
949
950  info->debug_base = 0;
951  locate_base (info);
952  if (!info->debug_base)
953    return 0;
954
955  ldsomap = solib_svr4_r_ldsomap (info);
956  if (!ldsomap)
957    return 0;
958
959  lmo = svr4_fetch_link_map_offsets ();
960  new = XZALLOC (struct so_list);
961  old_chain = make_cleanup (xfree, new);
962  new->lm_info = xmalloc (sizeof (struct lm_info));
963  make_cleanup (xfree, new->lm_info);
964  new->lm_info->l_addr = (CORE_ADDR)-1;
965  new->lm_info->lm_addr = ldsomap;
966  new->lm_info->lm = xzalloc (lmo->link_map_size);
967  make_cleanup (xfree, new->lm_info->lm);
968  read_memory (ldsomap, new->lm_info->lm, lmo->link_map_size);
969  lm_name = LM_NAME (new);
970  do_cleanups (old_chain);
971
972  return (lm_name >= vaddr && lm_name < vaddr + size);
973}
974
975/*
976
977  LOCAL FUNCTION
978
979  open_symbol_file_object
980
981  SYNOPSIS
982
983  int open_symbol_file_object (void *from_ttyp)
984
985  DESCRIPTION
986
987  If no open symbol file, attempt to locate and open the main symbol
988  file.  On SVR4 systems, this is the first link map entry.  If its
989  name is here, we can open it.  Useful when attaching to a process
990  without first loading its symbol file.
991
992  If FROM_TTYP dereferences to a non-zero integer, allow messages to
993  be printed.  This parameter is a pointer rather than an int because
994  open_symbol_file_object() is called via catch_errors() and
995  catch_errors() requires a pointer argument.  */
996
997static int
998open_symbol_file_object (void *from_ttyp)
999{
1000  CORE_ADDR lm, l_name;
1001  char *filename;
1002  int errcode;
1003  int from_tty = *(int *)from_ttyp;
1004  struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
1005  struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
1006  int l_name_size = TYPE_LENGTH (ptr_type);
1007  gdb_byte *l_name_buf = xmalloc (l_name_size);
1008  struct cleanup *cleanups = make_cleanup (xfree, l_name_buf);
1009  struct svr4_info *info = get_svr4_info ();
1010
1011  if (symfile_objfile)
1012    if (!query (_("Attempt to reload symbols from process? ")))
1013      return 0;
1014
1015  /* Always locate the debug struct, in case it has moved.  */
1016  info->debug_base = 0;
1017  if (locate_base (info) == 0)
1018    return 0;	/* failed somehow...  */
1019
1020  /* First link map member should be the executable.  */
1021  lm = solib_svr4_r_map (info);
1022  if (lm == 0)
1023    return 0;	/* failed somehow...  */
1024
1025  /* Read address of name from target memory to GDB.  */
1026  read_memory (lm + lmo->l_name_offset, l_name_buf, l_name_size);
1027
1028  /* Convert the address to host format.  */
1029  l_name = extract_typed_address (l_name_buf, ptr_type);
1030
1031  /* Free l_name_buf.  */
1032  do_cleanups (cleanups);
1033
1034  if (l_name == 0)
1035    return 0;		/* No filename.  */
1036
1037  /* Now fetch the filename from target memory.  */
1038  target_read_string (l_name, &filename, SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1039  make_cleanup (xfree, filename);
1040
1041  if (errcode)
1042    {
1043      warning (_("failed to read exec filename from attached file: %s"),
1044	       safe_strerror (errcode));
1045      return 0;
1046    }
1047
1048  /* Have a pathname: read the symbol file.  */
1049  symbol_file_add_main (filename, from_tty);
1050
1051  return 1;
1052}
1053
1054/* If no shared library information is available from the dynamic
1055   linker, build a fallback list from other sources.  */
1056
1057static struct so_list *
1058svr4_default_sos (void)
1059{
1060  struct svr4_info *info = get_svr4_info ();
1061
1062  struct so_list *head = NULL;
1063  struct so_list **link_ptr = &head;
1064
1065  if (info->debug_loader_offset_p)
1066    {
1067      struct so_list *new = XZALLOC (struct so_list);
1068
1069      new->lm_info = xmalloc (sizeof (struct lm_info));
1070
1071      /* Nothing will ever check the cached copy of the link
1072	 map if we set l_addr.  */
1073      new->lm_info->l_addr = info->debug_loader_offset;
1074      new->lm_info->lm_addr = 0;
1075      new->lm_info->lm = NULL;
1076
1077      strncpy (new->so_name, info->debug_loader_name,
1078	       SO_NAME_MAX_PATH_SIZE - 1);
1079      new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1080      strcpy (new->so_original_name, new->so_name);
1081
1082      *link_ptr = new;
1083      link_ptr = &new->next;
1084    }
1085
1086  return head;
1087}
1088
1089/* LOCAL FUNCTION
1090
1091   current_sos -- build a list of currently loaded shared objects
1092
1093   SYNOPSIS
1094
1095   struct so_list *current_sos ()
1096
1097   DESCRIPTION
1098
1099   Build a list of `struct so_list' objects describing the shared
1100   objects currently loaded in the inferior.  This list does not
1101   include an entry for the main executable file.
1102
1103   Note that we only gather information directly available from the
1104   inferior --- we don't examine any of the shared library files
1105   themselves.  The declaration of `struct so_list' says which fields
1106   we provide values for.  */
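
/* In outline (an informal sketch of the loop below, not separate code):

     lm = r_debug.r_map;
     while (lm != 0)
       {
         read the `struct link_map' at LM into a fresh so_list;
         if it is the first entry, describing the main executable,
           remember its address in main_lm_addr and drop it;
         otherwise fill in so_name from l_name and keep it;
         lm = lm->l_next;            // cross-checked against l_prev
       }

   with the Solaris dynamic linker's own map appended from r_ldsomap once
   the regular chain is exhausted.  */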
1107
1108static struct so_list *
1109svr4_current_sos (void)
1110{
1111  CORE_ADDR lm, prev_lm;
1112  struct so_list *head = 0;
1113  struct so_list **link_ptr = &head;
1114  CORE_ADDR ldsomap = 0;
1115  struct svr4_info *info;
1116
1117  info = get_svr4_info ();
1118
1119  /* Always locate the debug struct, in case it has moved.  */
1120  info->debug_base = 0;
1121  locate_base (info);
1122
1123  /* If we can't find the dynamic linker's base structure, this
1124     must not be a dynamically linked executable.  Hmm.  */
1125  if (! info->debug_base)
1126    return svr4_default_sos ();
1127
1128  /* Walk the inferior's link map list, and build our list of
1129     `struct so_list' nodes.  */
1130  prev_lm = 0;
1131  lm = solib_svr4_r_map (info);
1132
1133  while (lm)
1134    {
1135      struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
1136      struct so_list *new = XZALLOC (struct so_list);
1137      struct cleanup *old_chain = make_cleanup (xfree, new);
1138      CORE_ADDR next_lm;
1139
1140      new->lm_info = xmalloc (sizeof (struct lm_info));
1141      make_cleanup (xfree, new->lm_info);
1142
1143      new->lm_info->l_addr = (CORE_ADDR)-1;
1144      new->lm_info->lm_addr = lm;
1145      new->lm_info->lm = xzalloc (lmo->link_map_size);
1146      make_cleanup (xfree, new->lm_info->lm);
1147
1148      read_memory (lm, new->lm_info->lm, lmo->link_map_size);
1149
1150      next_lm = LM_NEXT (new);
1151
1152      if (LM_PREV (new) != prev_lm)
1153	{
1154	  warning (_("Corrupted shared library list"));
1155	  free_so (new);
1156	  next_lm = 0;
1157	}
1158
1159      /* For SVR4 versions, the first entry in the link map is for the
1160         inferior executable, so we must ignore it.  For some versions of
1161         SVR4, it has no name.  For others (Solaris 2.3 for example), it
1162         does have a name, so we can no longer use a missing name to
1163         decide when to ignore it.  */
1164      else if (IGNORE_FIRST_LINK_MAP_ENTRY (new) && ldsomap == 0)
1165	{
1166	  info->main_lm_addr = new->lm_info->lm_addr;
1167	  free_so (new);
1168	}
1169      else
1170	{
1171	  int errcode;
1172	  char *buffer;
1173
1174	  /* Extract this shared object's name.  */
1175	  target_read_string (LM_NAME (new), &buffer,
1176			      SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1177	  if (errcode != 0)
1178	    warning (_("Can't read pathname for load map: %s."),
1179		     safe_strerror (errcode));
1180	  else
1181	    {
1182	      strncpy (new->so_name, buffer, SO_NAME_MAX_PATH_SIZE - 1);
1183	      new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1184	      strcpy (new->so_original_name, new->so_name);
1185	    }
1186	  xfree (buffer);
1187
1188	  /* If this entry has no name, or its name matches the name
1189	     for the main executable, don't include it in the list.  */
1190	  if (! new->so_name[0]
1191	      || match_main (new->so_name))
1192	    free_so (new);
1193	  else
1194	    {
1195	      new->next = 0;
1196	      *link_ptr = new;
1197	      link_ptr = &new->next;
1198	    }
1199	}
1200
1201      prev_lm = lm;
1202      lm = next_lm;
1203
1204      /* On Solaris, the dynamic linker is not in the normal list of
1205	 shared objects, so make sure we pick it up too.  Having
1206	 symbol information for the dynamic linker is quite crucial
1207	 for skipping dynamic linker resolver code.  */
1208      if (lm == 0 && ldsomap == 0)
1209	{
1210	  lm = ldsomap = solib_svr4_r_ldsomap (info);
1211	  prev_lm = 0;
1212	}
1213
1214      discard_cleanups (old_chain);
1215    }
1216
1217  if (head == NULL)
1218    return svr4_default_sos ();
1219
1220  return head;
1221}
1222
1223/* Get the address of the link_map for a given OBJFILE.  */
1224
1225CORE_ADDR
1226svr4_fetch_objfile_link_map (struct objfile *objfile)
1227{
1228  struct so_list *so;
1229  struct svr4_info *info = get_svr4_info ();
1230
1231  /* Cause svr4_current_sos() to be run if it hasn't been already.  */
1232  if (info->main_lm_addr == 0)
1233    solib_add (NULL, 0, &current_target, auto_solib_add);
1234
1235  /* svr4_current_sos() will set main_lm_addr for the main executable.  */
1236  if (objfile == symfile_objfile)
1237    return info->main_lm_addr;
1238
1239  /* The other link map addresses may be found by examining the list
1240     of shared libraries.  */
1241  for (so = master_so_list (); so; so = so->next)
1242    if (so->objfile == objfile)
1243      return so->lm_info->lm_addr;
1244
1245  /* Not found!  */
1246  return 0;
1247}
1248
1249/* On some systems, the only way to recognize the link map entry for
1250   the main executable file is by looking at its name.  Return
1251   non-zero iff SONAME matches one of the known main executable names.  */
1252
1253static int
1254match_main (const char *soname)
1255{
1256  const char * const *mainp;
1257
1258  for (mainp = main_name_list; *mainp != NULL; mainp++)
1259    {
1260      if (strcmp (soname, *mainp) == 0)
1261	return (1);
1262    }
1263
1264  return (0);
1265}
1266
1267/* Return 1 if PC lies in the dynamic symbol resolution code of the
1268   SVR4 run time loader.  */
1269
1270int
1271svr4_in_dynsym_resolve_code (CORE_ADDR pc)
1272{
1273  struct svr4_info *info = get_svr4_info ();
1274
1275  return ((pc >= info->interp_text_sect_low
1276	   && pc < info->interp_text_sect_high)
1277	  || (pc >= info->interp_plt_sect_low
1278	      && pc < info->interp_plt_sect_high)
1279	  || in_plt_section (pc, NULL)
1280	  || in_gnu_ifunc_stub (pc));
1281}
1282
1283/* Given an executable's ABFD and target, compute the entry-point
1284   address.  */
1285
1286static CORE_ADDR
1287exec_entry_point (struct bfd *abfd, struct target_ops *targ)
1288{
1289  /* KevinB wrote ... for most targets, the address returned by
1290     bfd_get_start_address() is the entry point for the start
1291     function.  But, for some targets, bfd_get_start_address() returns
1292     the address of a function descriptor from which the entry point
1293     address may be extracted.  This address is extracted by
1294     gdbarch_convert_from_func_ptr_addr().  The method
1295     gdbarch_convert_from_func_ptr_addr() is merely the identity
1296     function for targets which don't use function descriptors.  */
1297  return gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1298					     bfd_get_start_address (abfd),
1299					     targ);
1300}
1301
1302/*
1303
1304   LOCAL FUNCTION
1305
1306   enable_break -- arrange for dynamic linker to hit breakpoint
1307
1308   SYNOPSIS
1309
1310   int enable_break (void)
1311
1312   DESCRIPTION
1313
1314   Both the SunOS and the SVR4 dynamic linkers have, as part of their
1315   debugger interface, support for arranging for the inferior to hit
1316   a breakpoint after mapping in the shared libraries.  This function
1317   enables that breakpoint.
1318
1319   For SunOS, there is a special flag location (in_debugger) which we
1320   set to 1.  When the dynamic linker sees this flag set, it will set
1321   a breakpoint at a location known only to itself, after saving the
1322   original contents of that place and the breakpoint address itself,
1323   in its own internal structures.  When we resume the inferior, it
1324   will eventually take a SIGTRAP when it runs into the breakpoint.
1325   We handle this (in a different place) by restoring the contents of
1326   the breakpointed location (which is only known after it stops),
1327   chasing around to locate the shared libraries that have been
1328   loaded, then resuming.
1329
1330   For SVR4, the debugger interface structure contains a member (r_brk)
1331   which is statically initialized at the time the shared library is
1332   built, to the offset of a function (_r_debug_state) which is guaran-
1333   teed to be called once before mapping in a library, and again when
1334   the mapping is complete.  At the time we are examining this member,
1335   it contains only the unrelocated offset of the function, so we have
1336   to do our own relocation.  Later, when the dynamic linker actually
1337   runs, it relocates r_brk to be the actual address of _r_debug_state().
1338
1339   The debugger interface structure also contains an enumeration which
1340   is set to either RT_ADD or RT_DELETE prior to changing the mapping,
1341   depending upon whether or not the library is being mapped or unmapped,
1342   and then set to RT_CONSISTENT after the library is mapped/unmapped.
1343 */
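
/* Schematically, the debugger interface structure being described is the
   SVR4/glibc `struct r_debug' from <link.h> (simplified sketch; Solaris
   extends it with further members such as r_ldsomap, which is one reason
   the offsets come from svr4_fetch_link_map_offsets instead of being
   hard-coded):

     struct r_debug
     {
       int r_version;             // Protocol version number.
       struct link_map *r_map;    // Head of the chain of loaded objects.
       ElfW(Addr) r_brk;          // Notification function; the solib event
                                  //   breakpoint is planted here.
       enum { RT_CONSISTENT, RT_ADD, RT_DELETE } r_state;
       ElfW(Addr) r_ldbase;       // Base address of the dynamic linker.
     };

   The dynamic linker calls the function at r_brk (e.g. _dl_debug_state)
   with r_state set to RT_ADD or RT_DELETE before changing the map, and
   again with RT_CONSISTENT once the change is complete.  */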
1344
1345static int
1346enable_break (struct svr4_info *info, int from_tty)
1347{
1348  struct minimal_symbol *msymbol;
1349  const char * const *bkpt_namep;
1350  asection *interp_sect;
1351  gdb_byte *interp_name;
1352  CORE_ADDR sym_addr;
1353
1354  info->interp_text_sect_low = info->interp_text_sect_high = 0;
1355  info->interp_plt_sect_low = info->interp_plt_sect_high = 0;
1356
1357  /* If we already have a shared library list in the target, and
1358     r_debug contains r_brk, set the breakpoint there - this should
1359     mean r_brk has already been relocated.  Assume the dynamic linker
1360     is the object containing r_brk.  */
1361
1362  solib_add (NULL, from_tty, &current_target, auto_solib_add);
1363  sym_addr = 0;
1364  if (info->debug_base && solib_svr4_r_map (info) != 0)
1365    sym_addr = solib_svr4_r_brk (info);
1366
1367  if (sym_addr != 0)
1368    {
1369      struct obj_section *os;
1370
1371      sym_addr = gdbarch_addr_bits_remove
1372	(target_gdbarch, gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1373							     sym_addr,
1374							     &current_target));
1375
1376      /* On at least some versions of Solaris there's a dynamic relocation
1377	 on _r_debug.r_brk and SYM_ADDR may not be relocated yet, e.g., if
1378	 we get control before the dynamic linker has self-relocated.
1379	 Check if SYM_ADDR is in a known section; if it is, assume we can
1380	 trust its value.  This is just a heuristic, though; it could go away
1381	 or be replaced if it's getting in the way.
1382
1383	 On ARM we need to know whether the ISA of rtld_db_dlactivity (or
1384	 however it's spelled in your particular system) is ARM or Thumb.
1385	 That knowledge is encoded in the address, if it's Thumb the low bit
1386	 is 1.  However, we've stripped that info above and it's not clear
1387	 what all the consequences are of passing a non-addr_bits_remove'd
1388	 address to create_solib_event_breakpoint.  The call to
1389	 find_pc_section verifies we know about the address and have some
1390	 hope of computing the right kind of breakpoint to use (via
1391	 symbol info).  It does mean that GDB needs to be pointed at a
1392	 non-stripped version of the dynamic linker in order to obtain
1393	 information it already knows about.  Sigh.  */
1394
1395      os = find_pc_section (sym_addr);
1396      if (os != NULL)
1397	{
1398	  /* Record the relocated start and end address of the dynamic linker
1399	     text and plt section for svr4_in_dynsym_resolve_code.  */
1400	  bfd *tmp_bfd;
1401	  CORE_ADDR load_addr;
1402
1403	  tmp_bfd = os->objfile->obfd;
1404	  load_addr = ANOFFSET (os->objfile->section_offsets,
1405				os->objfile->sect_index_text);
1406
1407	  interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
1408	  if (interp_sect)
1409	    {
1410	      info->interp_text_sect_low =
1411		bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1412	      info->interp_text_sect_high =
1413		info->interp_text_sect_low
1414		+ bfd_section_size (tmp_bfd, interp_sect);
1415	    }
1416	  interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
1417	  if (interp_sect)
1418	    {
1419	      info->interp_plt_sect_low =
1420		bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1421	      info->interp_plt_sect_high =
1422		info->interp_plt_sect_low
1423		+ bfd_section_size (tmp_bfd, interp_sect);
1424	    }
1425
1426	  create_solib_event_breakpoint (target_gdbarch, sym_addr);
1427	  return 1;
1428	}
1429    }
1430
1431  /* Find the program interpreter; if not found, warn the user and drop
1432     into the old breakpoint at symbol code.  */
1433  interp_name = find_program_interpreter ();
1434  if (interp_name)
1435    {
1436      CORE_ADDR load_addr = 0;
1437      int load_addr_found = 0;
1438      int loader_found_in_list = 0;
1439      struct so_list *so;
1440      bfd *tmp_bfd = NULL;
1441      struct target_ops *tmp_bfd_target;
1442      volatile struct gdb_exception ex;
1443
1444      sym_addr = 0;
1445
1446      /* Now we need to figure out where the dynamic linker was
1447         loaded so that we can load its symbols and place a breakpoint
1448         in the dynamic linker itself.
1449
1450         This address is stored on the stack.  However, I've been unable
1451         to find any magic formula to find it for Solaris (appears to
1452         be trivial on GNU/Linux).  Therefore, we have to try an alternate
1453         mechanism to find the dynamic linker's base address.  */
1454
1455      TRY_CATCH (ex, RETURN_MASK_ALL)
1456        {
1457	  tmp_bfd = solib_bfd_open (interp_name);
1458	}
1459      if (tmp_bfd == NULL)
1460	goto bkpt_at_symbol;
1461
1462      /* Now convert the TMP_BFD into a target.  That way both target
1463         and BFD operations can be used.  Note that closing the
1464         target will also close the underlying bfd.  */
1465      tmp_bfd_target = target_bfd_reopen (tmp_bfd);
1466
1467      /* On a running target, we can get the dynamic linker's base
1468         address from the shared library table.  */
1469      so = master_so_list ();
1470      while (so)
1471	{
1472	  if (svr4_same_1 (interp_name, so->so_original_name))
1473	    {
1474	      load_addr_found = 1;
1475	      loader_found_in_list = 1;
1476	      load_addr = LM_ADDR_CHECK (so, tmp_bfd);
1477	      break;
1478	    }
1479	  so = so->next;
1480	}
1481
1482      /* If we were not able to find the base address of the loader
1483         from our so_list, then try using the AT_BASE auxiliary entry.  */
1484      if (!load_addr_found)
1485        if (target_auxv_search (&current_target, AT_BASE, &load_addr) > 0)
1486	  {
1487	    int addr_bit = gdbarch_addr_bit (target_gdbarch);
1488
1489	    /* Ensure LOAD_ADDR has the proper sign in its possible upper bits
1490	       so that `+ load_addr' will overflow CORE_ADDR width, rather than
1491	       creating invalid addresses like 0x101234567 for 32-bit inferiors
1492	       on 64-bit GDB.  */
1493
1494	    if (addr_bit < (sizeof (CORE_ADDR) * HOST_CHAR_BIT))
1495	      {
1496		CORE_ADDR space_size = (CORE_ADDR) 1 << addr_bit;
1497		CORE_ADDR tmp_entry_point = exec_entry_point (tmp_bfd,
1498							      tmp_bfd_target);
1499
1500		gdb_assert (load_addr < space_size);
1501
1502		/* TMP_ENTRY_POINT exceeding SPACE_SIZE would mean a prelinked
1503		   64-bit ld.so with a 32-bit executable; it should not happen.  */
1504
1505		if (tmp_entry_point < space_size
1506		    && tmp_entry_point + load_addr >= space_size)
1507		  load_addr -= space_size;
1508	      }
1509
1510	    load_addr_found = 1;
1511	  }
1512
1513      /* Otherwise we find the dynamic linker's base address by examining
1514	 the current pc (which should point at the entry point for the
1515	 dynamic linker) and subtracting the offset of the entry point.
1516
1517         This is more fragile than the previous approaches, but is a good
1518         fallback method because it has actually been working well in
1519         most cases.  */
1520      if (!load_addr_found)
1521	{
1522	  struct regcache *regcache
1523	    = get_thread_arch_regcache (inferior_ptid, target_gdbarch);
1524
1525	  load_addr = (regcache_read_pc (regcache)
1526		       - exec_entry_point (tmp_bfd, tmp_bfd_target));
1527	}
1528
1529      if (!loader_found_in_list)
1530	{
1531	  info->debug_loader_name = xstrdup (interp_name);
1532	  info->debug_loader_offset_p = 1;
1533	  info->debug_loader_offset = load_addr;
1534	  solib_add (NULL, from_tty, &current_target, auto_solib_add);
1535	}
1536
1537      /* Record the relocated start and end address of the dynamic linker
1538         text and plt section for svr4_in_dynsym_resolve_code.  */
1539      interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
1540      if (interp_sect)
1541	{
1542	  info->interp_text_sect_low =
1543	    bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1544	  info->interp_text_sect_high =
1545	    info->interp_text_sect_low
1546	    + bfd_section_size (tmp_bfd, interp_sect);
1547	}
1548      interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
1549      if (interp_sect)
1550	{
1551	  info->interp_plt_sect_low =
1552	    bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1553	  info->interp_plt_sect_high =
1554	    info->interp_plt_sect_low
1555	    + bfd_section_size (tmp_bfd, interp_sect);
1556	}
1557
1558      /* Now try to set a breakpoint in the dynamic linker.  */
1559      for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
1560	{
1561	  sym_addr = bfd_lookup_symbol (tmp_bfd, *bkpt_namep);
1562	  if (sym_addr != 0)
1563	    break;
1564	}
1565
1566      if (sym_addr != 0)
1567	/* Convert 'sym_addr' from a function pointer to an address.
1568	   Because we pass tmp_bfd_target instead of the current
1569	   target, this will always produce an unrelocated value.  */
1570	sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1571						       sym_addr,
1572						       tmp_bfd_target);
1573
1574      /* We're done with both the temporary bfd and target.  Remember,
1575         closing the target closes the underlying bfd.  */
1576      target_close (tmp_bfd_target, 0);
1577
1578      if (sym_addr != 0)
1579	{
1580	  create_solib_event_breakpoint (target_gdbarch, load_addr + sym_addr);
1581	  xfree (interp_name);
1582	  return 1;
1583	}
1584
1585      /* For whatever reason we couldn't set a breakpoint in the dynamic
1586         linker.  Warn and drop into the old code.  */
1587    bkpt_at_symbol:
1588      xfree (interp_name);
1589      warning (_("Unable to find dynamic linker breakpoint function.\n"
1590               "GDB will be unable to debug shared library initializers\n"
1591               "and track explicitly loaded dynamic code."));
1592    }
1593
1594  /* Scan through the lists of symbols, trying to look up the symbol and
1595     set a breakpoint there.  Terminate loop when we/if we succeed.  */
1596
1597  for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
1598    {
1599      msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
1600      if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
1601	{
1602	  sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
1603	  sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1604							 sym_addr,
1605							 &current_target);
1606	  create_solib_event_breakpoint (target_gdbarch, sym_addr);
1607	  return 1;
1608	}
1609    }
1610
1611  if (!current_inferior ()->attach_flag)
1612    {
1613      for (bkpt_namep = bkpt_names; *bkpt_namep != NULL; bkpt_namep++)
1614	{
1615	  msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
1616	  if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
1617	    {
1618	      sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
1619	      sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1620							     sym_addr,
1621							     &current_target);
1622	      create_solib_event_breakpoint (target_gdbarch, sym_addr);
1623	      return 1;
1624	    }
1625	}
1626    }
1627  return 0;
1628}
1629
1630/*
1631
1632   LOCAL FUNCTION
1633
1634   special_symbol_handling -- additional shared library symbol handling
1635
1636   SYNOPSIS
1637
1638   void special_symbol_handling ()
1639
1640   DESCRIPTION
1641
1642   Once the symbols from a shared object have been loaded in the usual
1643   way, we are called to do any system specific symbol handling that
1644   is needed.
1645
1646   For SunOS4, this consisted of grunging around in the dynamic
1647   linker's structures to find symbol definitions for "common" symbols
1648   and adding them to the minimal symbol table for the runtime common
1649   objfile.
1650
1651   However, for SVR4, there's nothing to do.
1652
1653 */
1654
1655static void
1656svr4_special_symbol_handling (void)
1657{
1658}
1659
1660/* Read the ELF program headers from ABFD.  Return the contents and
1661   set *PHDRS_SIZE to the size of the program headers.  */
1662
1663static gdb_byte *
1664read_program_headers_from_bfd (bfd *abfd, int *phdrs_size)
1665{
1666  Elf_Internal_Ehdr *ehdr;
1667  gdb_byte *buf;
1668
1669  ehdr = elf_elfheader (abfd);
1670
1671  *phdrs_size = ehdr->e_phnum * ehdr->e_phentsize;
1672  if (*phdrs_size == 0)
1673    return NULL;
1674
1675  buf = xmalloc (*phdrs_size);
1676  if (bfd_seek (abfd, ehdr->e_phoff, SEEK_SET) != 0
1677      || bfd_bread (buf, *phdrs_size, abfd) != *phdrs_size)
1678    {
1679      xfree (buf);
1680      return NULL;
1681    }
1682
1683  return buf;
1684}
1685
1686/* Return 1 and fill *DISPLACEMENTP with the detected PIE offset of the
1687   inferior's exec_bfd.  Otherwise return 0.
1688
1689   We relocate all of the sections by the same amount.  This
1690   behavior is mandated by recent editions of the System V ABI.
1691   According to the System V Application Binary Interface,
1692   Edition 4.1, page 5-5:
1693
1694     ...  Though the system chooses virtual addresses for
1695     individual processes, it maintains the segments' relative
1696     positions.  Because position-independent code uses relative
1697     addressing between segments, the difference between
1698     virtual addresses in memory must match the difference
1699     between virtual addresses in the file.  The difference
1700     between the virtual address of any segment in memory and
1701     the corresponding virtual address in the file is thus a
1702     single constant value for any one executable or shared
1703     object in a given process.  This difference is the base
1704     address.  One use of the base address is to relocate the
1705     memory image of the program during dynamic linking.
1706
1707   The same language also appears in Edition 4.0 of the System V
1708   ABI and is left unspecified in some of the earlier editions.
1709
1710   Decide if the objfile needs to be relocated.  As indicated above, we will
1711   only be here when execution is stopped.  But during attachment PC can be at
1712   arbitrary address therefore regcache_read_pc can be misleading (contrary to
1713   the auxv AT_ENTRY value).  Moreover for executable with interpreter section
1714   regcache_read_pc would point to the interpreter and not the main executable.
1715
1716   So, to summarize, relocations are necessary when the start address obtained
1717   from the executable is different from the address in auxv AT_ENTRY entry.
1718
1719   [ The astute reader will note that we also test to make sure that
1720     the executable in question has the DYNAMIC flag set.  It is my
1721     opinion that this test is unnecessary (undesirable even).  It
1722     was added to avoid inadvertent relocation of an executable
1723     whose e_type member in the ELF header is not ET_DYN.  There may
1724     be a time in the future when it is desirable to do relocations
1725     on other types of files as well in which case this condition
1726     should either be removed or modified to accomodate the new file
1727     type.  - Kevin, Nov 2000. ]  */
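
/* For illustration only (hypothetical numbers): if the on-disk ELF header
   records e_entry == 0x670 and the kernel happens to load the PIE at base
   0x555555554000, then the auxv AT_ENTRY value is 0x555555554670 and

     displacement = 0x555555554670 - 0x670 = 0x555555554000

   which is page-aligned and therefore passes the minpagesize check below.  */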

static int
svr4_exec_displacement (CORE_ADDR *displacementp)
{
  /* ENTRY_POINT is a possible function descriptor - before
     a call to gdbarch_convert_from_func_ptr_addr.  */
  CORE_ADDR entry_point, displacement;

  if (exec_bfd == NULL)
    return 0;

  /* Only dynamic executables can be displaced.  For ELF that means ET_DYN
     rather than ET_EXEC; both shared libraries being executed themselves
     and PIE (Position Independent Executable) executables are ET_DYN.  */

  if ((bfd_get_file_flags (exec_bfd) & DYNAMIC) == 0)
    return 0;

  if (target_auxv_search (&current_target, AT_ENTRY, &entry_point) <= 0)
    return 0;

  displacement = entry_point - bfd_get_start_address (exec_bfd);

  /* Verify that the DISPLACEMENT candidate complies with the required page
     alignment.  It is cheaper than the program headers comparison below.  */

  if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
    {
      const struct elf_backend_data *elf = get_elf_backend_data (exec_bfd);

      /* p_align of PT_LOAD segments does not specify any alignment but
	 only congruency of addresses:
	   p_offset % p_align == p_vaddr % p_align
	 The kernel is free to load the executable with a lower alignment.  */

      if ((displacement & (elf->minpagesize - 1)) != 0)
	return 0;
    }

  /* Verify that the auxiliary vector describes the same file as exec_bfd, by
     comparing their program headers.  If the program headers in the auxiliary
     vector do not match the program headers in the executable, then we are
     looking at a different file than the one used by the kernel - for
     instance, "gdb program" connected to "gdbserver :PORT ld.so program".  */

  if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
    {
      /* Be optimistic and clear OK only if GDB was able to verify the headers
	 really do not match.  */
      int phdrs_size, phdrs2_size, ok = 1;
      gdb_byte *buf, *buf2;
      int arch_size;

      buf = read_program_header (-1, &phdrs_size, &arch_size);
      buf2 = read_program_headers_from_bfd (exec_bfd, &phdrs2_size);
      if (buf != NULL && buf2 != NULL)
	{
	  enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);

	  /* We are dealing with three different addresses.  EXEC_BFD
	     represents the current addresses in the on-disk file.  The
	     target memory content may differ from EXEC_BFD, as the file may
	     have been prelinked to a different address after the executable
	     was loaded.  Moreover, the address of placement in target memory
	     can differ from what the program headers in target memory say -
	     this is the whole point of PIE.

	     The detected DISPLACEMENT covers both the offset of the PIE
	     placement and any new prelink performed after the program was
	     started.  Here we relocate BUF and BUF2 just by the EXEC_BFD
	     vs. target memory content offset, for verification purposes.  */

	  if (phdrs_size != phdrs2_size
	      || bfd_get_arch_size (exec_bfd) != arch_size)
	    ok = 0;
	  else if (arch_size == 32
		   && phdrs_size >= sizeof (Elf32_External_Phdr)
		   && phdrs_size % sizeof (Elf32_External_Phdr) == 0)
	    {
	      Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
	      Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
	      CORE_ADDR displacement = 0;
	      int i;

	      /* DISPLACEMENT could be found more easily as the difference
		 between the in-memory e_entry and ehdr2->e_entry.  But we
		 have not read the in-memory ehdr, and the program headers
		 we have already read give us enough information to compute
		 that displacement.  */

	      for (i = 0; i < ehdr2->e_phnum; i++)
		if (phdr2[i].p_type == PT_LOAD)
		  {
		    Elf32_External_Phdr *phdrp;
		    gdb_byte *buf_vaddr_p, *buf_paddr_p;
		    CORE_ADDR vaddr, paddr;
		    CORE_ADDR displacement_vaddr = 0;
		    CORE_ADDR displacement_paddr = 0;

		    phdrp = &((Elf32_External_Phdr *) buf)[i];
		    buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
		    buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;

		    vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
						      byte_order);
		    displacement_vaddr = vaddr - phdr2[i].p_vaddr;

		    paddr = extract_unsigned_integer (buf_paddr_p, 4,
						      byte_order);
		    displacement_paddr = paddr - phdr2[i].p_paddr;

		    if (displacement_vaddr == displacement_paddr)
		      displacement = displacement_vaddr;

		    break;
		  }

	      /* Now compare BUF and BUF2 with optional DISPLACEMENT.  */

	      for (i = 0; i < phdrs_size / sizeof (Elf32_External_Phdr); i++)
		{
		  Elf32_External_Phdr *phdrp;
		  Elf32_External_Phdr *phdr2p;
		  gdb_byte *buf_vaddr_p, *buf_paddr_p;
		  CORE_ADDR vaddr, paddr;
		  asection *plt2_asect;

		  phdrp = &((Elf32_External_Phdr *) buf)[i];
		  buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
		  buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
		  phdr2p = &((Elf32_External_Phdr *) buf2)[i];

		  /* PT_GNU_STACK is an exception: it is never relocated by
		     prelink as its addresses are always zero.  */

		  if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
		    continue;

		  /* Check also other adjustment combinations - PR 11786.  */

		  vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
						    byte_order);
		  vaddr -= displacement;
		  store_unsigned_integer (buf_vaddr_p, 4, byte_order, vaddr);

		  paddr = extract_unsigned_integer (buf_paddr_p, 4,
						    byte_order);
		  paddr -= displacement;
		  store_unsigned_integer (buf_paddr_p, 4, byte_order, paddr);

		  if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
		    continue;

		  /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS.  */
		  plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
		  if (plt2_asect)
		    {
		      int content2;
		      gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
		      CORE_ADDR filesz;

		      content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
				  & SEC_HAS_CONTENTS) != 0;

		      filesz = extract_unsigned_integer (buf_filesz_p, 4,
							 byte_order);

		      /* PLT2_ASECT is from on-disk file (exec_bfd) while
			 FILESZ is from the in-memory image.  */
		      if (content2)
			filesz += bfd_get_section_size (plt2_asect);
		      else
			filesz -= bfd_get_section_size (plt2_asect);

		      store_unsigned_integer (buf_filesz_p, 4, byte_order,
					      filesz);

		      if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
			continue;
		    }

		  ok = 0;
		  break;
		}
	    }
	  else if (arch_size == 64
		   && phdrs_size >= sizeof (Elf64_External_Phdr)
		   && phdrs_size % sizeof (Elf64_External_Phdr) == 0)
	    {
	      Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
	      Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
	      CORE_ADDR displacement = 0;
	      int i;

	      /* DISPLACEMENT could be found more easily as the difference
		 between the in-memory e_entry and ehdr2->e_entry.  But we
		 have not read the in-memory ehdr, and the program headers
		 we have already read give us enough information to compute
		 that displacement.  */

	      for (i = 0; i < ehdr2->e_phnum; i++)
		if (phdr2[i].p_type == PT_LOAD)
		  {
		    Elf64_External_Phdr *phdrp;
		    gdb_byte *buf_vaddr_p, *buf_paddr_p;
		    CORE_ADDR vaddr, paddr;
		    CORE_ADDR displacement_vaddr = 0;
		    CORE_ADDR displacement_paddr = 0;

		    phdrp = &((Elf64_External_Phdr *) buf)[i];
		    buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
		    buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;

		    vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
						      byte_order);
		    displacement_vaddr = vaddr - phdr2[i].p_vaddr;

		    paddr = extract_unsigned_integer (buf_paddr_p, 8,
						      byte_order);
		    displacement_paddr = paddr - phdr2[i].p_paddr;

		    if (displacement_vaddr == displacement_paddr)
		      displacement = displacement_vaddr;

		    break;
		  }

	      /* Now compare BUF and BUF2 with optional DISPLACEMENT.  */

	      for (i = 0; i < phdrs_size / sizeof (Elf64_External_Phdr); i++)
		{
		  Elf64_External_Phdr *phdrp;
		  Elf64_External_Phdr *phdr2p;
		  gdb_byte *buf_vaddr_p, *buf_paddr_p;
		  CORE_ADDR vaddr, paddr;
		  asection *plt2_asect;

		  phdrp = &((Elf64_External_Phdr *) buf)[i];
		  buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
		  buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
		  phdr2p = &((Elf64_External_Phdr *) buf2)[i];

		  /* PT_GNU_STACK is an exception: it is never relocated by
		     prelink as its addresses are always zero.  */

		  if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
		    continue;

		  /* Check also other adjustment combinations - PR 11786.  */

		  vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
						    byte_order);
		  vaddr -= displacement;
		  store_unsigned_integer (buf_vaddr_p, 8, byte_order, vaddr);

		  paddr = extract_unsigned_integer (buf_paddr_p, 8,
						    byte_order);
		  paddr -= displacement;
		  store_unsigned_integer (buf_paddr_p, 8, byte_order, paddr);

		  if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
		    continue;

		  /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS.  */
		  plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
		  if (plt2_asect)
		    {
		      int content2;
		      gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
		      CORE_ADDR filesz;

		      content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
				  & SEC_HAS_CONTENTS) != 0;

		      filesz = extract_unsigned_integer (buf_filesz_p, 8,
							 byte_order);

		      /* PLT2_ASECT is from on-disk file (exec_bfd) while
			 FILESZ is from the in-memory image.  */
		      if (content2)
			filesz += bfd_get_section_size (plt2_asect);
		      else
			filesz -= bfd_get_section_size (plt2_asect);

		      store_unsigned_integer (buf_filesz_p, 8, byte_order,
					      filesz);

		      if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
			continue;
		    }

		  ok = 0;
		  break;
		}
	    }
	  else
	    ok = 0;
	}

      xfree (buf);
      xfree (buf2);

      if (!ok)
	return 0;
    }

  if (info_verbose)
    {
      /* This message may be printed repeatedly, as there is no easy way to
	 check whether the executable's symbols/file have already been
	 relocated by DISPLACEMENT.  */

      printf_unfiltered (_("Using PIE (Position Independent Executable) "
			   "displacement %s for \"%s\".\n"),
			 paddress (target_gdbarch, displacement),
			 bfd_get_filename (exec_bfd));
    }

  *displacementp = displacement;
  return 1;
}

/* Relocate the main executable.  This function should be called upon
   stopping the inferior process at the entry point to the program.
   The entry point from the BFD is compared to the auxv AT_ENTRY value; if
   they differ, the main executable is relocated by the difference.  */

static void
svr4_relocate_main_executable (void)
{
  CORE_ADDR displacement;

  /* If we are re-running this executable, SYMFILE_OBJFILE->SECTION_OFFSETS
     probably contains the offsets computed using the PIE displacement
     from the previous run, which of course are irrelevant for this run.
     So we need to determine the new PIE displacement and recompute the
     section offsets accordingly, even if SYMFILE_OBJFILE->SECTION_OFFSETS
     already contains pre-computed offsets.

     If we cannot compute the PIE displacement, either:

       - The executable is not PIE.

       - SYMFILE_OBJFILE does not match the executable started in the target.
	 This can happen for main executable symbols loaded at the host while
	 `ld.so --ld-args main-executable' is loaded in the target.

     Then we leave the section offsets untouched and use them as is for
     this run.  Either:

       - These section offsets were properly reset earlier, and thus
	 already contain the correct values.  This can happen for instance
	 when reconnecting via the remote protocol to a target that supports
	 the `qOffsets' packet.

       - The section offsets were not reset earlier, and the best we can
	 hope is that the old offsets are still applicable to the new run.  */

  if (! svr4_exec_displacement (&displacement))
    return;

  /* Even DISPLACEMENT 0 is a valid new difference of in-memory vs. in-file
     addresses.  */

  if (symfile_objfile)
    {
      struct section_offsets *new_offsets;
      int i;

      new_offsets = alloca (symfile_objfile->num_sections
			    * sizeof (*new_offsets));

      for (i = 0; i < symfile_objfile->num_sections; i++)
	new_offsets->offsets[i] = displacement;

      objfile_relocate (symfile_objfile, new_offsets);
    }
  else if (exec_bfd)
    {
      asection *asect;

      for (asect = exec_bfd->sections; asect != NULL; asect = asect->next)
	exec_set_section_address (bfd_get_filename (exec_bfd), asect->index,
				  (bfd_section_vma (exec_bfd, asect)
				   + displacement));
    }
}

/*

   GLOBAL FUNCTION

   svr4_solib_create_inferior_hook -- shared library startup support

   SYNOPSIS

   void svr4_solib_create_inferior_hook (int from_tty)

   DESCRIPTION

   When gdb starts up the inferior, it nurses it along (through the
   shell) until it is ready to execute its first instruction.  At this
   point, this function gets called via expansion of the macro
   SOLIB_CREATE_INFERIOR_HOOK.

   For SunOS executables, this first instruction is typically the
   one at "_start", or a similar text label, regardless of whether
   the executable is statically or dynamically linked.  The runtime
   startup code takes care of dynamically linking in any shared
   libraries, once gdb allows the inferior to continue.

   For SVR4 executables, this first instruction is either the first
   instruction in the dynamic linker (for dynamically linked
   executables) or the instruction at "start" for statically linked
   executables.  For dynamically linked executables, the system
   first exec's /lib/libc.so.N, which contains the dynamic linker,
   and starts it running.  The dynamic linker maps in any needed
   shared libraries, maps in the actual user executable, and then
   jumps to "start" in the user executable.

   For both SunOS shared libraries and SVR4 shared libraries, we
   can arrange to cooperate with the dynamic linker to discover the
   names of shared libraries that are dynamically linked, and the
   base addresses to which they are linked.

   This function is responsible for discovering those names and
   addresses, and saving sufficient information about them to allow
   their symbols to be read at a later time.

   FIXME

   Between enable_break() and disable_break(), this code does not
   properly handle hitting breakpoints which the user might have
   set in the startup code or in the dynamic linker itself.  Proper
   handling will probably have to wait until the implementation is
   changed to use the "breakpoint handler function" method.

   Also, what if the child has exit()ed?  Must exit the loop somehow.
 */

static void
svr4_solib_create_inferior_hook (int from_tty)
{
#if defined(_SCO_DS)
  struct inferior *inf;
  struct thread_info *tp;
#endif /* defined(_SCO_DS) */
  struct svr4_info *info;

  info = get_svr4_info ();

  /* Relocate the main executable if necessary.  */
  svr4_relocate_main_executable ();

  /* No point setting a breakpoint in the dynamic linker if we can't
     hit it (e.g., a core file, or a trace file).  */
  if (!target_has_execution)
    return;

  if (!svr4_have_link_map_offsets ())
    return;

  if (!enable_break (info, from_tty))
    return;

#if defined(_SCO_DS)
  /* SCO needs the loop below; other systems should be using the
     special shared library breakpoints and the shared library breakpoint
     service routine.

     Now run the target.  It will eventually hit the breakpoint, at
     which point all of the libraries will have been mapped in and we
     can go groveling around in the dynamic linker structures to find
     out what we need to know about them.  */

  inf = current_inferior ();
  tp = inferior_thread ();

  clear_proceed_status ();
  inf->control.stop_soon = STOP_QUIETLY;
  tp->suspend.stop_signal = TARGET_SIGNAL_0;
  do
    {
      target_resume (pid_to_ptid (-1), 0, tp->suspend.stop_signal);
      wait_for_inferior (0);
    }
  while (tp->suspend.stop_signal != TARGET_SIGNAL_TRAP);
  inf->control.stop_soon = NO_STOP_QUIETLY;
#endif /* defined(_SCO_DS) */
}

static void
svr4_clear_solib (void)
{
  struct svr4_info *info;

  info = get_svr4_info ();
  info->debug_base = 0;
  info->debug_loader_offset_p = 0;
  info->debug_loader_offset = 0;
  xfree (info->debug_loader_name);
  info->debug_loader_name = NULL;
}

static void
svr4_free_so (struct so_list *so)
{
  xfree (so->lm_info->lm);
  xfree (so->lm_info);
}


/* Clear any bits of ADDR that wouldn't fit in a target-format
   data pointer.  "Data pointer" here refers to whatever sort of
   address the dynamic linker uses to manage its sections.  At the
   moment, we don't support shared libraries on any processors where
   code and data pointers are different sizes.

   This isn't really the right solution.  What we really need here is
   a way to do arithmetic on CORE_ADDR values that respects the
   natural pointer/address correspondence.  (For example, on the MIPS,
   converting a 32-bit pointer to a 64-bit CORE_ADDR requires you to
   sign-extend the value.  There, simply truncating the bits above
   gdbarch_ptr_bit, as we do below, is no good.)  This should probably
   be a new gdbarch method or something.  */
static CORE_ADDR
svr4_truncate_ptr (CORE_ADDR addr)
{
  if (gdbarch_ptr_bit (target_gdbarch) == sizeof (CORE_ADDR) * 8)
    /* We don't need to truncate anything, and the bit twiddling below
       will fail due to overflow problems.  */
    return addr;
  else
    return addr & (((CORE_ADDR) 1 << gdbarch_ptr_bit (target_gdbarch)) - 1);
}
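
/* For example (illustrative values): with gdbarch_ptr_bit == 32 and a
   64-bit CORE_ADDR, the mask above is ((CORE_ADDR) 1 << 32) - 1
   == 0xffffffff, so only the low 32 bits of ADDR are kept.  */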


static void
svr4_relocate_section_addresses (struct so_list *so,
                                 struct target_section *sec)
{
  sec->addr    = svr4_truncate_ptr (sec->addr    + LM_ADDR_CHECK (so,
								  sec->bfd));
  sec->endaddr = svr4_truncate_ptr (sec->endaddr + LM_ADDR_CHECK (so,
								  sec->bfd));
}


/* Architecture-specific operations.  */

/* Per-architecture data key.  */
static struct gdbarch_data *solib_svr4_data;

struct solib_svr4_ops
{
  /* Return a description of the layout of `struct link_map'.  */
  struct link_map_offsets *(*fetch_link_map_offsets)(void);
};

/* Return a default for the architecture-specific operations.  */

static void *
solib_svr4_init (struct obstack *obstack)
{
  struct solib_svr4_ops *ops;

  ops = OBSTACK_ZALLOC (obstack, struct solib_svr4_ops);
  ops->fetch_link_map_offsets = NULL;
  return ops;
}

/* Set the architecture-specific `struct link_map_offsets' fetcher for
   GDBARCH to FLMO.  Also, install SVR4 solib_ops into GDBARCH.  */

void
set_solib_svr4_fetch_link_map_offsets (struct gdbarch *gdbarch,
                                       struct link_map_offsets *(*flmo) (void))
{
  struct solib_svr4_ops *ops = gdbarch_data (gdbarch, solib_svr4_data);

  ops->fetch_link_map_offsets = flmo;

  set_solib_ops (gdbarch, &svr4_so_ops);
}
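
/* As a sketch (not a complete tdep file; the function and file names here
   are hypothetical), an architecture's *-tdep.c typically wires this up
   from its ABI initialization routine, along the lines of

     static void
     example_linux_init_abi (struct gdbarch_info info,
			     struct gdbarch *gdbarch)
     {
       set_solib_svr4_fetch_link_map_offsets
	 (gdbarch, svr4_ilp32_fetch_link_map_offsets);
     }

   using svr4_lp64_fetch_link_map_offsets instead for 64-bit ABIs.  */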

/* Fetch a link_map_offsets structure using the architecture-specific
   `struct link_map_offsets' fetcher.  */

static struct link_map_offsets *
svr4_fetch_link_map_offsets (void)
{
  struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch, solib_svr4_data);

  gdb_assert (ops->fetch_link_map_offsets);
  return ops->fetch_link_map_offsets ();
}

/* Return 1 if a link map offset fetcher has been defined, 0 otherwise.  */

static int
svr4_have_link_map_offsets (void)
{
  struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch, solib_svr4_data);

  return (ops->fetch_link_map_offsets != NULL);
}


/* Most OSes that have SVR4-style ELF dynamic libraries define a
   `struct r_debug' and a `struct link_map' that are binary compatible
   with the original SVR4 implementation.  */

/* Fetch (and possibly build) an appropriate `struct link_map_offsets'
   for an ILP32 SVR4 system.  */

struct link_map_offsets *
svr4_ilp32_fetch_link_map_offsets (void)
{
  static struct link_map_offsets lmo;
  static struct link_map_offsets *lmp = NULL;

  if (lmp == NULL)
    {
      lmp = &lmo;

      lmo.r_version_offset = 0;
      lmo.r_version_size = 4;
      lmo.r_map_offset = 4;
      lmo.r_brk_offset = 8;
      lmo.r_ldsomap_offset = 20;

      /* Everything we need is in the first 20 bytes.  */
      lmo.link_map_size = 20;
      lmo.l_addr_offset = 0;
      lmo.l_name_offset = 4;
      lmo.l_ld_offset = 8;
      lmo.l_next_offset = 12;
      lmo.l_prev_offset = 16;
    }

  return lmp;
}
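
/* As a rough illustration (assuming generic SVR4/glibc-style declarations
   along the lines of <link.h>), the ILP32 offsets above describe layouts
   such as:

     struct r_debug
     {
       int r_version;                 -- offset 0 (4 bytes)
       struct link_map *r_map;        -- offset 4
       Elf32_Addr r_brk;              -- offset 8
       ...
     };

     struct link_map
     {
       Elf32_Addr l_addr;             -- offset 0
       char *l_name;                  -- offset 4
       Elf32_Dyn *l_ld;               -- offset 8
       struct link_map *l_next;       -- offset 12
       struct link_map *l_prev;       -- offset 16
     };

   r_ldsomap_offset (20 here) refers to a field found past the standard
   r_debug members on some systems (e.g. Solaris); it is not part of every
   implementation.  The LP64 variant below follows the same layout with
   8-byte pointers and addresses.  */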

/* Fetch (and possibly build) an appropriate `struct link_map_offsets'
   for an LP64 SVR4 system.  */

struct link_map_offsets *
svr4_lp64_fetch_link_map_offsets (void)
{
  static struct link_map_offsets lmo;
  static struct link_map_offsets *lmp = NULL;

  if (lmp == NULL)
    {
      lmp = &lmo;

      lmo.r_version_offset = 0;
      lmo.r_version_size = 4;
      lmo.r_map_offset = 8;
      lmo.r_brk_offset = 16;
      lmo.r_ldsomap_offset = 40;

      /* Everything we need is in the first 40 bytes.  */
      lmo.link_map_size = 40;
      lmo.l_addr_offset = 0;
      lmo.l_name_offset = 8;
      lmo.l_ld_offset = 16;
      lmo.l_next_offset = 24;
      lmo.l_prev_offset = 32;
    }

  return lmp;
}


struct target_so_ops svr4_so_ops;

/* Look up a global symbol for ELF DSOs linked with -Bsymbolic.  Those DSOs
   have a different rule for symbol lookup: the lookup begins here in the
   DSO, not in the main executable.  */

static struct symbol *
elf_lookup_lib_symbol (const struct objfile *objfile,
		       const char *name,
		       const domain_enum domain)
{
  bfd *abfd;

  if (objfile == symfile_objfile)
    abfd = exec_bfd;
  else
    {
      /* OBJFILE should have been passed as the non-debug one.  */
      gdb_assert (objfile->separate_debug_objfile_backlink == NULL);

      abfd = objfile->obfd;
    }

  if (abfd == NULL || scan_dyntag (DT_SYMBOLIC, abfd, NULL) != 1)
    return NULL;

  return lookup_global_symbol_from_objfile (objfile, name, domain);
}

extern initialize_file_ftype _initialize_svr4_solib; /* -Wmissing-prototypes */

void
_initialize_svr4_solib (void)
{
  solib_svr4_data = gdbarch_data_register_pre_init (solib_svr4_init);
  solib_svr4_pspace_data
    = register_program_space_data_with_cleanup (svr4_pspace_data_cleanup);

  svr4_so_ops.relocate_section_addresses = svr4_relocate_section_addresses;
  svr4_so_ops.free_so = svr4_free_so;
  svr4_so_ops.clear_solib = svr4_clear_solib;
  svr4_so_ops.solib_create_inferior_hook = svr4_solib_create_inferior_hook;
  svr4_so_ops.special_symbol_handling = svr4_special_symbol_handling;
  svr4_so_ops.current_sos = svr4_current_sos;
  svr4_so_ops.open_symbol_file_object = open_symbol_file_object;
  svr4_so_ops.in_dynsym_resolve_code = svr4_in_dynsym_resolve_code;
  svr4_so_ops.bfd_open = solib_bfd_open;
  svr4_so_ops.lookup_lib_global_symbol = elf_lookup_lib_symbol;
  svr4_so_ops.same = svr4_same;
  svr4_so_ops.keep_data_in_core = svr4_keep_data_in_core;
}
