/* SPU specific support for 32-bit ELF

   Copyright 2006, 2007 Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License along
   with this program; if not, write to the Free Software Foundation, Inc.,
   51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */

#include "sysdep.h"
#include "bfd.h"
#include "bfdlink.h"
#include "libbfd.h"
#include "elf-bfd.h"
#include "elf/spu.h"
#include "elf32-spu.h"

/* We use RELA style relocs.  Don't define USE_REL.  */

static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
					   void *, asection *,
					   bfd *, char **);

/* Values of type 'enum elf_spu_reloc_type' are used to index this
   array, so it must be declared in the order of that type.  */

static reloc_howto_type elf_howto_table[] = {
  HOWTO (R_SPU_NONE,       0, 0,  0, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_NONE",
	 FALSE, 0, 0x00000000, FALSE),
  HOWTO (R_SPU_ADDR10,     4, 2, 10, FALSE, 14, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR10",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16,     2, 2, 16, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_LO,  0, 2, 16, FALSE,  7, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR18,     0, 2, 18, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR18",
	 FALSE, 0, 0x01ffff80, FALSE),
  HOWTO (R_SPU_ADDR32,   0, 2, 32, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_REL16,      2, 2, 16,  TRUE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_REL16",
	 FALSE, 0, 0x007fff80, TRUE),
  HOWTO (R_SPU_ADDR7,      0, 2,  7, FALSE, 14, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR7",
	 FALSE, 0, 0x001fc000, FALSE),
  HOWTO (R_SPU_REL9,       2, 2,  9,  TRUE,  0, complain_overflow_signed,
	 spu_elf_rel9,          "SPU_REL9",
	 FALSE, 0, 0x0180007f, TRUE),
  HOWTO (R_SPU_REL9I,      2, 2,  9,  TRUE,  0, complain_overflow_signed,
	 spu_elf_rel9,          "SPU_REL9I",
	 FALSE, 0, 0x0000c07f, TRUE),
  HOWTO (R_SPU_ADDR10I,    0, 2, 10, FALSE, 14, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR10I",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16I,    0, 2, 16, FALSE,  7, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR16I",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_REL32,   0, 2, 32, TRUE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_REL32",
	 FALSE, 0, 0xffffffff, TRUE),
  HOWTO (R_SPU_ADDR16X,    0, 2, 16, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16X",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_PPU32,   0, 2, 32, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_PPU64,   0, 4, 64, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU64",
	 FALSE, 0, -1, FALSE),
};

static struct bfd_elf_special_section const spu_elf_special_sections[] = {
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};

static enum elf_spu_reloc_type
spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
{
  switch (code)
    {
    default:
      return R_SPU_NONE;
    case BFD_RELOC_SPU_IMM10W:
      return R_SPU_ADDR10;
    case BFD_RELOC_SPU_IMM16W:
      return R_SPU_ADDR16;
    case BFD_RELOC_SPU_LO16:
      return R_SPU_ADDR16_LO;
    case BFD_RELOC_SPU_HI16:
      return R_SPU_ADDR16_HI;
    case BFD_RELOC_SPU_IMM18:
      return R_SPU_ADDR18;
    case BFD_RELOC_SPU_PCREL16:
      return R_SPU_REL16;
    case BFD_RELOC_SPU_IMM7:
      return R_SPU_ADDR7;
    case BFD_RELOC_SPU_IMM8:
      return R_SPU_NONE;
    case BFD_RELOC_SPU_PCREL9a:
      return R_SPU_REL9;
    case BFD_RELOC_SPU_PCREL9b:
      return R_SPU_REL9I;
    case BFD_RELOC_SPU_IMM10:
      return R_SPU_ADDR10I;
    case BFD_RELOC_SPU_IMM16:
      return R_SPU_ADDR16I;
    case BFD_RELOC_32:
      return R_SPU_ADDR32;
    case BFD_RELOC_32_PCREL:
      return R_SPU_REL32;
    case BFD_RELOC_SPU_PPU32:
      return R_SPU_PPU32;
    case BFD_RELOC_SPU_PPU64:
      return R_SPU_PPU64;
    }
}

static void
spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
		       arelent *cache_ptr,
		       Elf_Internal_Rela *dst)
{
  enum elf_spu_reloc_type r_type;

  r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
  BFD_ASSERT (r_type < R_SPU_max);
  cache_ptr->howto = &elf_howto_table[(int) r_type];
}

static reloc_howto_type *
spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
			   bfd_reloc_code_real_type code)
{
  enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);

  if (r_type == R_SPU_NONE)
    return NULL;

  return elf_howto_table + r_type;
}

static reloc_howto_type *
spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
			   const char *r_name)
{
  unsigned int i;

  for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
    if (elf_howto_table[i].name != NULL
	&& strcasecmp (elf_howto_table[i].name, r_name) == 0)
      return &elf_howto_table[i];

  return NULL;
}

/* Apply R_SPU_REL9 and R_SPU_REL9I relocs.  */

static bfd_reloc_status_type
spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
	      void *data, asection *input_section,
	      bfd *output_bfd, char **error_message)
{
  bfd_size_type octets;
  bfd_vma val;
  long insn;

  /* If this is a relocatable link (output_bfd test tells us), just
     call the generic function.  Any adjustment will be done at final
     link time.  */
  if (output_bfd != NULL)
    return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
				  input_section, output_bfd, error_message);

  if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
    return bfd_reloc_outofrange;
  octets = reloc_entry->address * bfd_octets_per_byte (abfd);

  /* Get symbol value.  */
  val = 0;
  if (!bfd_is_com_section (symbol->section))
    val = symbol->value;
  if (symbol->section->output_section)
    val += symbol->section->output_section->vma;

  val += reloc_entry->addend;

  /* Make it pc-relative.  */
  val -= input_section->output_section->vma + input_section->output_offset;

  val >>= 2;
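  /* The offset is now a word count and must fit in the signed 9-bit
     relocation field, i.e. -256 <= val < 256.  */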
  if (val + 256 >= 512)
    return bfd_reloc_overflow;

  insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);

  /* Move two high bits of value to REL9I and REL9 position.
     The mask will take care of selecting the right field.  */
  val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
  insn &= ~reloc_entry->howto->dst_mask;
  insn |= val & reloc_entry->howto->dst_mask;
  bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
  return bfd_reloc_ok;
}

static bfd_boolean
spu_elf_new_section_hook (bfd *abfd, asection *sec)
{
  if (!sec->used_by_bfd)
    {
      struct _spu_elf_section_data *sdata;

      sdata = bfd_zalloc (abfd, sizeof (*sdata));
      if (sdata == NULL)
	return FALSE;
      sec->used_by_bfd = sdata;
    }

  return _bfd_elf_new_section_hook (abfd, sec);
}

/* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
   strip --strip-unneeded will not remove them.  */

static void
spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
{
  if (sym->name != NULL
      && sym->section != bfd_abs_section_ptr
      && strncmp (sym->name, "_EAR_", 5) == 0)
    sym->flags |= BSF_KEEP;
}

/* SPU ELF linker hash table.  */

struct spu_link_hash_table
{
  struct elf_link_hash_table elf;

  /* The stub hash table.  */
  struct bfd_hash_table stub_hash_table;

  /* Shortcuts to overlay sections.  */
  asection *stub;
  asection *ovtab;

  struct elf_link_hash_entry *ovly_load;

  /* An array of two output sections per overlay region, chosen such that
     the first section's vma is the overlay buffer vma (i.e. the section
     with the lowest vma among those occupying the region), and the second
     section's vma+size marks the end of the region.  We keep pointers
     to sections like this because section vmas may change when laying
     them out.  */
  asection **ovl_region;

  /* Number of overlay buffers.  */
  unsigned int num_buf;

  /* Total number of overlays.  */
  unsigned int num_overlays;

  /* Set if we should emit symbols for stubs.  */
  unsigned int emit_stub_syms:1;

  /* Set if we want stubs on calls out of overlay regions to
     non-overlay regions.  */
  unsigned int non_overlay_stubs : 1;

  /* Set on error.  */
  unsigned int stub_overflow : 1;

  /* Set if stack size analysis should be done.  */
  unsigned int stack_analysis : 1;

  /* Set if __stack_* syms will be emitted.  */
  unsigned int emit_stack_syms : 1;
};

#define spu_hash_table(p) \
  ((struct spu_link_hash_table *) ((p)->hash))

struct spu_stub_hash_entry
{
  struct bfd_hash_entry root;

  /* Destination of this stub.  */
  asection *target_section;
  bfd_vma target_off;

  /* Offset of entry in stub section.  */
  bfd_vma off;

  /* Offset from this stub to stub that loads the overlay index.  */
  bfd_vma delta;
};

/* Create an entry in a spu stub hash table.  */

static struct bfd_hash_entry *
stub_hash_newfunc (struct bfd_hash_entry *entry,
		   struct bfd_hash_table *table,
		   const char *string)
{
  /* Allocate the structure if it has not already been allocated by a
     subclass.  */
  if (entry == NULL)
    {
      entry = bfd_hash_allocate (table, sizeof (struct spu_stub_hash_entry));
      if (entry == NULL)
	return entry;
    }

  /* Call the allocation method of the superclass.  */
  entry = bfd_hash_newfunc (entry, table, string);
  if (entry != NULL)
    {
      struct spu_stub_hash_entry *sh = (struct spu_stub_hash_entry *) entry;

      sh->target_section = NULL;
      sh->target_off = 0;
      sh->off = 0;
      sh->delta = 0;
    }

  return entry;
}

/* Create a spu ELF linker hash table.  */

static struct bfd_link_hash_table *
spu_elf_link_hash_table_create (bfd *abfd)
{
  struct spu_link_hash_table *htab;

  htab = bfd_malloc (sizeof (*htab));
  if (htab == NULL)
    return NULL;

  if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
				      _bfd_elf_link_hash_newfunc,
				      sizeof (struct elf_link_hash_entry)))
    {
      free (htab);
      return NULL;
    }

  /* Init the stub hash table too.  */
  if (!bfd_hash_table_init (&htab->stub_hash_table, stub_hash_newfunc,
			    sizeof (struct spu_stub_hash_entry)))
    return NULL;

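  /* Clear the SPU-specific fields, which start at "stub".  */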
  memset (&htab->stub, 0,
	  sizeof (*htab) - offsetof (struct spu_link_hash_table, stub));

  return &htab->elf.root;
}

/* Free the derived linker hash table.  */

static void
spu_elf_link_hash_table_free (struct bfd_link_hash_table *hash)
{
  struct spu_link_hash_table *ret = (struct spu_link_hash_table *) hash;

  bfd_hash_table_free (&ret->stub_hash_table);
  _bfd_generic_link_hash_table_free (hash);
}

/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
   to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
   *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.  */

static bfd_boolean
get_sym_h (struct elf_link_hash_entry **hp,
	   Elf_Internal_Sym **symp,
	   asection **symsecp,
	   Elf_Internal_Sym **locsymsp,
	   unsigned long r_symndx,
	   bfd *ibfd)
{
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;

  if (r_symndx >= symtab_hdr->sh_info)
    {
      struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
      struct elf_link_hash_entry *h;

      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
      while (h->root.type == bfd_link_hash_indirect
	     || h->root.type == bfd_link_hash_warning)
	h = (struct elf_link_hash_entry *) h->root.u.i.link;

      if (hp != NULL)
	*hp = h;

      if (symp != NULL)
	*symp = NULL;

      if (symsecp != NULL)
	{
	  asection *symsec = NULL;
	  if (h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	    symsec = h->root.u.def.section;
	  *symsecp = symsec;
	}
    }
  else
    {
      Elf_Internal_Sym *sym;
      Elf_Internal_Sym *locsyms = *locsymsp;

      if (locsyms == NULL)
	{
	  locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
	  if (locsyms == NULL)
	    {
	      size_t symcount = symtab_hdr->sh_info;

	      /* If we are reading symbols into the contents, then
		 read the global syms too.  This is done to cache
		 syms for later stack analysis.  */
	      if ((unsigned char **) locsymsp == &symtab_hdr->contents)
		symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
	      locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
					      NULL, NULL, NULL);
	    }
	  if (locsyms == NULL)
	    return FALSE;
	  *locsymsp = locsyms;
	}
      sym = locsyms + r_symndx;

      if (hp != NULL)
	*hp = NULL;

      if (symp != NULL)
	*symp = sym;

      if (symsecp != NULL)
	{
	  asection *symsec = NULL;
	  if ((sym->st_shndx != SHN_UNDEF
	       && sym->st_shndx < SHN_LORESERVE)
	      || sym->st_shndx > SHN_HIRESERVE)
	    symsec = bfd_section_from_elf_index (ibfd, sym->st_shndx);
	  *symsecp = symsec;
	}
    }

  return TRUE;
}

/* Build a name for an entry in the stub hash table.  We can't use a
   local symbol name because ld -r might generate duplicate local symbols.  */

static char *
spu_stub_name (const asection *sym_sec,
	       const struct elf_link_hash_entry *h,
	       const Elf_Internal_Rela *rel)
{
  char *stub_name;
  bfd_size_type len;

  if (h)
    {
      len = strlen (h->root.root.string) + 1 + 8 + 1;
      stub_name = bfd_malloc (len);
      if (stub_name == NULL)
	return stub_name;

      sprintf (stub_name, "%s+%x",
	       h->root.root.string,
	       (int) rel->r_addend & 0xffffffff);
      len -= 8;
    }
  else
    {
      len = 8 + 1 + 8 + 1 + 8 + 1;
      stub_name = bfd_malloc (len);
      if (stub_name == NULL)
	return stub_name;

      sprintf (stub_name, "%x:%x+%x",
	       sym_sec->id & 0xffffffff,
	       (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
	       (int) rel->r_addend & 0xffffffff);
      len = strlen (stub_name);
    }

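  /* Drop a trailing "+0" so a zero addend doesn't clutter the stub name.  */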
  if (stub_name[len - 2] == '+'
      && stub_name[len - 1] == '0'
      && stub_name[len] == 0)
    stub_name[len - 2] = 0;

  return stub_name;
}

/* Create the note section if not already present.  This is done early so
   that the linker maps the sections to the right place in the output.  */

bfd_boolean
spu_elf_create_sections (bfd *output_bfd,
			 struct bfd_link_info *info,
			 int stack_analysis,
			 int emit_stack_syms)
{
  bfd *ibfd;
  struct spu_link_hash_table *htab = spu_hash_table (info);

  /* Stash some options away where we can get at them later.  */
  htab->stack_analysis = stack_analysis;
  htab->emit_stack_syms = emit_stack_syms;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
      break;

  if (ibfd == NULL)
    {
      /* Make SPU_PTNOTE_SPUNAME section.  */
      asection *s;
      size_t name_len;
      size_t size;
      bfd_byte *data;
      flagword flags;

      ibfd = info->input_bfds;
      flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
      if (s == NULL
	  || !bfd_set_section_alignment (ibfd, s, 4))
	return FALSE;

      name_len = strlen (bfd_get_filename (output_bfd)) + 1;
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
      size += (name_len + 3) & -4;

      if (!bfd_set_section_size (ibfd, s, size))
	return FALSE;

      data = bfd_zalloc (ibfd, size);
      if (data == NULL)
	return FALSE;

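      /* ELF note layout: namesz, descsz, type, then the name
	 (SPU_PLUGIN_NAME) and the descriptor (the output file name),
	 each padded to a multiple of four bytes.  */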
      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
      bfd_put_32 (ibfd, name_len, data + 4);
      bfd_put_32 (ibfd, 1, data + 8);
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
	      bfd_get_filename (output_bfd), name_len);
      s->contents = data;
    }

  return TRUE;
}

/* qsort predicate to sort sections by vma.  */

static int
sort_sections (const void *a, const void *b)
{
  const asection *const *s1 = a;
  const asection *const *s2 = b;
  bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;

  if (delta != 0)
    return delta < 0 ? -1 : 1;

  return (*s1)->index - (*s2)->index;
}

/* Identify overlays in the output bfd, and number them.  */

bfd_boolean
spu_elf_find_overlays (bfd *output_bfd, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection **alloc_sec;
  unsigned int i, n, ovl_index, num_buf;
  asection *s;
  bfd_vma ovl_end;

  if (output_bfd->section_count < 2)
    return FALSE;

  alloc_sec = bfd_malloc (output_bfd->section_count * sizeof (*alloc_sec));
  if (alloc_sec == NULL)
    return FALSE;

  /* Pick out all the alloced sections.  */
  for (n = 0, s = output_bfd->sections; s != NULL; s = s->next)
    if ((s->flags & SEC_ALLOC) != 0
	&& (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
	&& s->size != 0)
      alloc_sec[n++] = s;

  if (n == 0)
    {
      free (alloc_sec);
      return FALSE;
    }

  /* Sort them by vma.  */
  qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);

  /* Look for overlapping vmas.  Any with overlap must be overlays.
     Count them.  Also count the number of overlay regions and for
     each region save a section from that region with the lowest vma
     and another section with the highest end vma.  */
  ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
  for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
    {
      s = alloc_sec[i];
      if (s->vma < ovl_end)
	{
	  asection *s0 = alloc_sec[i - 1];

	  if (spu_elf_section_data (s0)->ovl_index == 0)
	    {
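	      /* s0 starts a new overlay region: record it as both the
		 lowest and, so far, highest section in the region.  */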
	      spu_elf_section_data (s0)->ovl_index = ++ovl_index;
	      alloc_sec[num_buf * 2] = s0;
	      alloc_sec[num_buf * 2 + 1] = s0;
	      num_buf++;
	    }
	  spu_elf_section_data (s)->ovl_index = ++ovl_index;
	  if (ovl_end < s->vma + s->size)
	    {
	      ovl_end = s->vma + s->size;
	      alloc_sec[num_buf * 2 - 1] = s;
	    }
	}
      else
	ovl_end = s->vma + s->size;
    }

  htab->num_overlays = ovl_index;
  htab->num_buf = num_buf;
  if (ovl_index == 0)
    {
      free (alloc_sec);
      return FALSE;
    }

  alloc_sec = bfd_realloc (alloc_sec, num_buf * 2 * sizeof (*alloc_sec));
  if (alloc_sec == NULL)
    return FALSE;

  htab->ovl_region = alloc_sec;
  return TRUE;
}

/* One of these per stub.  */
#define SIZEOF_STUB1 8
#define ILA_79	0x4200004f		/* ila $79,function_address */
#define BR	0x32000000		/* br stub2 */

/* One of these per overlay.  */
#define SIZEOF_STUB2 8
#define ILA_78	0x4200004e		/* ila $78,overlay_number */
					/* br __ovly_load */
#define NOP	0x40200000

/* Return true for all relative and absolute branch instructions.
   bra   00110000 0..
   brasl 00110001 0..
   br    00110010 0..
   brsl  00110011 0..
   brz   00100000 0..
   brnz  00100001 0..
   brhz  00100010 0..
   brhnz 00100011 0..  */

static bfd_boolean
is_branch (const unsigned char *insn)
{
  return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
}

/* Return true for branch hint instructions.
   hbra  0001000..
   hbrr  0001001..  */

static bfd_boolean
is_hint (const unsigned char *insn)
{
  return (insn[0] & 0xfc) == 0x10;
}

/* Return TRUE if this reloc symbol should possibly go via an overlay stub.  */

static bfd_boolean
needs_ovl_stub (const char *sym_name,
		asection *sym_sec,
		asection *input_section,
		struct spu_link_hash_table *htab,
		bfd_boolean is_branch)
{
  if (htab->num_overlays == 0)
    return FALSE;

  if (sym_sec == NULL
      || sym_sec->output_section == NULL
      || spu_elf_section_data (sym_sec->output_section) == NULL)
    return FALSE;

  /* setjmp always goes via an overlay stub, because then the return
     and hence the longjmp goes via __ovly_return.  That magically
     makes setjmp/longjmp between overlays work.  */
  if (strncmp (sym_name, "setjmp", 6) == 0
      && (sym_name[6] == '\0' || sym_name[6] == '@'))
    return TRUE;

  /* Usually, symbols in non-overlay sections don't need stubs.  */
  if (spu_elf_section_data (sym_sec->output_section)->ovl_index == 0
      && !htab->non_overlay_stubs)
    return FALSE;

  /* A reference from some other section to a symbol in an overlay
     section needs a stub.  */
  if (spu_elf_section_data (sym_sec->output_section)->ovl_index
       != spu_elf_section_data (input_section->output_section)->ovl_index)
    return TRUE;

  /* If this insn isn't a branch then we are possibly taking the
     address of a function and passing it out somehow.  */
  return !is_branch;
}

struct stubarr {
  struct bfd_hash_table *stub_hash_table;
  struct spu_stub_hash_entry **sh;
  unsigned int count;
  int err;
};

/* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
   symbols.  */

static bfd_boolean
allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
{
  /* Symbols starting with _SPUEAR_ need a stub because they may be
     invoked by the PPU.  */
  if ((h->root.type == bfd_link_hash_defined
       || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
    {
      struct stubarr *stubs = inf;
      static Elf_Internal_Rela zero_rel;
      char *stub_name = spu_stub_name (h->root.u.def.section, h, &zero_rel);
      struct spu_stub_hash_entry *sh;

      if (stub_name == NULL)
	{
	  stubs->err = 1;
	  return FALSE;
	}

      sh = (struct spu_stub_hash_entry *)
	bfd_hash_lookup (stubs->stub_hash_table, stub_name, TRUE, FALSE);
      if (sh == NULL)
	{
	  free (stub_name);
	  return FALSE;
	}

      /* If this entry isn't new, we already have a stub.  */
      if (sh->target_section != NULL)
	{
	  free (stub_name);
	  return TRUE;
	}

      sh->target_section = h->root.u.def.section;
      sh->target_off = h->root.u.def.value;
      stubs->count += 1;
    }

  return TRUE;
}

/* Called via bfd_hash_traverse to set up pointers to all symbols
   in the stub hash table.  */

static bfd_boolean
populate_stubs (struct bfd_hash_entry *bh, void *inf)
{
  struct stubarr *stubs = inf;

  stubs->sh[--stubs->count] = (struct spu_stub_hash_entry *) bh;
  return TRUE;
}

/* qsort predicate to sort stubs by overlay number.  */

static int
sort_stubs (const void *a, const void *b)
{
  const struct spu_stub_hash_entry *const *sa = a;
  const struct spu_stub_hash_entry *const *sb = b;
  int i;
  bfd_signed_vma d;

  i = spu_elf_section_data ((*sa)->target_section->output_section)->ovl_index;
  i -= spu_elf_section_data ((*sb)->target_section->output_section)->ovl_index;
  if (i != 0)
    return i;

  d = ((*sa)->target_section->output_section->vma
       + (*sa)->target_section->output_offset
       + (*sa)->target_off
       - (*sb)->target_section->output_section->vma
       - (*sb)->target_section->output_offset
       - (*sb)->target_off);
  if (d != 0)
    return d < 0 ? -1 : 1;

  /* Two functions at the same address.  Aliases perhaps.  */
  i = strcmp ((*sb)->root.string, (*sa)->root.string);
  BFD_ASSERT (i != 0);
  return i;
}

/* Allocate space for overlay call and return stubs.  */

bfd_boolean
spu_elf_size_stubs (bfd *output_bfd,
		    struct bfd_link_info *info,
		    int non_overlay_stubs,
		    int stack_analysis,
		    asection **stub,
		    asection **ovtab,
		    asection **toe)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;
  struct stubarr stubs;
  unsigned i, group;
  flagword flags;

  htab->non_overlay_stubs = non_overlay_stubs;
  stubs.stub_hash_table = &htab->stub_hash_table;
  stubs.count = 0;
  stubs.err = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *section;
      Elf_Internal_Sym *local_syms = NULL;
      void *psyms;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      /* We'll need the symbol table in a second.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      if (symtab_hdr->sh_info == 0)
	continue;

      /* Arrange to read and keep global syms for later stack analysis.  */
      psyms = &local_syms;
      if (stack_analysis)
	psyms = &symtab_hdr->contents;

      /* Walk over each section attached to the input bfd.  */
      for (section = ibfd->sections; section != NULL; section = section->next)
	{
	  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;

	  /* If there aren't any relocs, then there's nothing more to do.  */
	  if ((section->flags & SEC_RELOC) == 0
	      || (section->flags & SEC_ALLOC) == 0
	      || (section->flags & SEC_LOAD) == 0
	      || section->reloc_count == 0)
	    continue;

	  /* If this section is a link-once section that will be
	     discarded, then don't create any stubs.  */
	  if (section->output_section == NULL
	      || section->output_section->owner != output_bfd)
	    continue;

	  /* Get the relocs.  */
	  internal_relocs
	    = _bfd_elf_link_read_relocs (ibfd, section, NULL, NULL,
					 info->keep_memory);
	  if (internal_relocs == NULL)
	    goto error_ret_free_local;

	  /* Now examine each relocation.  */
	  irela = internal_relocs;
	  irelaend = irela + section->reloc_count;
	  for (; irela < irelaend; irela++)
	    {
	      enum elf_spu_reloc_type r_type;
	      unsigned int r_indx;
	      asection *sym_sec;
	      Elf_Internal_Sym *sym;
	      struct elf_link_hash_entry *h;
	      const char *sym_name;
	      char *stub_name;
	      struct spu_stub_hash_entry *sh;
	      unsigned int sym_type;
	      enum _insn_type { non_branch, branch, call } insn_type;

	      r_type = ELF32_R_TYPE (irela->r_info);
	      r_indx = ELF32_R_SYM (irela->r_info);

	      if (r_type >= R_SPU_max)
		{
		  bfd_set_error (bfd_error_bad_value);
		  goto error_ret_free_internal;
		}

	      /* Determine the reloc target section.  */
	      if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, ibfd))
		goto error_ret_free_internal;

	      if (sym_sec == NULL
		  || sym_sec->output_section == NULL
		  || sym_sec->output_section->owner != output_bfd)
		continue;

	      /* Ensure no stubs for user supplied overlay manager syms.  */
	      if (h != NULL
		  && (strcmp (h->root.root.string, "__ovly_load") == 0
		      || strcmp (h->root.root.string, "__ovly_return") == 0))
		continue;

	      insn_type = non_branch;
	      if (r_type == R_SPU_REL16
		  || r_type == R_SPU_ADDR16)
		{
		  unsigned char insn[4];

		  if (!bfd_get_section_contents (ibfd, section, insn,
						 irela->r_offset, 4))
		    goto error_ret_free_internal;

		  if (is_branch (insn) || is_hint (insn))
		    {
		      insn_type = branch;
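		      /* brsl and brasl are function calls.  */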
		      if ((insn[0] & 0xfd) == 0x31)
			insn_type = call;
		    }
		}

	      /* We are only interested in function symbols.  */
	      if (h != NULL)
		{
		  sym_type = h->type;
		  sym_name = h->root.root.string;
		}
	      else
		{
		  sym_type = ELF_ST_TYPE (sym->st_info);
		  sym_name = bfd_elf_sym_name (sym_sec->owner,
					       symtab_hdr,
					       sym,
					       sym_sec);
		}
	      if (sym_type != STT_FUNC)
		{
		  /* It's common for people to write assembly and forget
		     to give function symbols the right type.  Handle
		     calls to such symbols, but warn so that (hopefully)
		     people will fix their code.  We need the symbol
		     type to be correct to distinguish function pointer
		     initialisation from other pointer initialisation.  */
		  if (insn_type == call)
		    (*_bfd_error_handler) (_("warning: call to non-function"
					     " symbol %s defined in %B"),
					   sym_sec->owner, sym_name);
		  else
		    continue;
		}

	      if (!needs_ovl_stub (sym_name, sym_sec, section, htab,
				   insn_type != non_branch))
		continue;

	      stub_name = spu_stub_name (sym_sec, h, irela);
	      if (stub_name == NULL)
		goto error_ret_free_internal;

	      sh = (struct spu_stub_hash_entry *)
		bfd_hash_lookup (&htab->stub_hash_table, stub_name,
				 TRUE, FALSE);
	      if (sh == NULL)
		{
		  free (stub_name);
		error_ret_free_internal:
		  if (elf_section_data (section)->relocs != internal_relocs)
		    free (internal_relocs);
		error_ret_free_local:
		  if (local_syms != NULL
		      && (symtab_hdr->contents
			  != (unsigned char *) local_syms))
		    free (local_syms);
		  return FALSE;
		}

	      /* If this entry isn't new, we already have a stub.  */
	      if (sh->target_section != NULL)
		{
		  free (stub_name);
		  continue;
		}

	      sh->target_section = sym_sec;
	      if (h != NULL)
		sh->target_off = h->root.u.def.value;
	      else
		sh->target_off = sym->st_value;
	      sh->target_off += irela->r_addend;

	      stubs.count += 1;
	    }

	  /* We're done with the internal relocs, free them.  */
	  if (elf_section_data (section)->relocs != internal_relocs)
	    free (internal_relocs);
	}

      if (local_syms != NULL
	  && symtab_hdr->contents != (unsigned char *) local_syms)
	{
	  if (!info->keep_memory)
	    free (local_syms);
	  else
	    symtab_hdr->contents = (unsigned char *) local_syms;
	}
    }

  elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, &stubs);
  if (stubs.err)
    return FALSE;

  *stub = NULL;
  if (stubs.count == 0)
    return TRUE;

  ibfd = info->input_bfds;
  flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
	   | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
  htab->stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
  *stub = htab->stub;
  if (htab->stub == NULL
      || !bfd_set_section_alignment (ibfd, htab->stub, 2))
    return FALSE;

  flags = (SEC_ALLOC | SEC_LOAD
	   | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
  htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
  *ovtab = htab->ovtab;
  if (htab->ovtab == NULL
      || !bfd_set_section_alignment (ibfd, htab->stub, 4))
    return FALSE;

  *toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
  if (*toe == NULL
      || !bfd_set_section_alignment (ibfd, *toe, 4))
    return FALSE;
  (*toe)->size = 16;

  /* Retrieve all the stubs and sort.  */
  stubs.sh = bfd_malloc (stubs.count * sizeof (*stubs.sh));
  if (stubs.sh == NULL)
    return FALSE;
  i = stubs.count;
  bfd_hash_traverse (&htab->stub_hash_table, populate_stubs, &stubs);
  BFD_ASSERT (stubs.count == 0);

  stubs.count = i;
  qsort (stubs.sh, stubs.count, sizeof (*stubs.sh), sort_stubs);

  /* Now that the stubs are sorted, place them in the stub section.
     Stubs are grouped per overlay
     .	    ila $79,func1
     .	    br 1f
     .	    ila $79,func2
     .	    br 1f
     .
     .
     .	    ila $79,funcn
     .	    nop
     .	1:
     .	    ila $78,ovl_index
     .	    br __ovly_load  */

  group = 0;
  for (i = 0; i < stubs.count; i++)
    {
      if (spu_elf_section_data (stubs.sh[group]->target_section
				->output_section)->ovl_index
	  != spu_elf_section_data (stubs.sh[i]->target_section
				   ->output_section)->ovl_index)
	{
	  htab->stub->size += SIZEOF_STUB2;
	  for (; group != i; group++)
	    stubs.sh[group]->delta
	      = stubs.sh[i - 1]->off - stubs.sh[group]->off;
	}
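      /* Stubs for the same target address are shared; only allocate a
	 new stub when this target differs from the previous one.  */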
      if (group == i
	  || ((stubs.sh[i - 1]->target_section->output_section->vma
	       + stubs.sh[i - 1]->target_section->output_offset
	       + stubs.sh[i - 1]->target_off)
	      != (stubs.sh[i]->target_section->output_section->vma
		  + stubs.sh[i]->target_section->output_offset
		  + stubs.sh[i]->target_off)))
	{
	  stubs.sh[i]->off = htab->stub->size;
	  htab->stub->size += SIZEOF_STUB1;
	}
      else
	stubs.sh[i]->off = stubs.sh[i - 1]->off;
    }
  if (group != i)
    htab->stub->size += SIZEOF_STUB2;
  for (; group != i; group++)
    stubs.sh[group]->delta = stubs.sh[i - 1]->off - stubs.sh[group]->off;

 /* htab->ovtab consists of two arrays.
    .	struct {
    .	  u32 vma;
    .	  u32 size;
    .	  u32 file_off;
    .	  u32 buf;
    .	} _ovly_table[];
    .
    .	struct {
    .	  u32 mapped;
    .	} _ovly_buf_table[];  */

  htab->ovtab->alignment_power = 4;
  htab->ovtab->size = htab->num_overlays * 16 + htab->num_buf * 4;

  return TRUE;
}

/* Functions to handle embedded spu_ovl.o object.  */

static void *
ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
{
  return stream;
}

static file_ptr
ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
	       void *stream,
	       void *buf,
	       file_ptr nbytes,
	       file_ptr offset)
{
  struct _ovl_stream *os;
  size_t count;
  size_t max;

  os = (struct _ovl_stream *) stream;
  max = (const char *) os->end - (const char *) os->start;

  if ((ufile_ptr) offset >= max)
    return 0;

  count = nbytes;
  if (count > max - offset)
    count = max - offset;

  memcpy (buf, (const char *) os->start + offset, count);
  return count;
}

bfd_boolean
spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
{
  *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
			      "elf32-spu",
			      ovl_mgr_open,
			      (void *) stream,
			      ovl_mgr_pread,
			      NULL,
			      NULL);
  return *ovl_bfd != NULL;
}

/* Fill in the ila and br for a stub.  On the last stub for a group,
   write the stub that sets the overlay number too.  */

static bfd_boolean
write_one_stub (struct bfd_hash_entry *bh, void *inf)
{
  struct spu_stub_hash_entry *ent = (struct spu_stub_hash_entry *) bh;
  struct spu_link_hash_table *htab = inf;
  asection *sec = htab->stub;
  asection *s = ent->target_section;
  unsigned int ovl;
  bfd_vma val;

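  /* ila takes an 18-bit immediate in bits 7..24 and br a 16-bit word
     offset in bits 7..22, matching the howto dst_masks above.  */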
  val = ent->target_off + s->output_offset + s->output_section->vma;
  bfd_put_32 (sec->owner, ILA_79 + ((val << 7) & 0x01ffff80),
	      sec->contents + ent->off);
  val = ent->delta + 4;
  bfd_put_32 (sec->owner, BR + ((val << 5) & 0x007fff80),
	      sec->contents + ent->off + 4);

  /* If this is the last stub of this group, write stub2.  */
  if (ent->delta == 0)
    {
      bfd_put_32 (sec->owner, NOP,
		  sec->contents + ent->off + 4);

      ovl = spu_elf_section_data (s->output_section)->ovl_index;
      bfd_put_32 (sec->owner, ILA_78 + ((ovl << 7) & 0x01ffff80),
		  sec->contents + ent->off + 8);

      val = (htab->ovly_load->root.u.def.section->output_section->vma
	     + htab->ovly_load->root.u.def.section->output_offset
	     + htab->ovly_load->root.u.def.value
	     - (sec->output_section->vma
		+ sec->output_offset
		+ ent->off + 12));

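      /* The br to __ovly_load has a signed 16-bit word offset, so the
	 byte displacement must lie within [-0x20000, 0x20000).  */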
      if (val + 0x20000 >= 0x40000)
	htab->stub_overflow = TRUE;

      bfd_put_32 (sec->owner, BR + ((val << 5) & 0x007fff80),
		  sec->contents + ent->off + 12);
    }

  if (htab->emit_stub_syms)
    {
      struct elf_link_hash_entry *h;
      size_t len1, len2;
      char *name;

      len1 = sizeof ("00000000.ovl_call.") - 1;
      len2 = strlen (ent->root.string);
      name = bfd_malloc (len1 + len2 + 1);
      if (name == NULL)
	return FALSE;
      memcpy (name, "00000000.ovl_call.", len1);
      memcpy (name + len1, ent->root.string, len2 + 1);
      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      if (h == NULL)
	return FALSE;
      if (h->root.type == bfd_link_hash_new)
	{
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = sec;
	  h->root.u.def.value = ent->off;
	  h->size = (ent->delta == 0
		     ? SIZEOF_STUB1 + SIZEOF_STUB2 : SIZEOF_STUB1);
	  h->type = STT_FUNC;
	  h->ref_regular = 1;
	  h->def_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
	  h->non_elf = 0;
	}
    }

  return TRUE;
}

/* Define an STT_OBJECT symbol.  */

static struct elf_link_hash_entry *
define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
{
  struct elf_link_hash_entry *h;

  h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
  if (h == NULL)
    return NULL;

  if (h->root.type != bfd_link_hash_defined
      || !h->def_regular)
    {
      h->root.type = bfd_link_hash_defined;
      h->root.u.def.section = htab->ovtab;
      h->type = STT_OBJECT;
      h->ref_regular = 1;
      h->def_regular = 1;
      h->ref_regular_nonweak = 1;
      h->non_elf = 0;
    }
  else
    {
      (*_bfd_error_handler) (_("%B is not allowed to define %s"),
			     h->root.u.def.section->owner,
			     h->root.root.string);
      bfd_set_error (bfd_error_bad_value);
      return NULL;
    }

  return h;
}

/* Fill in all stubs and the overlay tables.  */

bfd_boolean
spu_elf_build_stubs (struct bfd_link_info *info, int emit_syms, asection *toe)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  struct elf_link_hash_entry *h;
  bfd_byte *p;
  asection *s;
  bfd *obfd;
  unsigned int i;

  htab->emit_stub_syms = emit_syms;
  htab->stub->contents = bfd_zalloc (htab->stub->owner, htab->stub->size);
  if (htab->stub->contents == NULL)
    return FALSE;

  h = elf_link_hash_lookup (&htab->elf, "__ovly_load", FALSE, FALSE, FALSE);
  htab->ovly_load = h;
  BFD_ASSERT (h != NULL
	      && (h->root.type == bfd_link_hash_defined
		  || h->root.type == bfd_link_hash_defweak)
	      && h->def_regular);

  s = h->root.u.def.section->output_section;
  if (spu_elf_section_data (s)->ovl_index)
    {
      (*_bfd_error_handler) (_("%s in overlay section"),
			     h->root.root.string);
1354238405Sjkim      bfd_set_error (bfd_error_bad_value);
1355238405Sjkim      return FALSE;
1356238405Sjkim    }
1357238405Sjkim
1358238405Sjkim  /* Write out all the stubs.  */
1359238405Sjkim  bfd_hash_traverse (&htab->stub_hash_table, write_one_stub, htab);
1360238405Sjkim
1361238405Sjkim  if (htab->stub_overflow)
1362238405Sjkim    {
1363238405Sjkim      (*_bfd_error_handler) (_("overlay stub relocation overflow"));
1364238405Sjkim      bfd_set_error (bfd_error_bad_value);
1365238405Sjkim      return FALSE;
1366238405Sjkim    }
1367238405Sjkim
1368238405Sjkim  htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
1369238405Sjkim  if (htab->ovtab->contents == NULL)
1370238405Sjkim    return FALSE;
1371238405Sjkim
1372238405Sjkim  /* Write out _ovly_table.  */
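  /* For reference, each entry written below is 16 bytes.  Roughly, as
     the runtime overlay manager would view it (the struct and field
     names here are only illustrative, they are not defined in this
     file):

	struct ovly_table_entry
	{
	  unsigned int vma;        overlay load address, set below
	  unsigned int size;       size rounded up to 16 bytes, set below
	  unsigned int file_off;   set in spu_elf_modify_program_headers
	  unsigned int buf;        1-based overlay buffer index, set below
	};  */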
1373238405Sjkim  p = htab->ovtab->contents;
1374238405Sjkim  obfd = htab->ovtab->output_section->owner;
1375238405Sjkim  for (s = obfd->sections; s != NULL; s = s->next)
1376238405Sjkim    {
1377238405Sjkim      unsigned int ovl_index = spu_elf_section_data (s)->ovl_index;
1378238405Sjkim
1379238405Sjkim      if (ovl_index != 0)
1380238405Sjkim	{
1381238405Sjkim	  unsigned int lo, hi, mid;
1382238405Sjkim	  unsigned long off = (ovl_index - 1) * 16;
1383238405Sjkim	  bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
1384238405Sjkim	  bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16, p + off + 4);
1385238405Sjkim	  /* file_off written later in spu_elf_modify_program_headers.  */
1386238405Sjkim
1387238405Sjkim	  lo = 0;
1388238405Sjkim	  hi = htab->num_buf;
1389238405Sjkim	  while (lo < hi)
1390238405Sjkim	    {
1391238405Sjkim	      mid = (lo + hi) >> 1;
1392238405Sjkim	      if (htab->ovl_region[2 * mid + 1]->vma
1393238405Sjkim		  + htab->ovl_region[2 * mid + 1]->size <= s->vma)
1394238405Sjkim		lo = mid + 1;
1395238405Sjkim	      else if (htab->ovl_region[2 * mid]->vma > s->vma)
1396238405Sjkim		hi = mid;
1397238405Sjkim	      else
1398238405Sjkim		{
1399238405Sjkim		  bfd_put_32 (htab->ovtab->owner, mid + 1, p + off + 12);
1400238405Sjkim		  break;
1401238405Sjkim		}
1402238405Sjkim	    }
1403238405Sjkim	  BFD_ASSERT (lo < hi);
1404238405Sjkim	}
1405238405Sjkim    }
1406238405Sjkim
1407238405Sjkim  /* Write out _ovly_buf_table.  */
1408238405Sjkim  p = htab->ovtab->contents + htab->num_overlays * 16;
1409238405Sjkim  for (i = 0; i < htab->num_buf; i++)
1410238405Sjkim    {
1411238405Sjkim      bfd_put_32 (htab->ovtab->owner, 0, p);
1412238405Sjkim      p += 4;
1413238405Sjkim    }
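  /* The words just cleared are presumably updated at run time by the
     overlay manager to record which overlay, if any, currently
     occupies each buffer (zero meaning none).  That is an assumption
     about the runtime side; nothing in this file relies on it.  */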
1414238405Sjkim
1415238405Sjkim  h = define_ovtab_symbol (htab, "_ovly_table");
1416238405Sjkim  if (h == NULL)
1417238405Sjkim    return FALSE;
1418238405Sjkim  h->root.u.def.value = 0;
1419238405Sjkim  h->size = htab->num_overlays * 16;
1420238405Sjkim
1421238405Sjkim  h = define_ovtab_symbol (htab, "_ovly_table_end");
1422238405Sjkim  if (h == NULL)
1423238405Sjkim    return FALSE;
1424238405Sjkim  h->root.u.def.value = htab->num_overlays * 16;
1425238405Sjkim  h->size = 0;
1426238405Sjkim
1427238405Sjkim  h = define_ovtab_symbol (htab, "_ovly_buf_table");
1428238405Sjkim  if (h == NULL)
1429238405Sjkim    return FALSE;
1430238405Sjkim  h->root.u.def.value = htab->num_overlays * 16;
1431238405Sjkim  h->size = htab->num_buf * 4;
1432238405Sjkim
1433238405Sjkim  h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
1434238405Sjkim  if (h == NULL)
1435238405Sjkim    return FALSE;
1436238405Sjkim  h->root.u.def.value = htab->num_overlays * 16 + htab->num_buf * 4;
1437238405Sjkim  h->size = 0;
1438238405Sjkim
1439238405Sjkim  h = define_ovtab_symbol (htab, "_EAR_");
1440238405Sjkim  if (h == NULL)
1441238405Sjkim    return FALSE;
1442238405Sjkim  h->root.u.def.section = toe;
1443238405Sjkim  h->root.u.def.value = 0;
1444238405Sjkim  h->size = 16;
1445238405Sjkim
1446238405Sjkim  return TRUE;
1447238405Sjkim}
1448238405Sjkim
1449238405Sjkim/* OFFSET in SEC (presumably) is the beginning of a function prologue.
1450238405Sjkim   Search for stack adjusting insns, and return the sp delta.  */
1451238405Sjkim
1452238405Sjkimstatic int
1453238405Sjkimfind_function_stack_adjust (asection *sec, bfd_vma offset)
1454238405Sjkim{
1455238405Sjkim  int unrecog;
1456238405Sjkim  int reg[128];
1457238405Sjkim
1458238405Sjkim  memset (reg, 0, sizeof (reg));
1459238405Sjkim  for (unrecog = 0; offset + 4 <= sec->size && unrecog < 32; offset += 4)
1460238405Sjkim    {
1461238405Sjkim      unsigned char buf[4];
1462238405Sjkim      int rt, ra;
1463238405Sjkim      int imm;
1464238405Sjkim
      /* Assume no relocs on stack adjusting insns.  */
1466238405Sjkim      if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
1467238405Sjkim	break;
1468238405Sjkim
1469238405Sjkim      if (buf[0] == 0x24 /* stqd */)
1470238405Sjkim	continue;
1471238405Sjkim
1472238405Sjkim      rt = buf[3] & 0x7f;
1473238405Sjkim      ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
1474238405Sjkim      /* Partly decoded immediate field.  */
1475238405Sjkim      imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);
1476238405Sjkim
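      /* Worked example, for illustration only: "ai $sp,$sp,-32"
	 encodes as the bytes 1c f8 00 81.  Then rt = 0x81 & 0x7f = 1,
	 ra = 1, and imm = (0xf8 << 9) | (0x00 << 1) | (0x81 >> 7)
	 = 0x1f001.  The "ai" case below shifts imm right by 7 giving
	 0x3e0 and sign-extends that to -32, so reg[1], the running
	 sp value, drops by 32.  */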
1477238405Sjkim      if (buf[0] == 0x1c /* ai */)
1478238405Sjkim	{
1479238405Sjkim	  imm >>= 7;
1480238405Sjkim	  imm = (imm ^ 0x200) - 0x200;
1481238405Sjkim	  reg[rt] = reg[ra] + imm;
1482238405Sjkim
1483238405Sjkim	  if (rt == 1 /* sp */)
1484238405Sjkim	    {
1485238405Sjkim	      if (imm > 0)
1486238405Sjkim		break;
1487238405Sjkim	      return reg[rt];
1488238405Sjkim	    }
1489238405Sjkim	}
1490238405Sjkim      else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
1491238405Sjkim	{
1492238405Sjkim	  int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
1493238405Sjkim
1494238405Sjkim	  reg[rt] = reg[ra] + reg[rb];
1495238405Sjkim	  if (rt == 1)
1496238405Sjkim	    return reg[rt];
1497238405Sjkim	}
1498238405Sjkim      else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
1499238405Sjkim	{
1500238405Sjkim	  if (buf[0] >= 0x42 /* ila */)
1501238405Sjkim	    imm |= (buf[0] & 1) << 17;
1502238405Sjkim	  else
1503238405Sjkim	    {
1504238405Sjkim	      imm &= 0xffff;
1505238405Sjkim
1506238405Sjkim	      if (buf[0] == 0x40 /* il */)
1507238405Sjkim		{
1508238405Sjkim		  if ((buf[1] & 0x80) == 0)
1509238405Sjkim		    goto unknown_insn;
1510238405Sjkim		  imm = (imm ^ 0x8000) - 0x8000;
1511238405Sjkim		}
1512238405Sjkim	      else if ((buf[1] & 0x80) == 0 /* ilhu */)
1513238405Sjkim		imm <<= 16;
1514238405Sjkim	    }
1515238405Sjkim	  reg[rt] = imm;
1516238405Sjkim	  continue;
1517238405Sjkim	}
1518238405Sjkim      else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
1519238405Sjkim	{
1520238405Sjkim	  reg[rt] |= imm & 0xffff;
1521238405Sjkim	  continue;
1522238405Sjkim	}
1523238405Sjkim      else if (buf[0] == 0x04 /* ori */)
1524238405Sjkim	{
1525238405Sjkim	  imm >>= 7;
1526238405Sjkim	  imm = (imm ^ 0x200) - 0x200;
1527238405Sjkim	  reg[rt] = reg[ra] | imm;
1528238405Sjkim	  continue;
1529238405Sjkim	}
1530238405Sjkim      else if ((buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
1531238405Sjkim	       || (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */))
1532238405Sjkim	{
1533238405Sjkim	  /* Used in pic reg load.  Say rt is trashed.  */
1534238405Sjkim	  reg[rt] = 0;
1535238405Sjkim	  continue;
1536238405Sjkim	}
1537238405Sjkim      else if (is_branch (buf))
1538238405Sjkim	/* If we hit a branch then we must be out of the prologue.  */
1539238405Sjkim	break;
1540238405Sjkim    unknown_insn:
1541238405Sjkim      ++unrecog;
1542238405Sjkim    }
1543238405Sjkim
1544238405Sjkim  return 0;
1545238405Sjkim}
1546238405Sjkim
/* qsort predicate to sort symbols by section and value.  Ties are
   broken by preferring the larger symbol, then by original symbol
   table order.  */
1548238405Sjkim
1549238405Sjkimstatic Elf_Internal_Sym *sort_syms_syms;
1550238405Sjkimstatic asection **sort_syms_psecs;
1551238405Sjkim
1552238405Sjkimstatic int
1553238405Sjkimsort_syms (const void *a, const void *b)
1554238405Sjkim{
1555238405Sjkim  Elf_Internal_Sym *const *s1 = a;
1556238405Sjkim  Elf_Internal_Sym *const *s2 = b;
  asection *sec1, *sec2;
1558238405Sjkim  bfd_signed_vma delta;
1559238405Sjkim
1560238405Sjkim  sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
1561238405Sjkim  sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
1562238405Sjkim
1563238405Sjkim  if (sec1 != sec2)
1564238405Sjkim    return sec1->index - sec2->index;
1565238405Sjkim
1566238405Sjkim  delta = (*s1)->st_value - (*s2)->st_value;
1567238405Sjkim  if (delta != 0)
1568238405Sjkim    return delta < 0 ? -1 : 1;
1569238405Sjkim
1570238405Sjkim  delta = (*s2)->st_size - (*s1)->st_size;
1571238405Sjkim  if (delta != 0)
1572238405Sjkim    return delta < 0 ? -1 : 1;
1573238405Sjkim
1574238405Sjkim  return *s1 < *s2 ? -1 : 1;
1575238405Sjkim}
1576238405Sjkim
1577238405Sjkimstruct call_info
1578238405Sjkim{
1579238405Sjkim  struct function_info *fun;
1580238405Sjkim  struct call_info *next;
1581238405Sjkim  int is_tail;
1582238405Sjkim};
1583238405Sjkim
1584238405Sjkimstruct function_info
1585238405Sjkim{
1586238405Sjkim  /* List of functions called.  Also branches to hot/cold part of
1587238405Sjkim     function.  */
1588238405Sjkim  struct call_info *call_list;
1589238405Sjkim  /* For hot/cold part of function, point to owner.  */
1590238405Sjkim  struct function_info *start;
1591238405Sjkim  /* Symbol at start of function.  */
1592238405Sjkim  union {
1593238405Sjkim    Elf_Internal_Sym *sym;
1594238405Sjkim    struct elf_link_hash_entry *h;
1595238405Sjkim  } u;
1596238405Sjkim  /* Function section.  */
1597238405Sjkim  asection *sec;
1598238405Sjkim  /* Address range of (this part of) function.  */
1599238405Sjkim  bfd_vma lo, hi;
1600238405Sjkim  /* Stack usage.  */
1601238405Sjkim  int stack;
1602238405Sjkim  /* Set if global symbol.  */
1603238405Sjkim  unsigned int global : 1;
  /* Set if known to be the start of a function (as distinct from a
     hunk in a hot/cold section).  */
1606238405Sjkim  unsigned int is_func : 1;
1607238405Sjkim  /* Flags used during call tree traversal.  */
1608238405Sjkim  unsigned int visit1 : 1;
1609238405Sjkim  unsigned int non_root : 1;
1610238405Sjkim  unsigned int visit2 : 1;
1611238405Sjkim  unsigned int marking : 1;
1612238405Sjkim  unsigned int visit3 : 1;
1613238405Sjkim};
1614238405Sjkim
1615238405Sjkimstruct spu_elf_stack_info
1616238405Sjkim{
1617238405Sjkim  int num_fun;
1618238405Sjkim  int max_fun;
1619238405Sjkim  /* Variable size array describing functions, one per contiguous
1620238405Sjkim     address range belonging to a function.  */
1621238405Sjkim  struct function_info fun[1];
1622238405Sjkim};
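/* fun[1] above is the pre-C99 idiom for a flexible array member;
   alloc_stack_info below over-allocates the structure so that fun[]
   can hold max_fun entries.  */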
1623238405Sjkim
1624238405Sjkim/* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
1625238405Sjkim   entries for section SEC.  */
1626238405Sjkim
1627238405Sjkimstatic struct spu_elf_stack_info *
1628238405Sjkimalloc_stack_info (asection *sec, int max_fun)
1629238405Sjkim{
1630238405Sjkim  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1631238405Sjkim  bfd_size_type amt;
1632238405Sjkim
1633238405Sjkim  amt = sizeof (struct spu_elf_stack_info);
1634238405Sjkim  amt += (max_fun - 1) * sizeof (struct function_info);
1635238405Sjkim  sec_data->stack_info = bfd_zmalloc (amt);
1636238405Sjkim  if (sec_data->stack_info != NULL)
1637238405Sjkim    sec_data->stack_info->max_fun = max_fun;
1638238405Sjkim  return sec_data->stack_info;
1639238405Sjkim}
1640238405Sjkim
1641238405Sjkim/* Add a new struct function_info describing a (part of a) function
1642238405Sjkim   starting at SYM_H.  Keep the array sorted by address.  */
1643238405Sjkim
1644238405Sjkimstatic struct function_info *
1645238405Sjkimmaybe_insert_function (asection *sec,
1646238405Sjkim		       void *sym_h,
1647238405Sjkim		       bfd_boolean global,
1648238405Sjkim		       bfd_boolean is_func)
1649238405Sjkim{
1650238405Sjkim  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1651238405Sjkim  struct spu_elf_stack_info *sinfo = sec_data->stack_info;
1652238405Sjkim  int i;
1653238405Sjkim  bfd_vma off, size;
1654238405Sjkim
1655238405Sjkim  if (sinfo == NULL)
1656238405Sjkim    {
1657238405Sjkim      sinfo = alloc_stack_info (sec, 20);
1658238405Sjkim      if (sinfo == NULL)
1659238405Sjkim	return NULL;
1660238405Sjkim    }
1661238405Sjkim
1662238405Sjkim  if (!global)
1663238405Sjkim    {
1664238405Sjkim      Elf_Internal_Sym *sym = sym_h;
1665238405Sjkim      off = sym->st_value;
1666238405Sjkim      size = sym->st_size;
1667238405Sjkim    }
1668238405Sjkim  else
1669238405Sjkim    {
1670238405Sjkim      struct elf_link_hash_entry *h = sym_h;
1671238405Sjkim      off = h->root.u.def.value;
1672238405Sjkim      size = h->size;
1673238405Sjkim    }
1674238405Sjkim
1675238405Sjkim  for (i = sinfo->num_fun; --i >= 0; )
1676238405Sjkim    if (sinfo->fun[i].lo <= off)
1677238405Sjkim      break;
1678238405Sjkim
1679238405Sjkim  if (i >= 0)
1680238405Sjkim    {
1681238405Sjkim      /* Don't add another entry for an alias, but do update some
1682238405Sjkim	 info.  */
1683238405Sjkim      if (sinfo->fun[i].lo == off)
1684238405Sjkim	{
1685238405Sjkim	  /* Prefer globals over local syms.  */
1686238405Sjkim	  if (global && !sinfo->fun[i].global)
1687238405Sjkim	    {
1688238405Sjkim	      sinfo->fun[i].global = TRUE;
1689238405Sjkim	      sinfo->fun[i].u.h = sym_h;
1690238405Sjkim	    }
1691238405Sjkim	  if (is_func)
1692238405Sjkim	    sinfo->fun[i].is_func = TRUE;
1693238405Sjkim	  return &sinfo->fun[i];
1694238405Sjkim	}
1695238405Sjkim      /* Ignore a zero-size symbol inside an existing function.  */
1696238405Sjkim      else if (sinfo->fun[i].hi > off && size == 0)
1697238405Sjkim	return &sinfo->fun[i];
1698238405Sjkim    }
1699238405Sjkim
  /* Grow the array first if it is full, so that the insertion below
     cannot write past the end of fun[].  */
  if (sinfo->num_fun >= sinfo->max_fun)
    {
      bfd_size_type amt = sizeof (struct spu_elf_stack_info);
      bfd_size_type old = amt;

      old += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
      amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo = bfd_realloc (sinfo, amt);
      if (sinfo == NULL)
	return NULL;
      memset ((char *) sinfo + old, 0, amt - old);
      sec_data->stack_info = sinfo;
    }

  if (++i < sinfo->num_fun)
    memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
	     (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
1717238405Sjkim  sinfo->fun[i].is_func = is_func;
1718238405Sjkim  sinfo->fun[i].global = global;
1719238405Sjkim  sinfo->fun[i].sec = sec;
1720238405Sjkim  if (global)
1721238405Sjkim    sinfo->fun[i].u.h = sym_h;
1722238405Sjkim  else
1723238405Sjkim    sinfo->fun[i].u.sym = sym_h;
1724238405Sjkim  sinfo->fun[i].lo = off;
1725238405Sjkim  sinfo->fun[i].hi = off + size;
1726238405Sjkim  sinfo->fun[i].stack = -find_function_stack_adjust (sec, off);
1727238405Sjkim  sinfo->num_fun += 1;
1728238405Sjkim  return &sinfo->fun[i];
1729238405Sjkim}
1730238405Sjkim
1731238405Sjkim/* Return the name of FUN.  */
1732238405Sjkim
1733238405Sjkimstatic const char *
1734238405Sjkimfunc_name (struct function_info *fun)
1735238405Sjkim{
1736238405Sjkim  asection *sec;
1737238405Sjkim  bfd *ibfd;
1738238405Sjkim  Elf_Internal_Shdr *symtab_hdr;
1739238405Sjkim
1740238405Sjkim  while (fun->start != NULL)
1741238405Sjkim    fun = fun->start;
1742238405Sjkim
1743238405Sjkim  if (fun->global)
1744238405Sjkim    return fun->u.h->root.root.string;
1745238405Sjkim
1746238405Sjkim  sec = fun->sec;
1747238405Sjkim  if (fun->u.sym->st_name == 0)
1748238405Sjkim    {
1749238405Sjkim      size_t len = strlen (sec->name);
1750238405Sjkim      char *name = bfd_malloc (len + 10);
1751238405Sjkim      if (name == NULL)
1752238405Sjkim	return "(null)";
1753238405Sjkim      sprintf (name, "%s+%lx", sec->name,
1754238405Sjkim	       (unsigned long) fun->u.sym->st_value & 0xffffffff);
1755238405Sjkim      return name;
1756238405Sjkim    }
1757238405Sjkim  ibfd = sec->owner;
1758238405Sjkim  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
1759238405Sjkim  return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
1760238405Sjkim}
1761238405Sjkim
1762238405Sjkim/* Read the instruction at OFF in SEC.  Return true iff the instruction
1763238405Sjkim   is a nop, lnop, or stop 0 (all zero insn).  */
1764238405Sjkim
1765238405Sjkimstatic bfd_boolean
1766238405Sjkimis_nop (asection *sec, bfd_vma off)
1767238405Sjkim{
1768238405Sjkim  unsigned char insn[4];
1769238405Sjkim
1770238405Sjkim  if (off + 4 > sec->size
1771238405Sjkim      || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
1772238405Sjkim    return FALSE;
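  /* nop is 0x40200000 and lnop is 0x00200000.  The tests below check
     only the 11-bit major opcode (0x201 for nop, 0x001 for lnop) and
     ignore the unused operand bits; an all-zero word ("stop 0") also
     counts as padding.  */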
1773238405Sjkim  if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
1774238405Sjkim    return TRUE;
1775238405Sjkim  if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
1776238405Sjkim    return TRUE;
1777238405Sjkim  return FALSE;
1778238405Sjkim}
1779238405Sjkim
1780238405Sjkim/* Extend the range of FUN to cover nop padding up to LIMIT.
1781238405Sjkim   Return TRUE iff some instruction other than a NOP was found.  */
1782238405Sjkim
1783238405Sjkimstatic bfd_boolean
1784238405Sjkiminsns_at_end (struct function_info *fun, bfd_vma limit)
1785238405Sjkim{
1786238405Sjkim  bfd_vma off = (fun->hi + 3) & -4;
1787238405Sjkim
1788238405Sjkim  while (off < limit && is_nop (fun->sec, off))
1789238405Sjkim    off += 4;
1790238405Sjkim  if (off < limit)
1791238405Sjkim    {
1792238405Sjkim      fun->hi = off;
1793238405Sjkim      return TRUE;
1794238405Sjkim    }
1795238405Sjkim  fun->hi = limit;
1796238405Sjkim  return FALSE;
1797238405Sjkim}
1798238405Sjkim
1799238405Sjkim/* Check and fix overlapping function ranges.  Return TRUE iff there
1800238405Sjkim   are gaps in the current info we have about functions in SEC.  */
1801238405Sjkim
1802238405Sjkimstatic bfd_boolean
1803238405Sjkimcheck_function_ranges (asection *sec, struct bfd_link_info *info)
1804238405Sjkim{
1805238405Sjkim  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1806238405Sjkim  struct spu_elf_stack_info *sinfo = sec_data->stack_info;
1807238405Sjkim  int i;
1808238405Sjkim  bfd_boolean gaps = FALSE;
1809238405Sjkim
1810238405Sjkim  if (sinfo == NULL)
1811238405Sjkim    return FALSE;
1812238405Sjkim
1813238405Sjkim  for (i = 1; i < sinfo->num_fun; i++)
1814238405Sjkim    if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
1815238405Sjkim      {
1816238405Sjkim	/* Fix overlapping symbols.  */
1817238405Sjkim	const char *f1 = func_name (&sinfo->fun[i - 1]);
1818238405Sjkim	const char *f2 = func_name (&sinfo->fun[i]);
1819238405Sjkim
1820238405Sjkim	info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
1821238405Sjkim	sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
1822238405Sjkim      }
1823238405Sjkim    else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
1824238405Sjkim      gaps = TRUE;
1825238405Sjkim
1826238405Sjkim  if (sinfo->num_fun == 0)
1827238405Sjkim    gaps = TRUE;
1828238405Sjkim  else
1829238405Sjkim    {
1830238405Sjkim      if (sinfo->fun[0].lo != 0)
1831238405Sjkim	gaps = TRUE;
1832238405Sjkim      if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
1833238405Sjkim	{
1834238405Sjkim	  const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);
1835238405Sjkim
1836238405Sjkim	  info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
1837238405Sjkim	  sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
1838238405Sjkim	}
1839238405Sjkim      else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
1840238405Sjkim	gaps = TRUE;
1841238405Sjkim    }
1842238405Sjkim  return gaps;
1843238405Sjkim}
1844238405Sjkim
1845238405Sjkim/* Search current function info for a function that contains address
1846238405Sjkim   OFFSET in section SEC.  */
1847238405Sjkim
1848238405Sjkimstatic struct function_info *
1849238405Sjkimfind_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
1850238405Sjkim{
1851238405Sjkim  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1852238405Sjkim  struct spu_elf_stack_info *sinfo = sec_data->stack_info;
1853238405Sjkim  int lo, hi, mid;
1854238405Sjkim
1855238405Sjkim  lo = 0;
1856238405Sjkim  hi = sinfo->num_fun;
1857238405Sjkim  while (lo < hi)
1858238405Sjkim    {
1859238405Sjkim      mid = (lo + hi) / 2;
1860238405Sjkim      if (offset < sinfo->fun[mid].lo)
1861238405Sjkim	hi = mid;
1862238405Sjkim      else if (offset >= sinfo->fun[mid].hi)
1863238405Sjkim	lo = mid + 1;
1864238405Sjkim      else
1865238405Sjkim	return &sinfo->fun[mid];
1866238405Sjkim    }
1867238405Sjkim  info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
1868238405Sjkim			  sec, offset);
1869238405Sjkim  return NULL;
1870238405Sjkim}
1871238405Sjkim
1872238405Sjkim/* Add CALLEE to CALLER call list if not already present.  */
1873238405Sjkim
1874238405Sjkimstatic bfd_boolean
1875238405Sjkiminsert_callee (struct function_info *caller, struct call_info *callee)
1876238405Sjkim{
1877238405Sjkim  struct call_info *p;
1878238405Sjkim  for (p = caller->call_list; p != NULL; p = p->next)
1879238405Sjkim    if (p->fun == callee->fun)
1880238405Sjkim      {
1881238405Sjkim	/* Tail calls use less stack than normal calls.  Retain entry
1882238405Sjkim	   for normal call over one for tail call.  */
1883238405Sjkim	if (p->is_tail > callee->is_tail)
1884238405Sjkim	  p->is_tail = callee->is_tail;
1885238405Sjkim	return FALSE;
1886238405Sjkim      }
1887238405Sjkim  callee->next = caller->call_list;
1888238405Sjkim  caller->call_list = callee;
1889238405Sjkim  return TRUE;
1890238405Sjkim}
1891238405Sjkim
1892238405Sjkim/* Rummage through the relocs for SEC, looking for function calls.
1893238405Sjkim   If CALL_TREE is true, fill in call graph.  If CALL_TREE is false,
1894238405Sjkim   mark destination symbols on calls as being functions.  Also
1895238405Sjkim   look at branches, which may be tail calls or go to hot/cold
1896238405Sjkim   section part of same function.  */
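/* Note that this is called twice: first from discover_functions with
   CALL_TREE false, to seed the per-section function tables, and then
   from build_call_tree with CALL_TREE true, to record the call graph
   edges.  */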
1897238405Sjkim
1898238405Sjkimstatic bfd_boolean
1899238405Sjkimmark_functions_via_relocs (asection *sec,
1900238405Sjkim			   struct bfd_link_info *info,
1901238405Sjkim			   int call_tree)
1902238405Sjkim{
1903238405Sjkim  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
1904238405Sjkim  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
1905238405Sjkim  Elf_Internal_Sym *syms;
1906238405Sjkim  void *psyms;
1907238405Sjkim  static bfd_boolean warned;
1908238405Sjkim
1909238405Sjkim  internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
1910238405Sjkim					       info->keep_memory);
1911238405Sjkim  if (internal_relocs == NULL)
1912238405Sjkim    return FALSE;
1913238405Sjkim
1914238405Sjkim  symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
1915238405Sjkim  psyms = &symtab_hdr->contents;
1916238405Sjkim  syms = *(Elf_Internal_Sym **) psyms;
1917238405Sjkim  irela = internal_relocs;
1918238405Sjkim  irelaend = irela + sec->reloc_count;
1919238405Sjkim  for (; irela < irelaend; irela++)
1920238405Sjkim    {
1921238405Sjkim      enum elf_spu_reloc_type r_type;
1922238405Sjkim      unsigned int r_indx;
1923238405Sjkim      asection *sym_sec;
1924238405Sjkim      Elf_Internal_Sym *sym;
1925238405Sjkim      struct elf_link_hash_entry *h;
1926238405Sjkim      bfd_vma val;
1927238405Sjkim      unsigned char insn[4];
1928238405Sjkim      bfd_boolean is_call;
1929238405Sjkim      struct function_info *caller;
1930238405Sjkim      struct call_info *callee;
1931238405Sjkim
1932238405Sjkim      r_type = ELF32_R_TYPE (irela->r_info);
1933238405Sjkim      if (r_type != R_SPU_REL16
1934238405Sjkim	  && r_type != R_SPU_ADDR16)
1935238405Sjkim	continue;
1936238405Sjkim
1937238405Sjkim      r_indx = ELF32_R_SYM (irela->r_info);
1938238405Sjkim      if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
1939238405Sjkim	return FALSE;
1940238405Sjkim
1941238405Sjkim      if (sym_sec == NULL
1942238405Sjkim	  || sym_sec->output_section == NULL
1943238405Sjkim	  || sym_sec->output_section->owner != sec->output_section->owner)
1944238405Sjkim	continue;
1945238405Sjkim
1946238405Sjkim      if (!bfd_get_section_contents (sec->owner, sec, insn,
1947238405Sjkim				     irela->r_offset, 4))
1948238405Sjkim	return FALSE;
1949238405Sjkim      if (!is_branch (insn))
1950238405Sjkim	continue;
1951238405Sjkim
1952238405Sjkim      if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
1953238405Sjkim	  != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
1954238405Sjkim	{
1955238405Sjkim	  if (!call_tree)
1956238405Sjkim	    warned = TRUE;
1957238405Sjkim	  if (!call_tree || !warned)
1958238405Sjkim	    info->callbacks->einfo (_("%B(%A+0x%v): call to non-code section"
1959238405Sjkim				      " %B(%A), stack analysis incomplete\n"),
1960238405Sjkim				    sec->owner, sec, irela->r_offset,
1961238405Sjkim				    sym_sec->owner, sym_sec);
1962238405Sjkim	  continue;
1963238405Sjkim	}
1964238405Sjkim
1965238405Sjkim      is_call = (insn[0] & 0xfd) == 0x31;
1966238405Sjkim
1967238405Sjkim      if (h)
1968238405Sjkim	val = h->root.u.def.value;
1969238405Sjkim      else
1970238405Sjkim	val = sym->st_value;
1971238405Sjkim      val += irela->r_addend;
1972238405Sjkim
1973238405Sjkim      if (!call_tree)
1974238405Sjkim	{
1975238405Sjkim	  struct function_info *fun;
1976238405Sjkim
1977238405Sjkim	  if (irela->r_addend != 0)
1978238405Sjkim	    {
1979238405Sjkim	      Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
1980238405Sjkim	      if (fake == NULL)
1981238405Sjkim		return FALSE;
1982238405Sjkim	      fake->st_value = val;
1983238405Sjkim	      fake->st_shndx
1984238405Sjkim		= _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
1985238405Sjkim	      sym = fake;
1986238405Sjkim	    }
1987238405Sjkim	  if (sym)
1988238405Sjkim	    fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
1989238405Sjkim	  else
1990238405Sjkim	    fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
1991238405Sjkim	  if (fun == NULL)
1992238405Sjkim	    return FALSE;
1993238405Sjkim	  if (irela->r_addend != 0
1994238405Sjkim	      && fun->u.sym != sym)
1995238405Sjkim	    free (sym);
1996238405Sjkim	  continue;
1997238405Sjkim	}
1998238405Sjkim
1999238405Sjkim      caller = find_function (sec, irela->r_offset, info);
2000238405Sjkim      if (caller == NULL)
2001238405Sjkim	return FALSE;
2002238405Sjkim      callee = bfd_malloc (sizeof *callee);
2003238405Sjkim      if (callee == NULL)
2004238405Sjkim	return FALSE;
2005238405Sjkim
2006238405Sjkim      callee->fun = find_function (sym_sec, val, info);
2007238405Sjkim      if (callee->fun == NULL)
2008238405Sjkim	return FALSE;
2009238405Sjkim      callee->is_tail = !is_call;
2010238405Sjkim      if (!insert_callee (caller, callee))
2011238405Sjkim	free (callee);
2012238405Sjkim      else if (!is_call
2013238405Sjkim	       && !callee->fun->is_func
2014238405Sjkim	       && callee->fun->stack == 0)
2015238405Sjkim	{
2016238405Sjkim	  /* This is either a tail call or a branch from one part of
	     the function to another, i.e. hot/cold section.  If the
2018238405Sjkim	     destination has been called by some other function then
2019238405Sjkim	     it is a separate function.  We also assume that functions
2020238405Sjkim	     are not split across input files.  */
2021238405Sjkim	  if (callee->fun->start != NULL
2022238405Sjkim	      || sec->owner != sym_sec->owner)
2023238405Sjkim	    {
2024238405Sjkim	      callee->fun->start = NULL;
2025238405Sjkim	      callee->fun->is_func = TRUE;
2026238405Sjkim	    }
2027238405Sjkim	  else
2028238405Sjkim	    callee->fun->start = caller;
2029238405Sjkim	}
2030238405Sjkim    }
2031238405Sjkim
2032238405Sjkim  return TRUE;
2033238405Sjkim}
2034238405Sjkim
2035238405Sjkim/* Handle something like .init or .fini, which has a piece of a function.
2036238405Sjkim   These sections are pasted together to form a single function.  */
2037238405Sjkim
2038238405Sjkimstatic bfd_boolean
2039238405Sjkimpasted_function (asection *sec, struct bfd_link_info *info)
2040238405Sjkim{
2041238405Sjkim  struct bfd_link_order *l;
2042238405Sjkim  struct _spu_elf_section_data *sec_data;
2043238405Sjkim  struct spu_elf_stack_info *sinfo;
2044238405Sjkim  Elf_Internal_Sym *fake;
2045238405Sjkim  struct function_info *fun, *fun_start;
2046238405Sjkim
2047238405Sjkim  fake = bfd_zmalloc (sizeof (*fake));
2048238405Sjkim  if (fake == NULL)
2049238405Sjkim    return FALSE;
2050238405Sjkim  fake->st_value = 0;
2051238405Sjkim  fake->st_size = sec->size;
2052238405Sjkim  fake->st_shndx
2053238405Sjkim    = _bfd_elf_section_from_bfd_section (sec->owner, sec);
2054238405Sjkim  fun = maybe_insert_function (sec, fake, FALSE, FALSE);
2055238405Sjkim  if (!fun)
2056238405Sjkim    return FALSE;
2057238405Sjkim
2058238405Sjkim  /* Find a function immediately preceding this section.  */
2059238405Sjkim  fun_start = NULL;
2060238405Sjkim  for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
2061238405Sjkim    {
2062238405Sjkim      if (l->u.indirect.section == sec)
2063238405Sjkim	{
2064238405Sjkim	  if (fun_start != NULL)
2065238405Sjkim	    {
2066238405Sjkim	      if (fun_start->start)
2067238405Sjkim		fun_start = fun_start->start;
2068238405Sjkim	      fun->start = fun_start;
2069238405Sjkim	    }
2070238405Sjkim	  return TRUE;
2071238405Sjkim	}
2072238405Sjkim      if (l->type == bfd_indirect_link_order
2073238405Sjkim	  && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
2074238405Sjkim	  && (sinfo = sec_data->stack_info) != NULL
2075238405Sjkim	  && sinfo->num_fun != 0)
2076238405Sjkim	fun_start = &sinfo->fun[sinfo->num_fun - 1];
2077238405Sjkim    }
2078238405Sjkim
2079238405Sjkim  info->callbacks->einfo (_("%A link_order not found\n"), sec);
2080238405Sjkim  return FALSE;
2081238405Sjkim}
2082238405Sjkim
2083238405Sjkim/* We're only interested in code sections.  */
2084238405Sjkim
2085238405Sjkimstatic bfd_boolean
2086238405Sjkiminteresting_section (asection *s, bfd *obfd, struct spu_link_hash_table *htab)
2087238405Sjkim{
2088238405Sjkim  return (s != htab->stub
2089238405Sjkim	  && s->output_section != NULL
2090238405Sjkim	  && s->output_section->owner == obfd
2091238405Sjkim	  && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2092238405Sjkim	      == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2093238405Sjkim	  && s->size != 0);
2094238405Sjkim}
2095238405Sjkim
2096238405Sjkim/* Map address ranges in code sections to functions.  */
2097238405Sjkim
2098238405Sjkimstatic bfd_boolean
2099238405Sjkimdiscover_functions (bfd *output_bfd, struct bfd_link_info *info)
2100238405Sjkim{
2101238405Sjkim  struct spu_link_hash_table *htab = spu_hash_table (info);
2102238405Sjkim  bfd *ibfd;
2103238405Sjkim  int bfd_idx;
2104238405Sjkim  Elf_Internal_Sym ***psym_arr;
2105238405Sjkim  asection ***sec_arr;
2106238405Sjkim  bfd_boolean gaps = FALSE;
2107238405Sjkim
2108238405Sjkim  bfd_idx = 0;
2109238405Sjkim  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2110238405Sjkim    bfd_idx++;
2111238405Sjkim
2112238405Sjkim  psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
2113238405Sjkim  if (psym_arr == NULL)
2114238405Sjkim    return FALSE;
2115238405Sjkim  sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
2116238405Sjkim  if (sec_arr == NULL)
2117238405Sjkim    return FALSE;
2118238405Sjkim
2119238405Sjkim
2120238405Sjkim  for (ibfd = info->input_bfds, bfd_idx = 0;
2121238405Sjkim       ibfd != NULL;
2122238405Sjkim       ibfd = ibfd->link_next, bfd_idx++)
2123238405Sjkim    {
2124238405Sjkim      extern const bfd_target bfd_elf32_spu_vec;
2125238405Sjkim      Elf_Internal_Shdr *symtab_hdr;
2126238405Sjkim      asection *sec;
2127238405Sjkim      size_t symcount;
2128238405Sjkim      Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2129238405Sjkim      asection **psecs, **p;
2130238405Sjkim
2131238405Sjkim      if (ibfd->xvec != &bfd_elf32_spu_vec)
2132238405Sjkim	continue;
2133238405Sjkim
2134238405Sjkim      /* Read all the symbols.  */
2135238405Sjkim      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2136238405Sjkim      symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
2137238405Sjkim      if (symcount == 0)
2138238405Sjkim	continue;
2139238405Sjkim
2140238405Sjkim      syms = (Elf_Internal_Sym *) symtab_hdr->contents;
2141238405Sjkim      if (syms == NULL)
2142238405Sjkim	{
2143238405Sjkim	  syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
2144238405Sjkim				       NULL, NULL, NULL);
2145238405Sjkim	  symtab_hdr->contents = (void *) syms;
2146238405Sjkim	  if (syms == NULL)
2147238405Sjkim	    return FALSE;
2148238405Sjkim	}
2149238405Sjkim
2150238405Sjkim      /* Select defined function symbols that are going to be output.  */
2151238405Sjkim      psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
2152238405Sjkim      if (psyms == NULL)
2153238405Sjkim	return FALSE;
2154238405Sjkim      psym_arr[bfd_idx] = psyms;
2155238405Sjkim      psecs = bfd_malloc (symcount * sizeof (*psecs));
2156238405Sjkim      if (psecs == NULL)
2157238405Sjkim	return FALSE;
2158238405Sjkim      sec_arr[bfd_idx] = psecs;
2159238405Sjkim      for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
2160238405Sjkim	if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
2161238405Sjkim	    || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
2162238405Sjkim	  {
2163238405Sjkim	    asection *s;
2164238405Sjkim
2165238405Sjkim	    *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
2166238405Sjkim	    if (s != NULL && interesting_section (s, output_bfd, htab))
2167238405Sjkim	      *psy++ = sy;
2168238405Sjkim	  }
2169238405Sjkim      symcount = psy - psyms;
2170238405Sjkim      *psy = NULL;
2171238405Sjkim
2172238405Sjkim      /* Sort them by section and offset within section.  */
2173238405Sjkim      sort_syms_syms = syms;
2174238405Sjkim      sort_syms_psecs = psecs;
2175238405Sjkim      qsort (psyms, symcount, sizeof (*psyms), sort_syms);
2176238405Sjkim
2177238405Sjkim      /* Now inspect the function symbols.  */
2178238405Sjkim      for (psy = psyms; psy < psyms + symcount; )
2179238405Sjkim	{
2180238405Sjkim	  asection *s = psecs[*psy - syms];
2181238405Sjkim	  Elf_Internal_Sym **psy2;
2182238405Sjkim
2183238405Sjkim	  for (psy2 = psy; ++psy2 < psyms + symcount; )
2184238405Sjkim	    if (psecs[*psy2 - syms] != s)
2185238405Sjkim	      break;
2186238405Sjkim
2187238405Sjkim	  if (!alloc_stack_info (s, psy2 - psy))
2188238405Sjkim	    return FALSE;
2189238405Sjkim	  psy = psy2;
2190238405Sjkim	}
2191238405Sjkim
2192238405Sjkim      /* First install info about properly typed and sized functions.
2193238405Sjkim	 In an ideal world this will cover all code sections, except
2194238405Sjkim	 when partitioning functions into hot and cold sections,
	 and the horrible pasted-together .init and .fini functions.  */
2196238405Sjkim      for (psy = psyms; psy < psyms + symcount; ++psy)
2197238405Sjkim	{
2198238405Sjkim	  sy = *psy;
2199238405Sjkim	  if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
2200238405Sjkim	    {
2201238405Sjkim	      asection *s = psecs[sy - syms];
2202238405Sjkim	      if (!maybe_insert_function (s, sy, FALSE, TRUE))
2203238405Sjkim		return FALSE;
2204238405Sjkim	    }
2205238405Sjkim	}
2206238405Sjkim
2207238405Sjkim      for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2208238405Sjkim	if (interesting_section (sec, output_bfd, htab))
2209238405Sjkim	  gaps |= check_function_ranges (sec, info);
2210238405Sjkim    }
2211238405Sjkim
2212238405Sjkim  if (gaps)
2213238405Sjkim    {
2214238405Sjkim      /* See if we can discover more function symbols by looking at
2215238405Sjkim	 relocations.  */
2216238405Sjkim      for (ibfd = info->input_bfds, bfd_idx = 0;
2217238405Sjkim	   ibfd != NULL;
2218238405Sjkim	   ibfd = ibfd->link_next, bfd_idx++)
2219238405Sjkim	{
2220238405Sjkim	  asection *sec;
2221238405Sjkim
2222238405Sjkim	  if (psym_arr[bfd_idx] == NULL)
2223238405Sjkim	    continue;
2224238405Sjkim
2225238405Sjkim	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2226238405Sjkim	    if (interesting_section (sec, output_bfd, htab)
2227238405Sjkim		&& sec->reloc_count != 0)
2228238405Sjkim	      {
2229238405Sjkim		if (!mark_functions_via_relocs (sec, info, FALSE))
2230238405Sjkim		  return FALSE;
2231238405Sjkim	      }
2232238405Sjkim	}
2233238405Sjkim
2234238405Sjkim      for (ibfd = info->input_bfds, bfd_idx = 0;
2235238405Sjkim	   ibfd != NULL;
2236238405Sjkim	   ibfd = ibfd->link_next, bfd_idx++)
2237238405Sjkim	{
2238238405Sjkim	  Elf_Internal_Shdr *symtab_hdr;
2239238405Sjkim	  asection *sec;
2240238405Sjkim	  Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2241238405Sjkim	  asection **psecs;
2242238405Sjkim
2243238405Sjkim	  if ((psyms = psym_arr[bfd_idx]) == NULL)
2244238405Sjkim	    continue;
2245238405Sjkim
2246238405Sjkim	  psecs = sec_arr[bfd_idx];
2247238405Sjkim
2248238405Sjkim	  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2249238405Sjkim	  syms = (Elf_Internal_Sym *) symtab_hdr->contents;
2250238405Sjkim
2251238405Sjkim	  gaps = FALSE;
2252238405Sjkim	  for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2253238405Sjkim	    if (interesting_section (sec, output_bfd, htab))
2254238405Sjkim	      gaps |= check_function_ranges (sec, info);
2255238405Sjkim	  if (!gaps)
2256238405Sjkim	    continue;
2257238405Sjkim
2258238405Sjkim	  /* Finally, install all globals.  */
2259238405Sjkim	  for (psy = psyms; (sy = *psy) != NULL; ++psy)
2260238405Sjkim	    {
2261238405Sjkim	      asection *s;
2262238405Sjkim
2263238405Sjkim	      s = psecs[sy - syms];
2264238405Sjkim
2265238405Sjkim	      /* Global syms might be improperly typed functions.  */
2266238405Sjkim	      if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
2267238405Sjkim		  && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
2268238405Sjkim		{
2269238405Sjkim		  if (!maybe_insert_function (s, sy, FALSE, FALSE))
2270238405Sjkim		    return FALSE;
2271238405Sjkim		}
2272238405Sjkim	    }
2273238405Sjkim
2274238405Sjkim	  /* Some of the symbols we've installed as marking the
2275238405Sjkim	     beginning of functions may have a size of zero.  Extend
2276238405Sjkim	     the range of such functions to the beginning of the
2277238405Sjkim	     next symbol of interest.  */
2278238405Sjkim	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2279238405Sjkim	    if (interesting_section (sec, output_bfd, htab))
2280238405Sjkim	      {
2281238405Sjkim		struct _spu_elf_section_data *sec_data;
2282238405Sjkim		struct spu_elf_stack_info *sinfo;
2283238405Sjkim
2284238405Sjkim		sec_data = spu_elf_section_data (sec);
2285238405Sjkim		sinfo = sec_data->stack_info;
2286238405Sjkim		if (sinfo != NULL)
2287238405Sjkim		  {
2288238405Sjkim		    int fun_idx;
2289238405Sjkim		    bfd_vma hi = sec->size;
2290238405Sjkim
2291238405Sjkim		    for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
2292238405Sjkim		      {
2293238405Sjkim			sinfo->fun[fun_idx].hi = hi;
2294238405Sjkim			hi = sinfo->fun[fun_idx].lo;
2295238405Sjkim		      }
2296238405Sjkim		  }
2297238405Sjkim		/* No symbols in this section.  Must be .init or .fini
2298238405Sjkim		   or something similar.  */
2299238405Sjkim		else if (!pasted_function (sec, info))
2300238405Sjkim		  return FALSE;
2301238405Sjkim	      }
2302238405Sjkim	}
2303238405Sjkim    }
2304238405Sjkim
2305238405Sjkim  for (ibfd = info->input_bfds, bfd_idx = 0;
2306238405Sjkim       ibfd != NULL;
2307238405Sjkim       ibfd = ibfd->link_next, bfd_idx++)
2308238405Sjkim    {
2309238405Sjkim      if (psym_arr[bfd_idx] == NULL)
2310238405Sjkim	continue;
2311238405Sjkim
2312238405Sjkim      free (psym_arr[bfd_idx]);
2313238405Sjkim      free (sec_arr[bfd_idx]);
2314238405Sjkim    }
2315238405Sjkim
2316238405Sjkim  free (psym_arr);
2317238405Sjkim  free (sec_arr);
2318238405Sjkim
2319238405Sjkim  return TRUE;
2320238405Sjkim}
2321238405Sjkim
2322238405Sjkim/* Mark nodes in the call graph that are called by some other node.  */
2323238405Sjkim
2324238405Sjkimstatic void
2325238405Sjkimmark_non_root (struct function_info *fun)
2326238405Sjkim{
2327238405Sjkim  struct call_info *call;
2328238405Sjkim
2329238405Sjkim  fun->visit1 = TRUE;
2330238405Sjkim  for (call = fun->call_list; call; call = call->next)
2331238405Sjkim    {
2332238405Sjkim      call->fun->non_root = TRUE;
2333238405Sjkim      if (!call->fun->visit1)
2334238405Sjkim	mark_non_root (call->fun);
2335238405Sjkim    }
2336238405Sjkim}
2337238405Sjkim
2338238405Sjkim/* Remove cycles from the call graph.  */
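/* For example, with mutually recursive functions a and b where a is a
   root, the traversal below visits a then b; the edge b -> a is found
   while a is still being marked, so that edge is removed and reported,
   leaving only a -> b in the graph.  */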
2339238405Sjkim
2340238405Sjkimstatic void
2341238405Sjkimcall_graph_traverse (struct function_info *fun, struct bfd_link_info *info)
2342238405Sjkim{
2343238405Sjkim  struct call_info **callp, *call;
2344238405Sjkim
2345238405Sjkim  fun->visit2 = TRUE;
2346238405Sjkim  fun->marking = TRUE;
2347238405Sjkim
2348238405Sjkim  callp = &fun->call_list;
2349238405Sjkim  while ((call = *callp) != NULL)
2350238405Sjkim    {
2351238405Sjkim      if (!call->fun->visit2)
2352238405Sjkim	call_graph_traverse (call->fun, info);
2353238405Sjkim      else if (call->fun->marking)
2354238405Sjkim	{
2355238405Sjkim	  const char *f1 = func_name (fun);
2356238405Sjkim	  const char *f2 = func_name (call->fun);
2357238405Sjkim
2358238405Sjkim	  info->callbacks->info (_("Stack analysis will ignore the call "
2359238405Sjkim				   "from %s to %s\n"),
2360238405Sjkim				 f1, f2);
2361238405Sjkim	  *callp = call->next;
2362238405Sjkim	  continue;
2363238405Sjkim	}
2364238405Sjkim      callp = &call->next;
2365238405Sjkim    }
2366238405Sjkim  fun->marking = FALSE;
2367238405Sjkim}
2368238405Sjkim
2369238405Sjkim/* Populate call_list for each function.  */
2370238405Sjkim
2371238405Sjkimstatic bfd_boolean
2372238405Sjkimbuild_call_tree (bfd *output_bfd, struct bfd_link_info *info)
2373238405Sjkim{
2374238405Sjkim  struct spu_link_hash_table *htab = spu_hash_table (info);
2375238405Sjkim  bfd *ibfd;
2376238405Sjkim
2377238405Sjkim  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2378238405Sjkim    {
2379238405Sjkim      extern const bfd_target bfd_elf32_spu_vec;
2380238405Sjkim      asection *sec;
2381238405Sjkim
2382238405Sjkim      if (ibfd->xvec != &bfd_elf32_spu_vec)
2383238405Sjkim	continue;
2384238405Sjkim
2385238405Sjkim      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2386238405Sjkim	{
2387238405Sjkim	  if (!interesting_section (sec, output_bfd, htab)
2388238405Sjkim	      || sec->reloc_count == 0)
2389238405Sjkim	    continue;
2390238405Sjkim
2391238405Sjkim	  if (!mark_functions_via_relocs (sec, info, TRUE))
2392238405Sjkim	    return FALSE;
2393238405Sjkim	}
2394238405Sjkim
2395238405Sjkim      /* Transfer call info from hot/cold section part of function
2396238405Sjkim	 to main entry.  */
2397238405Sjkim      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2398238405Sjkim	{
2399238405Sjkim	  struct _spu_elf_section_data *sec_data;
2400238405Sjkim	  struct spu_elf_stack_info *sinfo;
2401238405Sjkim
2402238405Sjkim	  if ((sec_data = spu_elf_section_data (sec)) != NULL
2403238405Sjkim	      && (sinfo = sec_data->stack_info) != NULL)
2404238405Sjkim	    {
2405238405Sjkim	      int i;
2406238405Sjkim	      for (i = 0; i < sinfo->num_fun; ++i)
2407238405Sjkim		{
2408238405Sjkim		  if (sinfo->fun[i].start != NULL)
2409238405Sjkim		    {
2410238405Sjkim		      struct call_info *call = sinfo->fun[i].call_list;
2411238405Sjkim
2412238405Sjkim		      while (call != NULL)
2413238405Sjkim			{
2414238405Sjkim			  struct call_info *call_next = call->next;
2415238405Sjkim			  if (!insert_callee (sinfo->fun[i].start, call))
2416238405Sjkim			    free (call);
2417238405Sjkim			  call = call_next;
2418238405Sjkim			}
2419238405Sjkim		      sinfo->fun[i].call_list = NULL;
2420238405Sjkim		      sinfo->fun[i].non_root = TRUE;
2421238405Sjkim		    }
2422238405Sjkim		}
2423238405Sjkim	    }
2424238405Sjkim	}
2425238405Sjkim    }
2426238405Sjkim
2427238405Sjkim  /* Find the call graph root(s).  */
2428238405Sjkim  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2429238405Sjkim    {
2430238405Sjkim      extern const bfd_target bfd_elf32_spu_vec;
2431238405Sjkim      asection *sec;
2432238405Sjkim
2433238405Sjkim      if (ibfd->xvec != &bfd_elf32_spu_vec)
2434238405Sjkim	continue;
2435238405Sjkim
2436238405Sjkim      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2437238405Sjkim	{
2438238405Sjkim	  struct _spu_elf_section_data *sec_data;
2439238405Sjkim	  struct spu_elf_stack_info *sinfo;
2440238405Sjkim
2441238405Sjkim	  if ((sec_data = spu_elf_section_data (sec)) != NULL
2442238405Sjkim	      && (sinfo = sec_data->stack_info) != NULL)
2443238405Sjkim	    {
2444238405Sjkim	      int i;
2445238405Sjkim	      for (i = 0; i < sinfo->num_fun; ++i)
2446238405Sjkim		if (!sinfo->fun[i].visit1)
2447238405Sjkim		  mark_non_root (&sinfo->fun[i]);
2448238405Sjkim	    }
2449238405Sjkim	}
2450238405Sjkim    }
2451238405Sjkim
2452238405Sjkim  /* Remove cycles from the call graph.  We start from the root node(s)
2453238405Sjkim     so that we break cycles in a reasonable place.  */
2454238405Sjkim  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2455238405Sjkim    {
2456238405Sjkim      extern const bfd_target bfd_elf32_spu_vec;
2457238405Sjkim      asection *sec;
2458238405Sjkim
2459238405Sjkim      if (ibfd->xvec != &bfd_elf32_spu_vec)
2460238405Sjkim	continue;
2461238405Sjkim
2462238405Sjkim      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2463238405Sjkim	{
2464238405Sjkim	  struct _spu_elf_section_data *sec_data;
2465238405Sjkim	  struct spu_elf_stack_info *sinfo;
2466238405Sjkim
2467238405Sjkim	  if ((sec_data = spu_elf_section_data (sec)) != NULL
2468238405Sjkim	      && (sinfo = sec_data->stack_info) != NULL)
2469238405Sjkim	    {
2470238405Sjkim	      int i;
2471238405Sjkim	      for (i = 0; i < sinfo->num_fun; ++i)
2472238405Sjkim		if (!sinfo->fun[i].non_root)
2473238405Sjkim		  call_graph_traverse (&sinfo->fun[i], info);
2474238405Sjkim	    }
2475238405Sjkim	}
2476238405Sjkim    }
2477238405Sjkim
2478238405Sjkim  return TRUE;
2479238405Sjkim}
2480238405Sjkim
2481238405Sjkim/* Descend the call graph for FUN, accumulating total stack required.  */
2482238405Sjkim
2483238405Sjkimstatic bfd_vma
2484238405Sjkimsum_stack (struct function_info *fun,
2485238405Sjkim	   struct bfd_link_info *info,
2486238405Sjkim	   int emit_stack_syms)
2487238405Sjkim{
2488238405Sjkim  struct call_info *call;
2489238405Sjkim  struct function_info *max = NULL;
2490238405Sjkim  bfd_vma max_stack = fun->stack;
2491238405Sjkim  bfd_vma stack;
2492238405Sjkim  const char *f1;
2493238405Sjkim
2494238405Sjkim  if (fun->visit3)
2495238405Sjkim    return max_stack;
2496238405Sjkim
2497238405Sjkim  for (call = fun->call_list; call; call = call->next)
2498238405Sjkim    {
2499238405Sjkim      stack = sum_stack (call->fun, info, emit_stack_syms);
2500238405Sjkim      /* Include caller stack for normal calls, don't do so for
2501238405Sjkim	 tail calls.  fun->stack here is local stack usage for
2502238405Sjkim	 this function.  */
2503238405Sjkim      if (!call->is_tail)
2504238405Sjkim	stack += fun->stack;
2505238405Sjkim      if (max_stack < stack)
2506238405Sjkim	{
2507238405Sjkim	  max_stack = stack;
2508238405Sjkim	  max = call->fun;
2509238405Sjkim	}
2510238405Sjkim    }
2511238405Sjkim
2512238405Sjkim  f1 = func_name (fun);
2513238405Sjkim  info->callbacks->minfo (_("%s: 0x%v 0x%v\n"), f1, fun->stack, max_stack);
2514238405Sjkim
2515238405Sjkim  if (fun->call_list)
2516238405Sjkim    {
2517238405Sjkim      info->callbacks->minfo (_("  calls:\n"));
2518238405Sjkim      for (call = fun->call_list; call; call = call->next)
2519238405Sjkim	{
2520238405Sjkim	  const char *f2 = func_name (call->fun);
2521238405Sjkim	  const char *ann1 = call->fun == max ? "*" : " ";
2522238405Sjkim	  const char *ann2 = call->is_tail ? "t" : " ";
2523238405Sjkim
2524238405Sjkim	  info->callbacks->minfo (_("   %s%s %s\n"), ann1, ann2, f2);
2525238405Sjkim	}
2526238405Sjkim    }
2527238405Sjkim
2528238405Sjkim  /* Now fun->stack holds cumulative stack.  */
2529238405Sjkim  fun->stack = max_stack;
2530238405Sjkim  fun->visit3 = TRUE;
2531238405Sjkim
2532238405Sjkim  if (emit_stack_syms)
2533238405Sjkim    {
2534238405Sjkim      struct spu_link_hash_table *htab = spu_hash_table (info);
2535238405Sjkim      char *name = bfd_malloc (18 + strlen (f1));
2536238405Sjkim      struct elf_link_hash_entry *h;
2537238405Sjkim
2538238405Sjkim      if (name != NULL)
2539238405Sjkim	{
2540238405Sjkim	  if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
2541238405Sjkim	    sprintf (name, "__stack_%s", f1);
2542238405Sjkim	  else
2543238405Sjkim	    sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);
2544
2545	  h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
2546	  free (name);
2547	  if (h != NULL
2548	      && (h->root.type == bfd_link_hash_new
2549		  || h->root.type == bfd_link_hash_undefined
2550		  || h->root.type == bfd_link_hash_undefweak))
2551	    {
2552	      h->root.type = bfd_link_hash_defined;
2553	      h->root.u.def.section = bfd_abs_section_ptr;
2554	      h->root.u.def.value = max_stack;
2555	      h->size = 0;
2556	      h->type = 0;
2557	      h->ref_regular = 1;
2558	      h->def_regular = 1;
2559	      h->ref_regular_nonweak = 1;
2560	      h->forced_local = 1;
2561	      h->non_elf = 0;
2562	    }
2563	}
2564    }
2565
2566  return max_stack;
2567}
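/* Illustrative note, not part of this file: when the stack analysis
   symbols are emitted, application code can read the absolute symbols
   defined above, for example

	extern const char __stack_main[];
	size_t main_stack_estimate = (size_t) __stack_main;

   where "__stack_main" is a hypothetical instance of the
   "__stack_<function>" pattern produced in sum_stack for global
   functions.  */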
2568
2569/* Provide an estimate of total stack required.  */
2570
2571static bfd_boolean
2572spu_elf_stack_analysis (bfd *output_bfd,
2573			struct bfd_link_info *info,
2574			int emit_stack_syms)
2575{
2576  bfd *ibfd;
2577  bfd_vma max_stack = 0;
2578
2579  if (!discover_functions (output_bfd, info))
2580    return FALSE;
2581
2582  if (!build_call_tree (output_bfd, info))
2583    return FALSE;
2584
2585  info->callbacks->info (_("Stack size for call graph root nodes.\n"));
2586  info->callbacks->minfo (_("\nStack size for functions.  "
2587			    "Annotations: '*' max stack, 't' tail call\n"));
2588  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2589    {
2590      extern const bfd_target bfd_elf32_spu_vec;
2591      asection *sec;
2592
2593      if (ibfd->xvec != &bfd_elf32_spu_vec)
2594	continue;
2595
2596      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2597	{
2598	  struct _spu_elf_section_data *sec_data;
2599	  struct spu_elf_stack_info *sinfo;
2600
2601	  if ((sec_data = spu_elf_section_data (sec)) != NULL
2602	      && (sinfo = sec_data->stack_info) != NULL)
2603	    {
2604	      int i;
2605	      for (i = 0; i < sinfo->num_fun; ++i)
2606		{
2607		  if (!sinfo->fun[i].non_root)
2608		    {
2609		      bfd_vma stack;
2610		      const char *f1;
2611
2612		      stack = sum_stack (&sinfo->fun[i], info,
2613					 emit_stack_syms);
2614		      f1 = func_name (&sinfo->fun[i]);
2615		      info->callbacks->info (_("  %s: 0x%v\n"),
2616					      f1, stack);
2617		      if (max_stack < stack)
2618			max_stack = stack;
2619		    }
2620		}
2621	    }
2622	}
2623    }
2624
2625  info->callbacks->info (_("Maximum stack required is 0x%v\n"), max_stack);
2626  return TRUE;
2627}
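/* The report printed above looks roughly like this (names and values
   are made up):

	Stack size for call graph root nodes.
	  _start: 0x210
	Maximum stack required is 0x210

   with the per-function detail emitted via minfo going to the link
   map file.  */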
2628
2629/* Perform a final link.  */
2630
2631static bfd_boolean
2632spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
2633{
2634  struct spu_link_hash_table *htab = spu_hash_table (info);
2635
2636  if (htab->stack_analysis
2637      && !spu_elf_stack_analysis (output_bfd, info, htab->emit_stack_syms))
2638    info->callbacks->einfo ("%X%P: stack analysis error: %E\n");
2639
2640  return bfd_elf_final_link (output_bfd, info);
2641}
2642
2643/* Called when not normally emitting relocs, ie. !info->relocatable
2644   and !info->emitrelocations.  Returns a count of special relocs
2645   that need to be emitted.  */
2646
2647static unsigned int
2648spu_elf_count_relocs (asection *sec, Elf_Internal_Rela *relocs)
2649{
2650  unsigned int count = 0;
2651  Elf_Internal_Rela *relend = relocs + sec->reloc_count;
2652
2653  for (; relocs < relend; relocs++)
2654    {
2655      int r_type = ELF32_R_TYPE (relocs->r_info);
2656      if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
2657	++count;
2658    }
2659
2660  return count;
2661}
2662
2663/* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD.  */
2664
2665static bfd_boolean
2666spu_elf_relocate_section (bfd *output_bfd,
2667			  struct bfd_link_info *info,
2668			  bfd *input_bfd,
2669			  asection *input_section,
2670			  bfd_byte *contents,
2671			  Elf_Internal_Rela *relocs,
2672			  Elf_Internal_Sym *local_syms,
2673			  asection **local_sections)
2674{
2675  Elf_Internal_Shdr *symtab_hdr;
2676  struct elf_link_hash_entry **sym_hashes;
2677  Elf_Internal_Rela *rel, *relend;
2678  struct spu_link_hash_table *htab;
2679  bfd_boolean ret = TRUE;
2680  bfd_boolean emit_these_relocs = FALSE;
2681
2682  htab = spu_hash_table (info);
2683  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
2684  sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));
2685
2686  rel = relocs;
2687  relend = relocs + input_section->reloc_count;
2688  for (; rel < relend; rel++)
2689    {
2690      int r_type;
2691      reloc_howto_type *howto;
2692      unsigned long r_symndx;
2693      Elf_Internal_Sym *sym;
2694      asection *sec;
2695      struct elf_link_hash_entry *h;
2696      const char *sym_name;
2697      bfd_vma relocation;
2698      bfd_vma addend;
2699      bfd_reloc_status_type r;
2700      bfd_boolean unresolved_reloc;
2701      bfd_boolean warned;
2702      bfd_boolean branch;
2703
2704      r_symndx = ELF32_R_SYM (rel->r_info);
2705      r_type = ELF32_R_TYPE (rel->r_info);
2706      if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
2707	{
2708	  emit_these_relocs = TRUE;
2709	  continue;
2710	}
2711
2712      howto = elf_howto_table + r_type;
2713      unresolved_reloc = FALSE;
2714      warned = FALSE;
2715      h = NULL;
2716      sym = NULL;
2717      sec = NULL;
2718      if (r_symndx < symtab_hdr->sh_info)
2719	{
2720	  sym = local_syms + r_symndx;
2721	  sec = local_sections[r_symndx];
2722	  sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
2723	  relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
2724	}
2725      else
2726	{
2727	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2728				   r_symndx, symtab_hdr, sym_hashes,
2729				   h, sec, relocation,
2730				   unresolved_reloc, warned);
2731	  sym_name = h->root.root.string;
2732	}
2733
2734      if (sec != NULL && elf_discarded_section (sec))
2735	{
2736	  /* For relocs against symbols from removed linkonce sections,
2737	     or sections discarded by a linker script, we just want the
2738	     section contents zeroed.  Avoid any special processing.  */
2739	  _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
2740	  rel->r_info = 0;
2741	  rel->r_addend = 0;
2742	  continue;
2743	}
2744
2745      if (info->relocatable)
2746	continue;
2747
2748      if (unresolved_reloc)
2749	{
2750	  (*_bfd_error_handler)
2751	    (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
2752	     input_bfd,
2753	     bfd_get_section_name (input_bfd, input_section),
2754	     (long) rel->r_offset,
2755	     howto->name,
2756	     sym_name);
2757	  ret = FALSE;
2758	}
2759
2760      /* If this symbol is in an overlay area, we may need to relocate
2761	 to the overlay stub.  */
2762      addend = rel->r_addend;
2763      branch = (is_branch (contents + rel->r_offset)
2764		|| is_hint (contents + rel->r_offset));
2765      if (needs_ovl_stub (sym_name, sec, input_section, htab, branch))
2766	{
2767	  char *stub_name;
2768	  struct spu_stub_hash_entry *sh;
2769
2770	  stub_name = spu_stub_name (sec, h, rel);
2771	  if (stub_name == NULL)
2772	    return FALSE;
2773
2774	  sh = (struct spu_stub_hash_entry *)
2775	    bfd_hash_lookup (&htab->stub_hash_table, stub_name, FALSE, FALSE);
2776	  if (sh != NULL)
2777	    {
2778	      relocation = (htab->stub->output_section->vma
2779			    + htab->stub->output_offset
2780			    + sh->off);
2781	      addend = 0;
2782	    }
2783	  free (stub_name);
2784	}
2785
      r = _bfd_final_link_relocate (howto,
				    input_bfd,
				    input_section,
				    contents,
				    rel->r_offset, relocation, addend);

      if (r != bfd_reloc_ok)
	{
	  const char *msg = (const char *) 0;

	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      if (!((*info->callbacks->reloc_overflow)
		    (info, (h ? &h->root : NULL), sym_name, howto->name,
		     (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
		return FALSE;
	      break;

	    case bfd_reloc_undefined:
	      if (!((*info->callbacks->undefined_symbol)
		    (info, sym_name, input_bfd, input_section,
		     rel->r_offset, TRUE)))
		return FALSE;
	      break;

	    case bfd_reloc_outofrange:
	      msg = _("internal error: out of range error");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      msg = _("internal error: unsupported relocation error");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      msg = _("internal error: dangerous error");
	      goto common_error;

	    default:
	      msg = _("internal error: unknown error");
	      /* fall through */

	    common_error:
	      if (!((*info->callbacks->warning)
		    (info, msg, sym_name, input_bfd, input_section,
		     rel->r_offset)))
		return FALSE;
	      break;
	    }
	}
    }

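  /* Unless we are producing relocatable output or the user asked for
     all relocs to be kept, drop everything except the R_SPU_PPU32/64
     relocs noted above so that only those reach the output file.
     Returning 2 rather than TRUE asks the generic ELF linker to
     write them out.  */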
  if (ret
      && emit_these_relocs
      && !info->relocatable
      && !info->emitrelocations)
    {
      Elf_Internal_Rela *wrel;
      Elf_Internal_Shdr *rel_hdr;

      wrel = rel = relocs;
      relend = relocs + input_section->reloc_count;
      for (; rel < relend; rel++)
	{
	  int r_type;

	  r_type = ELF32_R_TYPE (rel->r_info);
	  if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	    *wrel++ = *rel;
	}
      input_section->reloc_count = wrel - relocs;
      /* Backflips for _bfd_elf_link_output_relocs.  */
      rel_hdr = &elf_section_data (input_section)->rel_hdr;
      rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
      ret = 2;
    }

  return ret;
}

/* Adjust _SPUEAR_ syms to point at their overlay stubs.  */

static bfd_boolean
spu_elf_output_symbol_hook (struct bfd_link_info *info,
			    const char *sym_name ATTRIBUTE_UNUSED,
			    Elf_Internal_Sym *sym,
			    asection *sym_sec ATTRIBUTE_UNUSED,
			    struct elf_link_hash_entry *h)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (!info->relocatable
      && htab->num_overlays != 0
      && h != NULL
      && (h->root.type == bfd_link_hash_defined
	  || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
    {
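      /* zero_rel is static and thus all zeros, so the stub name is
         computed as for a reference to H with a zero addend.  */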
      static Elf_Internal_Rela zero_rel;
      char *stub_name = spu_stub_name (h->root.u.def.section, h, &zero_rel);
      struct spu_stub_hash_entry *sh;

      if (stub_name == NULL)
	return FALSE;
      sh = (struct spu_stub_hash_entry *)
	bfd_hash_lookup (&htab->stub_hash_table, stub_name, FALSE, FALSE);
      free (stub_name);
      if (sh == NULL)
	return TRUE;
      sym->st_shndx
	= _bfd_elf_section_from_bfd_section (htab->stub->output_section->owner,
					     htab->stub->output_section);
      sym->st_value = (htab->stub->output_section->vma
		       + htab->stub->output_offset
		       + sh->off);
    }

  return TRUE;
}

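/* Non-zero when we are producing an SPU "plugin" image; see
   spu_elf_post_process_headers.  */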
static int spu_plugin = 0;

void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}

/* Set ELF header e_type for plugins.  */

static void
spu_elf_post_process_headers (bfd *abfd,
			      struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  if (spu_plugin)
    {
      Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);

      i_ehdrp->e_type = ET_DYN;
    }
}

/* We may add an extra PT_LOAD segment for .toe.  We also need extra
   segments for overlays.  */

static int
spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  int extra = htab->num_overlays;
  asection *sec;

  if (extra)
    ++extra;

  sec = bfd_get_section_by_name (abfd, ".toe");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
    ++extra;

  return extra;
}

/* Remove .toe section from other PT_LOAD segments and put it in
   a segment of its own.  Put overlays in separate segments too.  */

static bfd_boolean
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
  asection *toe, *s;
  struct elf_segment_map *m;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  toe = bfd_get_section_by_name (abfd, ".toe");
  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD && m->count > 1)
      for (i = 0; i < m->count; i++)
	if ((s = m->sections[i]) == toe
	    || spu_elf_section_data (s)->ovl_index != 0)
	  {
	    struct elf_segment_map *m2;
	    bfd_vma amt;

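	    /* If any sections follow S in this map, split them off
	       into a new PT_LOAD map inserted after this one.  */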
	    if (i + 1 < m->count)
	      {
		amt = sizeof (struct elf_segment_map);
		amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->count = m->count - (i + 1);
		memcpy (m2->sections, m->sections + i + 1,
			m2->count * sizeof (m->sections[0]));
		m2->p_type = PT_LOAD;
		m2->next = m->next;
		m->next = m2;
	      }
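	    /* If S is not the first section in this map, keep the
	       preceding sections here and give S a PT_LOAD map of its
	       own; otherwise just shrink this map to S alone.  */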
	    m->count = 1;
	    if (i != 0)
	      {
		m->count = i;
		amt = sizeof (struct elf_segment_map);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->p_type = PT_LOAD;
		m2->count = 1;
		m2->sections[0] = s;
		m2->next = m->next;
		m->next = m2;
	      }
	    break;
	  }

  return TRUE;
}

/* Check that all loadable section VMAs lie in the range
   LO .. HI inclusive, and that no section extends beyond HI.
   Return the first offending section, or NULL if all is well.  */

asection *
spu_elf_check_vma (bfd *abfd, bfd_vma lo, bfd_vma hi)
{
  struct elf_segment_map *m;
  unsigned int i;

  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD)
      for (i = 0; i < m->count; i++)
	if (m->sections[i]->size != 0
	    && (m->sections[i]->vma < lo
		|| m->sections[i]->vma > hi
		|| m->sections[i]->vma + m->sections[i]->size - 1 > hi))
	  return m->sections[i];

  return NULL;
}

/* Tweak the section type of .note.spu_name.  */

static bfd_boolean
spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
		       Elf_Internal_Shdr *hdr,
		       asection *sec)
{
  if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
    hdr->sh_type = SHT_NOTE;
  return TRUE;
}

/* Tweak phdrs before writing them out.  */

static int
spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  const struct elf_backend_data *bed;
  struct elf_obj_tdata *tdata;
  Elf_Internal_Phdr *phdr, *last;
  struct spu_link_hash_table *htab;
  unsigned int count;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  bed = get_elf_backend_data (abfd);
  tdata = elf_tdata (abfd);
  phdr = tdata->phdr;
  count = tdata->program_header_size / bed->s->sizeof_phdr;
  htab = spu_hash_table (info);
  if (htab->num_overlays != 0)
    {
      struct elf_segment_map *m;
      unsigned int o;

      for (i = 0, m = elf_tdata (abfd)->segment_map; m; ++i, m = m->next)
	if (m->count != 0
	    && (o = spu_elf_section_data (m->sections[0])->ovl_index) != 0)
	  {
	    /* Mark this as an overlay header.  */
	    phdr[i].p_flags |= PF_OVERLAY;

	    if (htab->ovtab != NULL && htab->ovtab->size != 0)
	      {
		bfd_byte *p = htab->ovtab->contents;
		unsigned int off = (o - 1) * 16 + 8;

		/* Write file_off into _ovly_table.  */
		bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
	      }
	  }
    }

  /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
     of 16.  This should always be possible when using the standard
     linker scripts, but don't create overlapping segments if
     someone is playing games with linker scripts.  */
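  /* First scan from the last PT_LOAD down, checking that the rounding
     could not make any segment run into the one that follows it.  */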
  last = NULL;
  for (i = count; i-- != 0; )
    if (phdr[i].p_type == PT_LOAD)
      {
	unsigned adjust;

	adjust = -phdr[i].p_filesz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
	  break;

	adjust = -phdr[i].p_memsz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_filesz != 0
	    && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
	    && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
	  break;

	if (phdr[i].p_filesz != 0)
	  last = &phdr[i];
      }

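  /* I wrapped to (unsigned int) -1 only if the scan above found no
     potential overlap; only then do the actual rounding.  */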
  if (i == (unsigned int) -1)
    for (i = count; i-- != 0; )
      if (phdr[i].p_type == PT_LOAD)
	{
	  unsigned adjust;

	  adjust = -phdr[i].p_filesz & 15;
	  phdr[i].p_filesz += adjust;

	  adjust = -phdr[i].p_memsz & 15;
	  phdr[i].p_memsz += adjust;
	}

  return TRUE;
}

#define TARGET_BIG_SYM		bfd_elf32_spu_vec
#define TARGET_BIG_NAME		"elf32-spu"
#define ELF_ARCH		bfd_arch_spu
#define ELF_MACHINE_CODE	EM_SPU
/* This matches the alignment need for DMA.  */
#define ELF_MAXPAGESIZE		0x80
#define elf_backend_rela_normal		1
#define elf_backend_can_gc_sections	1

#define bfd_elf32_bfd_reloc_type_lookup		spu_elf_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup		spu_elf_reloc_name_lookup
#define elf_info_to_howto			spu_elf_info_to_howto
#define elf_backend_count_relocs		spu_elf_count_relocs
#define elf_backend_relocate_section		spu_elf_relocate_section
#define elf_backend_symbol_processing		spu_elf_backend_symbol_processing
#define elf_backend_link_output_symbol_hook	spu_elf_output_symbol_hook
#define bfd_elf32_new_section_hook		spu_elf_new_section_hook
#define bfd_elf32_bfd_link_hash_table_create	spu_elf_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_free	spu_elf_link_hash_table_free

#define elf_backend_additional_program_headers	spu_elf_additional_program_headers
#define elf_backend_modify_segment_map		spu_elf_modify_segment_map
#define elf_backend_modify_program_headers	spu_elf_modify_program_headers
#define elf_backend_post_process_headers	spu_elf_post_process_headers
#define elf_backend_fake_sections		spu_elf_fake_sections
#define elf_backend_special_sections		spu_elf_special_sections
#define bfd_elf32_bfd_final_link		spu_elf_final_link

#include "elf32-target.h"