1/* KVX-specific support for NN-bit ELF.
2   Copyright (C) 2009-2024 Free Software Foundation, Inc.
3   Contributed by Kalray SA.
4
5   This file is part of BFD, the Binary File Descriptor library.
6
7   This program is free software; you can redistribute it and/or modify
8   it under the terms of the GNU General Public License as published by
9   the Free Software Foundation; either version 3 of the License, or
10   (at your option) any later version.
11
12   This program is distributed in the hope that it will be useful,
13   but WITHOUT ANY WARRANTY; without even the implied warranty of
14   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15   GNU General Public License for more details.
16
17   You should have received a copy of the GNU General Public License
18   along with this program; see the file COPYING3. If not,
19   see <http://www.gnu.org/licenses/>.  */
20
21#include "sysdep.h"
22#include "bfd.h"
23#include "libiberty.h"
24#include "libbfd.h"
25#include "elf-bfd.h"
26#include "bfdlink.h"
27#include "objalloc.h"
28#include "elf/kvx.h"
29#include "elfxx-kvx.h"
30
31#define ARCH_SIZE	NN
32
33#if ARCH_SIZE == 64
34#define LOG_FILE_ALIGN	3
35#endif
36
37#if ARCH_SIZE == 32
38#define LOG_FILE_ALIGN	2
39#endif
40
41#define IS_KVX_TLS_RELOC(R_TYPE)			\
42  ((R_TYPE) == BFD_RELOC_KVX_S37_TLS_LE_LO10	\
43   || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_LE_UP27	\
44   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LE_LO10	\
45   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LE_UP27	\
46   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LE_EX6	\
47   || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_DTPOFF_LO10	\
48   || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_DTPOFF_UP27	\
49   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_DTPOFF_LO10	\
50   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_DTPOFF_UP27	\
51   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_DTPOFF_EX6	\
52   || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_IE_LO10	\
53   || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_IE_UP27	\
54   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_IE_LO10	\
55   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_IE_UP27	\
56   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_IE_EX6	\
57   || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_GD_LO10	\
58   || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_GD_UP27	\
59   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_GD_LO10	\
60   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_GD_UP27	\
61   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_GD_EX6	\
62   || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_LD_LO10	\
63   || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_LD_UP27	\
64   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LD_LO10	\
65   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LD_UP27	\
66   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LD_EX6	\
67   )
68
69#define IS_KVX_TLS_RELAX_RELOC(R_TYPE) 0
70
71#define ELIMINATE_COPY_RELOCS 0
72
/* Return size of a relocation entry.  HTAB is the bfd's
   elf_kvx_link_hash_table.  */
75#define RELOC_SIZE(HTAB) (sizeof (ElfNN_External_Rela))
76
77/* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32.  */
78#define GOT_ENTRY_SIZE                  (ARCH_SIZE / 8)
79#define PLT_ENTRY_SIZE                  (32)
80
81#define PLT_SMALL_ENTRY_SIZE            (4*4)
82
83/* Encoding of the nop instruction */
84#define INSN_NOP 0x00f0037f
85
86#define kvx_compute_jump_table_size(htab)		\
87  (((htab)->root.srelplt == NULL) ? 0			\
88   : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
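
/* For illustration: with the macro above, a link whose .rela.plt
   holds three PLT relocations reserves 3 * GOT_ENTRY_SIZE bytes for
   the jump table, i.e. 24 bytes on ELF64 and 12 bytes on ELF32.  */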
89
90static const bfd_byte elfNN_kvx_small_plt0_entry[PLT_ENTRY_SIZE] =
91{
92 /* FIXME KVX: no first entry, not used yet */
93  0
94};
95
/* A per-function entry in the procedure linkage table looks like
   this.  These PLT entries are used when the distance between the
   PLTGOT and the PLT is < 4GB.  */
99static const bfd_byte elfNN_kvx_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
100{
101  0x10, 0x00, 0xc4, 0x0f,       /* get $r16 = $pc     ;; */
102#if ARCH_SIZE == 32
103  0x10, 0x00, 0x40, 0xb0,       /* lwz $r16 = 0[$r16]   ;; */
104#else
105  0x10, 0x00, 0x40, 0xb8,       /* ld $r16 = 0[$r16] ;; */
106#endif
107  0x00, 0x00, 0x00, 0x18,       /* upper 27 bits for LSU */
108  0x10, 0x00, 0xd8, 0x0f,	/* igoto $r16          ;; */
109};
110
/* The long-branch stub uses the 43-bit immediate format of the make
   instruction.  */
112static const uint32_t elfNN_kvx_long_branch_stub[] =
113{
  0xe0400000,      /* make $r16 = LO10<imm43> EX6<imm43> */
115  0x00000000,      /* UP27<imm43> ;; */
  0x0fd80010,      /* igoto $r16  ;; */
117};
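
/* For illustration: the long-branch stub above is three 32-bit words
   (12 bytes).  kvx_build_one_stub below patches the first word with
   R_KVX_S43_LO10 and R_KVX_S43_EX6 and the second word with
   R_KVX_S43_UP27, so the make instruction materializes the
   (sign-extended) 43-bit target address in $r16 before the igoto.  */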
118
119#define elf_info_to_howto               elfNN_kvx_info_to_howto
120#define elf_info_to_howto_rel           elfNN_kvx_info_to_howto
121
122#define KVX_ELF_ABI_VERSION		0
123
124/* In case we're on a 32-bit machine, construct a 64-bit "-1" value.  */
125#define ALL_ONES (~ (bfd_vma) 0)
126
/* Indexed by the bfd internal reloc enumerators.
128   Therefore, the table needs to be synced with BFD_RELOC_KVX_*
129   in reloc.c.   */
130
131#define KVX_KV3_V1_KV3_V2_KV4_V1
132#include "elfxx-kvx-relocs.h"
133#undef KVX_KV3_V1_KV3_V2_KV4_V1
134
135/* Given HOWTO, return the bfd internal relocation enumerator.  */
136
137static bfd_reloc_code_real_type
138elfNN_kvx_bfd_reloc_from_howto (reloc_howto_type *howto)
139{
140  const int size = (int) ARRAY_SIZE (elf_kvx_howto_table);
141  const ptrdiff_t offset = howto - elf_kvx_howto_table;
142
143  if (offset >= 0 && offset < size)
144    return BFD_RELOC_KVX_RELOC_START + offset + 1;
145
146  return BFD_RELOC_KVX_RELOC_START + 1;
147}
148
149/* Given R_TYPE, return the bfd internal relocation enumerator.  */
150
151static bfd_reloc_code_real_type
152elfNN_kvx_bfd_reloc_from_type (bfd *abfd ATTRIBUTE_UNUSED, unsigned int r_type)
153{
154  static bool initialized_p = false;
155  /* Indexed by R_TYPE, values are offsets in the howto_table.  */
156  static unsigned int offsets[R_KVX_end];
157
158  if (!initialized_p)
159    {
160      unsigned int i;
161
162      for (i = 0; i < ARRAY_SIZE (elf_kvx_howto_table); ++i)
163	offsets[elf_kvx_howto_table[i].type] = i;
164
165      initialized_p = true;
166    }
167
168  /* PR 17512: file: b371e70a.  */
169  if (r_type >= R_KVX_end)
170    {
171      bfd_set_error (bfd_error_bad_value);
172      return BFD_RELOC_KVX_RELOC_END;
173    }
174
175  return (BFD_RELOC_KVX_RELOC_START + 1) + offsets[r_type];
176}
177
178struct elf_kvx_reloc_map
179{
180  bfd_reloc_code_real_type from;
181  bfd_reloc_code_real_type to;
182};
183
184/* Map bfd generic reloc to KVX-specific reloc.  */
185static const struct elf_kvx_reloc_map elf_kvx_reloc_map[] =
186{
187  {BFD_RELOC_NONE, BFD_RELOC_KVX_NONE},
188
189  /* Basic data relocations.  */
190  {BFD_RELOC_CTOR, BFD_RELOC_KVX_NN},
191  {BFD_RELOC_64, BFD_RELOC_KVX_64},
192  {BFD_RELOC_32, BFD_RELOC_KVX_32},
193  {BFD_RELOC_16, BFD_RELOC_KVX_16},
194  {BFD_RELOC_8,  BFD_RELOC_KVX_8},
195
196  {BFD_RELOC_64_PCREL, BFD_RELOC_KVX_64_PCREL},
197  {BFD_RELOC_32_PCREL, BFD_RELOC_KVX_32_PCREL},
198};
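
/* Note: BFD_RELOC_KVX_NN above is the pointer-sized relocation,
   i.e. BFD_RELOC_KVX_64 when this file is built for ELF64 and
   BFD_RELOC_KVX_32 when it is built for ELF32, so BFD_RELOC_CTOR
   always maps to a native-word relocation.  */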
199
200/* Given the bfd internal relocation enumerator in CODE, return the
201   corresponding howto entry.  */
202
203static reloc_howto_type *
204elfNN_kvx_howto_from_bfd_reloc (bfd_reloc_code_real_type code)
205{
206  unsigned int i;
207
208  /* Convert bfd generic reloc to KVX-specific reloc.  */
209  if (code < BFD_RELOC_KVX_RELOC_START || code > BFD_RELOC_KVX_RELOC_END)
210    for (i = 0; i < ARRAY_SIZE (elf_kvx_reloc_map) ; i++)
211      if (elf_kvx_reloc_map[i].from == code)
212	{
213	  code = elf_kvx_reloc_map[i].to;
214	  break;
215	}
216
217  if (code > BFD_RELOC_KVX_RELOC_START && code < BFD_RELOC_KVX_RELOC_END)
218      return &elf_kvx_howto_table[code - (BFD_RELOC_KVX_RELOC_START + 1)];
219
220  return NULL;
221}
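
/* Worked example for the helpers above: if a howto sits at index I
   in elf_kvx_howto_table, elfNN_kvx_bfd_reloc_from_howto returns
   BFD_RELOC_KVX_RELOC_START + 1 + I, and
   elfNN_kvx_howto_from_bfd_reloc maps that enumerator back to
   &elf_kvx_howto_table[I].  */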
222
223static reloc_howto_type *
224elfNN_kvx_howto_from_type (bfd *abfd, unsigned int r_type)
225{
226  bfd_reloc_code_real_type val;
227  reloc_howto_type *howto;
228
229#if ARCH_SIZE == 32
230  if (r_type > 256)
231    {
232      bfd_set_error (bfd_error_bad_value);
233      return NULL;
234    }
235#endif
236
237  val = elfNN_kvx_bfd_reloc_from_type (abfd, r_type);
238  howto = elfNN_kvx_howto_from_bfd_reloc (val);
239
240  if (howto != NULL)
241    return howto;
242
243  bfd_set_error (bfd_error_bad_value);
244  return NULL;
245}
246
247static bool
248elfNN_kvx_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
249			 Elf_Internal_Rela *elf_reloc)
250{
251  unsigned int r_type;
252
253  r_type = ELFNN_R_TYPE (elf_reloc->r_info);
254  bfd_reloc->howto = elfNN_kvx_howto_from_type (abfd, r_type);
255
256  if (bfd_reloc->howto == NULL)
257    {
258      /* xgettext:c-format */
259      _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
260			  abfd, r_type);
261      return false;
262    }
263  return true;
264}
265
266static reloc_howto_type *
267elfNN_kvx_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
268			     bfd_reloc_code_real_type code)
269{
270  reloc_howto_type *howto = elfNN_kvx_howto_from_bfd_reloc (code);
271
272  if (howto != NULL)
273    return howto;
274
275  bfd_set_error (bfd_error_bad_value);
276  return NULL;
277}
278
279static reloc_howto_type *
280elfNN_kvx_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
281			     const char *r_name)
282{
283  unsigned int i;
284
285  for (i = 0; i < ARRAY_SIZE (elf_kvx_howto_table); ++i)
286    if (elf_kvx_howto_table[i].name != NULL
287	&& strcasecmp (elf_kvx_howto_table[i].name, r_name) == 0)
288      return &elf_kvx_howto_table[i];
289
290  return NULL;
291}
292
293#define TARGET_LITTLE_SYM               kvx_elfNN_vec
294#define TARGET_LITTLE_NAME              "elfNN-kvx"
295
296/* The linker script knows the section names for placement.
297   The entry_names are used to do simple name mangling on the stubs.
298   Given a function name, and its type, the stub can be found. The
   name can be changed.  The only requirement is that the %s be
   present.  */
300#define STUB_ENTRY_NAME   "__%s_veneer"
301
302/* The name of the dynamic interpreter.  This is put in the .interp
303   section.  */
304#define ELF_DYNAMIC_INTERPRETER     "/lib/ld.so.1"
305
306
/* PCREL27 is sign-extended and scaled by 4.  */
308#define KVX_MAX_FWD_CALL_OFFSET \
309  (((1 << 26) - 1) << 2)
310#define KVX_MAX_BWD_CALL_OFFSET \
311  (-((1 << 26) << 2))
312
313/* Check that the destination of the call is within the PCREL27
314   range. */
315static int
316kvx_valid_call_p (bfd_vma value, bfd_vma place)
317{
318  bfd_signed_vma offset = (bfd_signed_vma) (value - place);
319  return (offset <= KVX_MAX_FWD_CALL_OFFSET
320	  && offset >= KVX_MAX_BWD_CALL_OFFSET);
321}
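
/* Worked example (arbitrary addresses): KVX_MAX_FWD_CALL_OFFSET is
   ((1 << 26) - 1) << 2 = 0x0ffffffc and KVX_MAX_BWD_CALL_OFFSET is
   -(1 << 28) = -0x10000000, so kvx_valid_call_p (0x10000000, 0x10000)
   is true while kvx_valid_call_p (0x20000000, 0x10000) is false,
   since the latter displacement (0x1fff0000) exceeds the forward
   limit.  */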
322
323/* Section name for stubs is the associated section name plus this
324   string.  */
325#define STUB_SUFFIX ".stub"
326
327enum elf_kvx_stub_type
328{
329  kvx_stub_none,
330  kvx_stub_long_branch,
331};
332
333struct elf_kvx_stub_hash_entry
334{
335  /* Base hash table entry structure.  */
336  struct bfd_hash_entry root;
337
338  /* The stub section.  */
339  asection *stub_sec;
340
341  /* Offset within stub_sec of the beginning of this stub.  */
342  bfd_vma stub_offset;
343
344  /* Given the symbol's value and its section we can determine its final
345     value when building the stubs (so the stub knows where to jump).  */
346  bfd_vma target_value;
347  asection *target_section;
348
349  enum elf_kvx_stub_type stub_type;
350
351  /* The symbol table entry, if any, that this was derived from.  */
352  struct elf_kvx_link_hash_entry *h;
353
354  /* Destination symbol type */
355  unsigned char st_type;
356
357  /* Where this stub is being called from, or, in the case of combined
358     stub sections, the first input section in the group.  */
359  asection *id_sec;
360
361  /* The name for the local symbol at the start of this stub.  The
362     stub name in the hash table has to be unique; this does not, so
363     it can be friendlier.  */
364  char *output_name;
365};
366
367/* Used to build a map of a section.  This is required for mixed-endian
368   code/data.  */
369
370typedef struct elf_elf_section_map
371{
372  bfd_vma vma;
373  char type;
374}
375elf_kvx_section_map;
376
377
378typedef struct _kvx_elf_section_data
379{
380  struct bfd_elf_section_data elf;
381  unsigned int mapcount;
382  unsigned int mapsize;
383  elf_kvx_section_map *map;
384}
385_kvx_elf_section_data;
386
387#define elf_kvx_section_data(sec) \
388  ((_kvx_elf_section_data *) elf_section_data (sec))
389
390struct elf_kvx_local_symbol
391{
392  unsigned int got_type;
393  bfd_signed_vma got_refcount;
394  bfd_vma got_offset;
395};
396
397struct elf_kvx_obj_tdata
398{
399  struct elf_obj_tdata root;
400
401  /* local symbol descriptors */
402  struct elf_kvx_local_symbol *locals;
403
404  /* Zero to warn when linking objects with incompatible enum sizes.  */
405  int no_enum_size_warning;
406
407  /* Zero to warn when linking objects with incompatible wchar_t sizes.  */
408  int no_wchar_size_warning;
409};
410
411#define elf_kvx_tdata(bfd)				\
412  ((struct elf_kvx_obj_tdata *) (bfd)->tdata.any)
413
414#define elf_kvx_locals(bfd) (elf_kvx_tdata (bfd)->locals)
415
416#define is_kvx_elf(bfd)				\
417  (bfd_get_flavour (bfd) == bfd_target_elf_flavour	\
418   && elf_tdata (bfd) != NULL				\
419   && elf_object_id (bfd) == KVX_ELF_DATA)
420
421static bool
422elfNN_kvx_mkobject (bfd *abfd)
423{
424  return bfd_elf_allocate_object (abfd, sizeof (struct elf_kvx_obj_tdata),
425				  KVX_ELF_DATA);
426}
427
428#define elf_kvx_hash_entry(ent) \
429  ((struct elf_kvx_link_hash_entry *)(ent))
430
431#define GOT_UNKNOWN    0
432#define GOT_NORMAL     1
433
434#define GOT_TLS_GD     2
435#define GOT_TLS_IE     4
436#define GOT_TLS_LD     8
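
/* Note: got_type is used as a bit mask, so a symbol referenced
   through, say, both general-dynamic and initial-exec TLS sequences
   may record GOT_TLS_GD | GOT_TLS_IE (2 | 4 = 6).  */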
437
438/* KVX ELF linker hash entry.  */
439struct elf_kvx_link_hash_entry
440{
441  struct elf_link_hash_entry root;
442
443  /* Since PLT entries have variable size, we need to record the
444     index into .got.plt instead of recomputing it from the PLT
445     offset.  */
446  bfd_signed_vma plt_got_offset;
447
448  /* Bit mask representing the type of GOT entry(s) if any required by
449     this symbol.  */
450  unsigned int got_type;
451
452  /* A pointer to the most recently used stub hash entry against this
453     symbol.  */
454  struct elf_kvx_stub_hash_entry *stub_cache;
455};
456
457/* Get the KVX elf linker hash table from a link_info structure.  */
458#define elf_kvx_hash_table(info)					\
459  ((struct elf_kvx_link_hash_table *) ((info)->hash))
460
461#define kvx_stub_hash_lookup(table, string, create, copy)		\
462  ((struct elf_kvx_stub_hash_entry *)				\
463   bfd_hash_lookup ((table), (string), (create), (copy)))
464
465/* KVX ELF linker hash table.  */
466struct elf_kvx_link_hash_table
467{
468  /* The main hash table.  */
469  struct elf_link_hash_table root;
470
471  /* Nonzero to force PIC branch veneers.  */
472  int pic_veneer;
473
474  /* The number of bytes in the initial entry in the PLT.  */
475  bfd_size_type plt_header_size;
476
  /* The number of bytes in the subsequent PLT entries.  */
478  bfd_size_type plt_entry_size;
479
480  /* The bytes of the subsequent PLT entry.  */
481  const bfd_byte *plt_entry;
482
483  /* Short-cuts to get to dynamic linker sections.  */
484  asection *sdynbss;
485  asection *srelbss;
486
487  /* Small local sym cache.  */
488  struct sym_cache sym_cache;
489
490  /* For convenience in allocate_dynrelocs.  */
491  bfd *obfd;
492
493  /* The amount of space used by the reserved portion of the sgotplt
494     section, plus whatever space is used by the jump slots.  */
495  bfd_vma sgotplt_jump_table_size;
496
497  /* The stub hash table.  */
498  struct bfd_hash_table stub_hash_table;
499
500  /* Linker stub bfd.  */
501  bfd *stub_bfd;
502
503  /* Linker call-backs.  */
504  asection *(*add_stub_section) (const char *, asection *);
505  void (*layout_sections_again) (void);
506
507  /* Array to keep track of which stub sections have been created, and
508     information on stub grouping.  */
509  struct map_stub
510  {
511    /* This is the section to which stubs in the group will be
512       attached.  */
513    asection *link_sec;
514    /* The stub section.  */
515    asection *stub_sec;
516  } *stub_group;
517
518  /* Assorted information used by elfNN_kvx_size_stubs.  */
519  unsigned int bfd_count;
520  unsigned int top_index;
521  asection **input_list;
522};
523
/* Create an entry in a KVX ELF linker hash table.  */
525
526static struct bfd_hash_entry *
527elfNN_kvx_link_hash_newfunc (struct bfd_hash_entry *entry,
528			     struct bfd_hash_table *table,
529			     const char *string)
530{
531  struct elf_kvx_link_hash_entry *ret =
532    (struct elf_kvx_link_hash_entry *) entry;
533
534  /* Allocate the structure if it has not already been allocated by a
535     subclass.  */
536  if (ret == NULL)
537    ret = bfd_hash_allocate (table,
538			     sizeof (struct elf_kvx_link_hash_entry));
539  if (ret == NULL)
540    return (struct bfd_hash_entry *) ret;
541
542  /* Call the allocation method of the superclass.  */
543  ret = ((struct elf_kvx_link_hash_entry *)
544	 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
545				     table, string));
546  if (ret != NULL)
547    {
548      ret->got_type = GOT_UNKNOWN;
549      ret->plt_got_offset = (bfd_vma) - 1;
550      ret->stub_cache = NULL;
551    }
552
553  return (struct bfd_hash_entry *) ret;
554}
555
556/* Initialize an entry in the stub hash table.  */
557
558static struct bfd_hash_entry *
559stub_hash_newfunc (struct bfd_hash_entry *entry,
560		   struct bfd_hash_table *table, const char *string)
561{
562  /* Allocate the structure if it has not already been allocated by a
563     subclass.  */
564  if (entry == NULL)
565    {
566      entry = bfd_hash_allocate (table,
567				 sizeof (struct
568					 elf_kvx_stub_hash_entry));
569      if (entry == NULL)
570	return entry;
571    }
572
573  /* Call the allocation method of the superclass.  */
574  entry = bfd_hash_newfunc (entry, table, string);
575  if (entry != NULL)
576    {
577      struct elf_kvx_stub_hash_entry *eh;
578
579      /* Initialize the local fields.  */
580      eh = (struct elf_kvx_stub_hash_entry *) entry;
581      eh->stub_sec = NULL;
582      eh->stub_offset = 0;
583      eh->target_value = 0;
584      eh->target_section = NULL;
585      eh->stub_type = kvx_stub_none;
586      eh->h = NULL;
587      eh->id_sec = NULL;
588    }
589
590  return entry;
591}
592
593/* Copy the extra info we tack onto an elf_link_hash_entry.  */
594
595static void
596elfNN_kvx_copy_indirect_symbol (struct bfd_link_info *info,
597				struct elf_link_hash_entry *dir,
598				struct elf_link_hash_entry *ind)
599{
600  struct elf_kvx_link_hash_entry *edir, *eind;
601
602  edir = (struct elf_kvx_link_hash_entry *) dir;
603  eind = (struct elf_kvx_link_hash_entry *) ind;
604
605  if (ind->root.type == bfd_link_hash_indirect)
606    {
      /* Copy over GOT type info.  */
608      if (dir->got.refcount <= 0)
609	{
610	  edir->got_type = eind->got_type;
611	  eind->got_type = GOT_UNKNOWN;
612	}
613    }
614
615  _bfd_elf_link_hash_copy_indirect (info, dir, ind);
616}
617
618/* Destroy a KVX elf linker hash table.  */
619
620static void
621elfNN_kvx_link_hash_table_free (bfd *obfd)
622{
623  struct elf_kvx_link_hash_table *ret
624    = (struct elf_kvx_link_hash_table *) obfd->link.hash;
625
626  bfd_hash_table_free (&ret->stub_hash_table);
627  _bfd_elf_link_hash_table_free (obfd);
628}
629
630/* Create a KVX elf linker hash table.  */
631
632static struct bfd_link_hash_table *
633elfNN_kvx_link_hash_table_create (bfd *abfd)
634{
635  struct elf_kvx_link_hash_table *ret;
636  bfd_size_type amt = sizeof (struct elf_kvx_link_hash_table);
637
638  ret = bfd_zmalloc (amt);
639  if (ret == NULL)
640    return NULL;
641
642  if (!_bfd_elf_link_hash_table_init
643      (&ret->root, abfd, elfNN_kvx_link_hash_newfunc,
644       sizeof (struct elf_kvx_link_hash_entry), KVX_ELF_DATA))
645    {
646      free (ret);
647      return NULL;
648    }
649
650  ret->plt_header_size = PLT_ENTRY_SIZE;
651  ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
652  ret->plt_entry = elfNN_kvx_small_plt_entry;
653
654  ret->obfd = abfd;
655
656  if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
657			    sizeof (struct elf_kvx_stub_hash_entry)))
658    {
659      _bfd_elf_link_hash_table_free (abfd);
660      return NULL;
661    }
662
663  ret->root.root.hash_table_free = elfNN_kvx_link_hash_table_free;
664
665  return &ret->root.root;
666}
667
668static bfd_reloc_status_type
669kvx_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
670	      bfd_vma offset, bfd_vma value)
671{
672  reloc_howto_type *howto;
673
674  howto = elfNN_kvx_howto_from_type (input_bfd, r_type);
675  r_type = elfNN_kvx_bfd_reloc_from_type (input_bfd, r_type);
676  return _bfd_kvx_elf_put_addend (input_bfd,
677				  input_section->contents + offset, r_type,
678				  howto, value);
679}
680
681/* Determine the type of stub needed, if any, for a call.  */
682
683static enum elf_kvx_stub_type
684kvx_type_of_stub (asection *input_sec,
685		  const Elf_Internal_Rela *rel,
686		  asection *sym_sec,
687		  unsigned char st_type,
688		  bfd_vma destination)
689{
690  bfd_vma location;
691  bfd_signed_vma branch_offset;
692  unsigned int r_type;
693  enum elf_kvx_stub_type stub_type = kvx_stub_none;
694
695  if (st_type != STT_FUNC
696      && (sym_sec == input_sec))
697    return stub_type;
698
699  /* Determine where the call point is.  */
700  location = (input_sec->output_offset
701	      + input_sec->output_section->vma + rel->r_offset);
702
703  branch_offset = (bfd_signed_vma) (destination - location);
704
705  r_type = ELFNN_R_TYPE (rel->r_info);
706
707  /* We don't want to redirect any old unconditional jump in this way,
708     only one which is being used for a sibcall, where it is
709     acceptable for the R16 and R17 registers to be clobbered.  */
710  if (r_type == R_KVX_PCREL27
711      && (branch_offset > KVX_MAX_FWD_CALL_OFFSET
712	  || branch_offset < KVX_MAX_BWD_CALL_OFFSET))
713    {
714      stub_type = kvx_stub_long_branch;
715    }
716
717  return stub_type;
718}
719
720/* Build a name for an entry in the stub hash table.  */
721
722static char *
723elfNN_kvx_stub_name (const asection *input_section,
724		     const asection *sym_sec,
725		     const struct elf_kvx_link_hash_entry *hash,
726		     const Elf_Internal_Rela *rel)
727{
728  char *stub_name;
729  bfd_size_type len;
730
731  if (hash)
732    {
733      len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
734      stub_name = bfd_malloc (len);
735      if (stub_name != NULL)
736	snprintf (stub_name, len, "%08x_%s+%" PRIx64 "x",
737		  (unsigned int) input_section->id,
738		  hash->root.root.root.string,
739		  (uint64_t) rel->r_addend);
740    }
741  else
742    {
743      len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
744      stub_name = bfd_malloc (len);
745      if (stub_name != NULL)
746	snprintf (stub_name, len, "%08x_%x:%x+%" PRIx64 "x",
747		  (unsigned int) input_section->id,
748		  (unsigned int) sym_sec->id,
749		  (unsigned int) ELFNN_R_SYM (rel->r_info),
750		  (uint64_t) rel->r_addend);
751    }
752
753  return stub_name;
754}
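
/* For illustration (arbitrary values):

     snprintf (buf, len, "%08x_%s+%" PRIx64 "x",
	       (unsigned int) 0x2a, "foo", (uint64_t) 0);

   yields "0000002a_foo+0x" for a global symbol (the trailing 'x'
   comes from the format string); local symbols use the
   "<section id>_<sym sec id>:<sym index>+<addend>" form instead.
   These names only need to be unique hash keys.  */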
755
756/* Return true if symbol H should be hashed in the `.gnu.hash' section.  For
757   executable PLT slots where the executable never takes the address of those
758   functions, the function symbols are not added to the hash table.  */
759
760static bool
761elf_kvx_hash_symbol (struct elf_link_hash_entry *h)
762{
763  if (h->plt.offset != (bfd_vma) -1
764      && !h->def_regular
765      && !h->pointer_equality_needed)
766    return false;
767
768  return _bfd_elf_hash_symbol (h);
769}
770
771
772/* Look up an entry in the stub hash.  Stub entries are cached because
773   creating the stub name takes a bit of time.  */
774
775static struct elf_kvx_stub_hash_entry *
776elfNN_kvx_get_stub_entry (const asection *input_section,
777			  const asection *sym_sec,
778			  struct elf_link_hash_entry *hash,
779			  const Elf_Internal_Rela *rel,
780			  struct elf_kvx_link_hash_table *htab)
781{
782  struct elf_kvx_stub_hash_entry *stub_entry;
783  struct elf_kvx_link_hash_entry *h =
784    (struct elf_kvx_link_hash_entry *) hash;
785  const asection *id_sec;
786
787  if ((input_section->flags & SEC_CODE) == 0)
788    return NULL;
789
790  /* If this input section is part of a group of sections sharing one
791     stub section, then use the id of the first section in the group.
792     Stub names need to include a section id, as there may well be
793     more than one stub used to reach say, printf, and we need to
794     distinguish between them.  */
795  id_sec = htab->stub_group[input_section->id].link_sec;
796
797  if (h != NULL && h->stub_cache != NULL
798      && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
799    {
800      stub_entry = h->stub_cache;
801    }
802  else
803    {
804      char *stub_name;
805
806      stub_name = elfNN_kvx_stub_name (id_sec, sym_sec, h, rel);
807      if (stub_name == NULL)
808	return NULL;
809
810      stub_entry = kvx_stub_hash_lookup (&htab->stub_hash_table,
811					 stub_name, false, false);
812      if (h != NULL)
813	h->stub_cache = stub_entry;
814
815      free (stub_name);
816    }
817
818  return stub_entry;
819}
820
821
822/* Create a stub section.  */
823
824static asection *
825_bfd_kvx_create_stub_section (asection *section,
826			      struct elf_kvx_link_hash_table *htab)
827
828{
829  size_t namelen;
830  bfd_size_type len;
831  char *s_name;
832
833  namelen = strlen (section->name);
834  len = namelen + sizeof (STUB_SUFFIX);
835  s_name = bfd_alloc (htab->stub_bfd, len);
836  if (s_name == NULL)
837    return NULL;
838
839  memcpy (s_name, section->name, namelen);
840  memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
841  return (*htab->add_stub_section) (s_name, section);
842}
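
/* For example, for an input section named ".text" this creates a
   stub section named ".text" STUB_SUFFIX, i.e. ".text.stub", via the
   add_stub_section linker callback.  */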
843
844
/* Find or create a stub section for a link section.

   Return the stub section used to collect stubs attached to the
   specified link section, creating it if necessary.  */
849
850static asection *
851_bfd_kvx_get_stub_for_link_section (asection *link_section,
852				    struct elf_kvx_link_hash_table *htab)
853{
854  if (htab->stub_group[link_section->id].stub_sec == NULL)
855    htab->stub_group[link_section->id].stub_sec
856      = _bfd_kvx_create_stub_section (link_section, htab);
857  return htab->stub_group[link_section->id].stub_sec;
858}
859
860
861/* Find or create a stub section in the stub group for an input
862   section.  */
863
864static asection *
865_bfd_kvx_create_or_find_stub_sec (asection *section,
866				  struct elf_kvx_link_hash_table *htab)
867{
868  asection *link_sec = htab->stub_group[section->id].link_sec;
869  return _bfd_kvx_get_stub_for_link_section (link_sec, htab);
870}
871
872
873/* Add a new stub entry in the stub group associated with an input
874   section to the stub hash.  Not all fields of the new stub entry are
875   initialised.  */
876
877static struct elf_kvx_stub_hash_entry *
878_bfd_kvx_add_stub_entry_in_group (const char *stub_name,
879				  asection *section,
880				  struct elf_kvx_link_hash_table *htab)
881{
882  asection *link_sec;
883  asection *stub_sec;
884  struct elf_kvx_stub_hash_entry *stub_entry;
885
886  link_sec = htab->stub_group[section->id].link_sec;
887  stub_sec = _bfd_kvx_create_or_find_stub_sec (section, htab);
888
889  /* Enter this entry into the linker stub hash table.  */
890  stub_entry = kvx_stub_hash_lookup (&htab->stub_hash_table, stub_name,
891				     true, false);
892  if (stub_entry == NULL)
893    {
894      /* xgettext:c-format */
895      _bfd_error_handler (_("%pB: cannot create stub entry %s"),
896			  section->owner, stub_name);
897      return NULL;
898    }
899
900  stub_entry->stub_sec = stub_sec;
901  stub_entry->stub_offset = 0;
902  stub_entry->id_sec = link_sec;
903
904  return stub_entry;
905}
906
907static bool
908kvx_build_one_stub (struct bfd_hash_entry *gen_entry,
909		    void *in_arg)
910{
911  struct elf_kvx_stub_hash_entry *stub_entry;
912  asection *stub_sec;
913  bfd *stub_bfd;
914  bfd_byte *loc;
915  bfd_vma sym_value;
916  unsigned int template_size;
917  const uint32_t *template;
918  unsigned int i;
919  struct bfd_link_info *info;
920
921  /* Massage our args to the form they really have.  */
922  stub_entry = (struct elf_kvx_stub_hash_entry *) gen_entry;
923
924  info = (struct bfd_link_info *) in_arg;
925
926  /* Fail if the target section could not be assigned to an output
927     section.  The user should fix his linker script.  */
928  if (stub_entry->target_section->output_section == NULL
929      && info->non_contiguous_regions)
930    info->callbacks->einfo (_("%F%P: Could not assign '%pA' to an output section. "
931			      "Retry without "
932			      "--enable-non-contiguous-regions.\n"),
933			    stub_entry->target_section);
934
935  stub_sec = stub_entry->stub_sec;
936
937  /* Make a note of the offset within the stubs for this entry.  */
938  stub_entry->stub_offset = stub_sec->size;
939  loc = stub_sec->contents + stub_entry->stub_offset;
940
941  stub_bfd = stub_sec->owner;
942
943  /* This is the address of the stub destination.  */
944  sym_value = (stub_entry->target_value
945	       + stub_entry->target_section->output_offset
946	       + stub_entry->target_section->output_section->vma);
947
948  switch (stub_entry->stub_type)
949    {
950    case kvx_stub_long_branch:
951      template = elfNN_kvx_long_branch_stub;
952      template_size = sizeof (elfNN_kvx_long_branch_stub);
953      break;
954    default:
955      abort ();
956    }
957
958  for (i = 0; i < (template_size / sizeof template[0]); i++)
959    {
960      bfd_putl32 (template[i], loc);
961      loc += 4;
962    }
963
964  stub_sec->size += template_size;
965
966  switch (stub_entry->stub_type)
967    {
968    case kvx_stub_long_branch:
      /* The stub uses a make insn with a 43-bit immediate.
970	 We need to apply 3 relocations:
971	 BFD_RELOC_KVX_S43_LO10,
972	 BFD_RELOC_KVX_S43_UP27,
973	 BFD_RELOC_KVX_S43_EX6.  */
974      if (kvx_relocate (R_KVX_S43_LO10, stub_bfd, stub_sec,
975			stub_entry->stub_offset, sym_value) != bfd_reloc_ok)
976	BFD_FAIL ();
977      if (kvx_relocate (R_KVX_S43_EX6, stub_bfd, stub_sec,
978			stub_entry->stub_offset, sym_value) != bfd_reloc_ok)
979	BFD_FAIL ();
980      if (kvx_relocate (R_KVX_S43_UP27, stub_bfd, stub_sec,
981			stub_entry->stub_offset + 4, sym_value) != bfd_reloc_ok)
982	BFD_FAIL ();
983      break;
984    default:
985      abort ();
986    }
987
988  return true;
989}
990
991/* As above, but don't actually build the stub.  Just bump offset so
992   we know stub section sizes.  */
993
994static bool
995kvx_size_one_stub (struct bfd_hash_entry *gen_entry,
996		   void *in_arg ATTRIBUTE_UNUSED)
997{
998  struct elf_kvx_stub_hash_entry *stub_entry;
999  int size;
1000
1001  /* Massage our args to the form they really have.  */
1002  stub_entry = (struct elf_kvx_stub_hash_entry *) gen_entry;
1003
1004  switch (stub_entry->stub_type)
1005    {
1006    case kvx_stub_long_branch:
1007      size = sizeof (elfNN_kvx_long_branch_stub);
1008      break;
1009    default:
1010      abort ();
1011    }
1012
1013  stub_entry->stub_sec->size += size;
1014  return true;
1015}
1016
1017/* External entry points for sizing and building linker stubs.  */
1018
1019/* Set up various things so that we can make a list of input sections
1020   for each output section included in the link.  Returns -1 on error,
1021   0 when no stubs will be needed, and 1 on success.  */
1022
1023int
1024elfNN_kvx_setup_section_lists (bfd *output_bfd,
1025			       struct bfd_link_info *info)
1026{
1027  bfd *input_bfd;
1028  unsigned int bfd_count;
1029  unsigned int top_id, top_index;
1030  asection *section;
1031  asection **input_list, **list;
1032  bfd_size_type amt;
1033  struct elf_kvx_link_hash_table *htab =
1034    elf_kvx_hash_table (info);
1035
1036  if (!is_elf_hash_table ((const struct bfd_link_hash_table *)htab))
1037    return 0;
1038
1039  /* Count the number of input BFDs and find the top input section id.  */
1040  for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
1041       input_bfd != NULL; input_bfd = input_bfd->link.next)
1042    {
1043      bfd_count += 1;
1044      for (section = input_bfd->sections;
1045	   section != NULL; section = section->next)
1046	{
1047	  if (top_id < section->id)
1048	    top_id = section->id;
1049	}
1050    }
1051  htab->bfd_count = bfd_count;
1052
1053  amt = sizeof (struct map_stub) * (top_id + 1);
1054  htab->stub_group = bfd_zmalloc (amt);
1055  if (htab->stub_group == NULL)
1056    return -1;
1057
1058  /* We can't use output_bfd->section_count here to find the top output
1059     section index as some sections may have been removed, and
1060     _bfd_strip_section_from_output doesn't renumber the indices.  */
1061  for (section = output_bfd->sections, top_index = 0;
1062       section != NULL; section = section->next)
1063    {
1064      if (top_index < section->index)
1065	top_index = section->index;
1066    }
1067
1068  htab->top_index = top_index;
1069  amt = sizeof (asection *) * (top_index + 1);
1070  input_list = bfd_malloc (amt);
1071  htab->input_list = input_list;
1072  if (input_list == NULL)
1073    return -1;
1074
1075  /* For sections we aren't interested in, mark their entries with a
1076     value we can check later.  */
1077  list = input_list + top_index;
1078  do
1079    *list = bfd_abs_section_ptr;
1080  while (list-- != input_list);
1081
1082  for (section = output_bfd->sections;
1083       section != NULL; section = section->next)
1084    {
1085      if ((section->flags & SEC_CODE) != 0)
1086	input_list[section->index] = NULL;
1087    }
1088
1089  return 1;
1090}
1091
1092/* Used by elfNN_kvx_next_input_section and group_sections.  */
1093#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
1094
1095/* The linker repeatedly calls this function for each input section,
1096   in the order that input sections are linked into output sections.
1097   Build lists of input sections to determine groupings between which
1098   we may insert linker stubs.  */
1099
1100void
1101elfNN_kvx_next_input_section (struct bfd_link_info *info, asection *isec)
1102{
1103  struct elf_kvx_link_hash_table *htab =
1104    elf_kvx_hash_table (info);
1105
1106  if (isec->output_section->index <= htab->top_index)
1107    {
1108      asection **list = htab->input_list + isec->output_section->index;
1109
1110      if (*list != bfd_abs_section_ptr)
1111	{
1112	  /* Steal the link_sec pointer for our list.  */
1113	  /* This happens to make the list in reverse order,
1114	     which is what we want.  */
1115	  PREV_SEC (isec) = *list;
1116	  *list = isec;
1117	}
1118    }
1119}
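
/* Illustration: if code sections A, B and C of one output section
   are presented in that order, the list ends up headed by C with
   PREV_SEC (C) == B and PREV_SEC (B) == A, i.e. in reverse link
   order, which group_sections below starts by undoing.  */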
1120
1121/* See whether we can group stub sections together.  Grouping stub
1122   sections may result in fewer stubs.  More importantly, we need to
1123   put all .init* and .fini* stubs at the beginning of the .init or
1124   .fini output sections respectively, because glibc splits the
1125   _init and _fini functions into multiple parts.  Putting a stub in
1126   the middle of a function is not a good idea.  */
1127
1128static void
1129group_sections (struct elf_kvx_link_hash_table *htab,
1130		bfd_size_type stub_group_size,
1131		bool stubs_always_after_branch)
1132{
1133  asection **list = htab->input_list;
1134
1135  do
1136    {
1137      asection *tail = *list;
1138      asection *head;
1139
1140      if (tail == bfd_abs_section_ptr)
1141	continue;
1142
1143      /* Reverse the list: we must avoid placing stubs at the
1144	 beginning of the section because the beginning of the text
1145	 section may be required for an interrupt vector in bare metal
1146	 code.  */
1147#define NEXT_SEC PREV_SEC
1148      head = NULL;
1149      while (tail != NULL)
1150	{
1151	  /* Pop from tail.  */
1152	  asection *item = tail;
1153	  tail = PREV_SEC (item);
1154
1155	  /* Push on head.  */
1156	  NEXT_SEC (item) = head;
1157	  head = item;
1158	}
1159
1160      while (head != NULL)
1161	{
1162	  asection *curr;
1163	  asection *next;
1164	  bfd_vma stub_group_start = head->output_offset;
1165	  bfd_vma end_of_next;
1166
1167	  curr = head;
1168	  while (NEXT_SEC (curr) != NULL)
1169	    {
1170	      next = NEXT_SEC (curr);
1171	      end_of_next = next->output_offset + next->size;
1172	      if (end_of_next - stub_group_start >= stub_group_size)
1173		/* End of NEXT is too far from start, so stop.  */
1174		break;
1175	      /* Add NEXT to the group.  */
1176	      curr = next;
1177	    }
1178
1179	  /* OK, the size from the start to the start of CURR is less
1180	     than stub_group_size and thus can be handled by one stub
1181	     section.  (Or the head section is itself larger than
1182	     stub_group_size, in which case we may be toast.)
1183	     We should really be keeping track of the total size of
1184	     stubs added here, as stubs contribute to the final output
1185	     section size.  */
1186	  do
1187	    {
1188	      next = NEXT_SEC (head);
1189	      /* Set up this stub group.  */
1190	      htab->stub_group[head->id].link_sec = curr;
1191	    }
1192	  while (head != curr && (head = next) != NULL);
1193
1194	  /* But wait, there's more!  Input sections up to stub_group_size
1195	     bytes after the stub section can be handled by it too.  */
1196	  if (!stubs_always_after_branch)
1197	    {
1198	      stub_group_start = curr->output_offset + curr->size;
1199
1200	      while (next != NULL)
1201		{
1202		  end_of_next = next->output_offset + next->size;
1203		  if (end_of_next - stub_group_start >= stub_group_size)
1204		    /* End of NEXT is too far from stubs, so stop.  */
1205		    break;
1206		  /* Add NEXT to the stub group.  */
1207		  head = next;
1208		  next = NEXT_SEC (head);
1209		  htab->stub_group[head->id].link_sec = curr;
1210		}
1211	    }
1212	  head = next;
1213	}
1214    }
1215  while (list++ != htab->input_list + htab->top_index);
1216
1217  free (htab->input_list);
1218}
1219
1220static void
1221_bfd_kvx_resize_stubs (struct elf_kvx_link_hash_table *htab)
1222{
1223  asection *section;
1224
1225  /* OK, we've added some stubs.  Find out the new size of the
1226     stub sections.  */
1227  for (section = htab->stub_bfd->sections;
1228       section != NULL; section = section->next)
1229    {
1230      /* Ignore non-stub sections.  */
1231      if (!strstr (section->name, STUB_SUFFIX))
1232	continue;
1233      section->size = 0;
1234    }
1235
1236  bfd_hash_traverse (&htab->stub_hash_table, kvx_size_one_stub, htab);
1237}
1238
1239/* Satisfy the ELF linker by filling in some fields in our fake bfd.  */
1240
1241bool
1242kvx_elfNN_init_stub_bfd (struct bfd_link_info *info,
1243			bfd *stub_bfd)
1244{
1245  struct elf_kvx_link_hash_table *htab;
1246
1247  elf_elfheader (stub_bfd)->e_ident[EI_CLASS] = ELFCLASSNN;
1248
1249/* Always hook our dynamic sections into the first bfd, which is the
1250   linker created stub bfd.  This ensures that the GOT header is at
1251   the start of the output TOC section.  */
1252  htab = elf_kvx_hash_table (info);
1253  if (htab == NULL)
1254    return false;
1255
1256  return true;
1257}
1258
1259/* Determine and set the size of the stub section for a final link.
1260
1261   The basic idea here is to examine all the relocations looking for
   PC-relative calls to a target that is unreachable with a 27-bit
   immediate (as used by the call and goto instructions).  */
1264
1265bool
1266elfNN_kvx_size_stubs (bfd *output_bfd,
1267		     bfd *stub_bfd,
1268		     struct bfd_link_info *info,
1269		     bfd_signed_vma group_size,
1270		     asection * (*add_stub_section) (const char *,
1271						     asection *),
1272		     void (*layout_sections_again) (void))
1273{
1274  bfd_size_type stub_group_size;
1275  bool stubs_always_before_branch;
1276  bool stub_changed = false;
1277  struct elf_kvx_link_hash_table *htab = elf_kvx_hash_table (info);
1278
1279  /* Propagate mach to stub bfd, because it may not have been
1280     finalized when we created stub_bfd.  */
1281  bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
1282		     bfd_get_mach (output_bfd));
1283
1284  /* Stash our params away.  */
1285  htab->stub_bfd = stub_bfd;
1286  htab->add_stub_section = add_stub_section;
1287  htab->layout_sections_again = layout_sections_again;
1288  stubs_always_before_branch = group_size < 0;
1289  if (group_size < 0)
1290    stub_group_size = -group_size;
1291  else
1292    stub_group_size = group_size;
1293
1294  if (stub_group_size == 1)
1295    {
1296      /* Default values.  */
1297      /* KVX branch range is +-256MB. The value used is 1MB less.  */
1298      stub_group_size = 255 * 1024 * 1024;
1299    }
1300
1301  group_sections (htab, stub_group_size, stubs_always_before_branch);
1302
1303  (*htab->layout_sections_again) ();
1304
1305  while (1)
1306    {
1307      bfd *input_bfd;
1308
1309      for (input_bfd = info->input_bfds;
1310	   input_bfd != NULL; input_bfd = input_bfd->link.next)
1311	{
1312	  Elf_Internal_Shdr *symtab_hdr;
1313	  asection *section;
1314	  Elf_Internal_Sym *local_syms = NULL;
1315
1316	  if (!is_kvx_elf (input_bfd)
1317	      || (input_bfd->flags & BFD_LINKER_CREATED) != 0)
1318	    continue;
1319
1320	  /* We'll need the symbol table in a second.  */
1321	  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
1322	  if (symtab_hdr->sh_info == 0)
1323	    continue;
1324
1325	  /* Walk over each section attached to the input bfd.  */
1326	  for (section = input_bfd->sections;
1327	       section != NULL; section = section->next)
1328	    {
1329	      Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
1330
1331	      /* If there aren't any relocs, then there's nothing more
1332		 to do.  */
1333	      if ((section->flags & SEC_RELOC) == 0
1334		  || section->reloc_count == 0
1335		  || (section->flags & SEC_CODE) == 0)
1336		continue;
1337
1338	      /* If this section is a link-once section that will be
1339		 discarded, then don't create any stubs.  */
1340	      if (section->output_section == NULL
1341		  || section->output_section->owner != output_bfd)
1342		continue;
1343
1344	      /* Get the relocs.  */
1345	      internal_relocs
1346		= _bfd_elf_link_read_relocs (input_bfd, section, NULL,
1347					     NULL, info->keep_memory);
1348	      if (internal_relocs == NULL)
1349		goto error_ret_free_local;
1350
1351	      /* Now examine each relocation.  */
1352	      irela = internal_relocs;
1353	      irelaend = irela + section->reloc_count;
1354	      for (; irela < irelaend; irela++)
1355		{
1356		  unsigned int r_type, r_indx;
1357		  enum elf_kvx_stub_type stub_type;
1358		  struct elf_kvx_stub_hash_entry *stub_entry;
1359		  asection *sym_sec;
1360		  bfd_vma sym_value;
1361		  bfd_vma destination;
1362		  struct elf_kvx_link_hash_entry *hash;
1363		  const char *sym_name;
1364		  char *stub_name;
1365		  const asection *id_sec;
1366		  unsigned char st_type;
1367		  bfd_size_type len;
1368
1369		  r_type = ELFNN_R_TYPE (irela->r_info);
1370		  r_indx = ELFNN_R_SYM (irela->r_info);
1371
1372		  if (r_type >= (unsigned int) R_KVX_end)
1373		    {
1374		      bfd_set_error (bfd_error_bad_value);
1375		    error_ret_free_internal:
1376		      if (elf_section_data (section)->relocs == NULL)
1377			free (internal_relocs);
1378		      goto error_ret_free_local;
1379		    }
1380
1381		  /* Only look for stubs on unconditional branch and
1382		     branch and link instructions.  */
1383		  /* This catches CALL and GOTO insn */
1384		  if (r_type != (unsigned int) R_KVX_PCREL27)
1385		    continue;
1386
1387		  /* Now determine the call target, its name, value,
1388		     section.  */
1389		  sym_sec = NULL;
1390		  sym_value = 0;
1391		  destination = 0;
1392		  hash = NULL;
1393		  sym_name = NULL;
1394		  if (r_indx < symtab_hdr->sh_info)
1395		    {
1396		      /* It's a local symbol.  */
1397		      Elf_Internal_Sym *sym;
1398		      Elf_Internal_Shdr *hdr;
1399
1400		      if (local_syms == NULL)
1401			{
1402			  local_syms
1403			    = (Elf_Internal_Sym *) symtab_hdr->contents;
1404			  if (local_syms == NULL)
1405			    local_syms
1406			      = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
1407						      symtab_hdr->sh_info, 0,
1408						      NULL, NULL, NULL);
1409			  if (local_syms == NULL)
1410			    goto error_ret_free_internal;
1411			}
1412
1413		      sym = local_syms + r_indx;
1414		      hdr = elf_elfsections (input_bfd)[sym->st_shndx];
1415		      sym_sec = hdr->bfd_section;
1416		      if (!sym_sec)
1417			/* This is an undefined symbol.  It can never
1418			   be resolved.  */
1419			continue;
1420
1421		      if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
1422			sym_value = sym->st_value;
1423		      destination = (sym_value + irela->r_addend
1424				     + sym_sec->output_offset
1425				     + sym_sec->output_section->vma);
1426		      st_type = ELF_ST_TYPE (sym->st_info);
1427		      sym_name
1428			= bfd_elf_string_from_elf_section (input_bfd,
1429							   symtab_hdr->sh_link,
1430							   sym->st_name);
1431		    }
1432		  else
1433		    {
1434		      int e_indx;
1435
1436		      e_indx = r_indx - symtab_hdr->sh_info;
1437		      hash = ((struct elf_kvx_link_hash_entry *)
1438			      elf_sym_hashes (input_bfd)[e_indx]);
1439
1440		      while (hash->root.root.type == bfd_link_hash_indirect
1441			     || hash->root.root.type == bfd_link_hash_warning)
1442			hash = ((struct elf_kvx_link_hash_entry *)
1443				hash->root.root.u.i.link);
1444
1445		      if (hash->root.root.type == bfd_link_hash_defined
1446			  || hash->root.root.type == bfd_link_hash_defweak)
1447			{
1448			  struct elf_kvx_link_hash_table *globals =
1449			    elf_kvx_hash_table (info);
1450			  sym_sec = hash->root.root.u.def.section;
1451			  sym_value = hash->root.root.u.def.value;
1452			  /* For a destination in a shared library,
1453			     use the PLT stub as target address to
1454			     decide whether a branch stub is
1455			     needed.  */
1456			  if (globals->root.splt != NULL && hash != NULL
1457			      && hash->root.plt.offset != (bfd_vma) - 1)
1458			    {
1459			      sym_sec = globals->root.splt;
1460			      sym_value = hash->root.plt.offset;
1461			      if (sym_sec->output_section != NULL)
1462				destination = (sym_value
1463					       + sym_sec->output_offset
1464					       + sym_sec->output_section->vma);
1465			    }
1466			  else if (sym_sec->output_section != NULL)
1467			    destination = (sym_value + irela->r_addend
1468					   + sym_sec->output_offset
1469					   + sym_sec->output_section->vma);
1470			}
1471		      else if (hash->root.root.type == bfd_link_hash_undefined
1472			       || (hash->root.root.type
1473				   == bfd_link_hash_undefweak))
1474			{
1475			  /* For a shared library, use the PLT stub as
1476			     target address to decide whether a long
1477			     branch stub is needed.
1478			     For absolute code, they cannot be handled.  */
1479			  struct elf_kvx_link_hash_table *globals =
1480			    elf_kvx_hash_table (info);
1481
1482			  if (globals->root.splt != NULL && hash != NULL
1483			      && hash->root.plt.offset != (bfd_vma) - 1)
1484			    {
1485			      sym_sec = globals->root.splt;
1486			      sym_value = hash->root.plt.offset;
1487			      if (sym_sec->output_section != NULL)
1488				destination = (sym_value
1489					       + sym_sec->output_offset
1490					       + sym_sec->output_section->vma);
1491			    }
1492			  else
1493			    continue;
1494			}
1495		      else
1496			{
1497			  bfd_set_error (bfd_error_bad_value);
1498			  goto error_ret_free_internal;
1499			}
1500		      st_type = ELF_ST_TYPE (hash->root.type);
1501		      sym_name = hash->root.root.root.string;
1502		    }
1503
1504		  /* Determine what (if any) linker stub is needed.  */
1505		  stub_type = kvx_type_of_stub (section, irela, sym_sec,
1506						st_type, destination);
1507		  if (stub_type == kvx_stub_none)
1508		    continue;
1509
1510		  /* Support for grouping stub sections.  */
1511		  id_sec = htab->stub_group[section->id].link_sec;
1512
1513		  /* Get the name of this stub.  */
1514		  stub_name = elfNN_kvx_stub_name (id_sec, sym_sec, hash,
1515						  irela);
1516		  if (!stub_name)
1517		    goto error_ret_free_internal;
1518
1519		  stub_entry =
1520		    kvx_stub_hash_lookup (&htab->stub_hash_table,
1521					 stub_name, false, false);
1522		  if (stub_entry != NULL)
1523		    {
1524		      /* The proper stub has already been created.  */
1525		      free (stub_name);
1526		      /* Always update this stub's target since it may have
1527			 changed after layout.  */
1528		      stub_entry->target_value = sym_value + irela->r_addend;
1529		      continue;
1530		    }
1531
1532		  stub_entry = _bfd_kvx_add_stub_entry_in_group
1533		    (stub_name, section, htab);
1534		  if (stub_entry == NULL)
1535		    {
1536		      free (stub_name);
1537		      goto error_ret_free_internal;
1538		    }
1539
1540		  stub_entry->target_value = sym_value + irela->r_addend;
1541		  stub_entry->target_section = sym_sec;
1542		  stub_entry->stub_type = stub_type;
1543		  stub_entry->h = hash;
1544		  stub_entry->st_type = st_type;
1545
1546		  if (sym_name == NULL)
1547		    sym_name = "unnamed";
1548		  len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
1549		  stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
1550		  if (stub_entry->output_name == NULL)
1551		    {
1552		      free (stub_name);
1553		      goto error_ret_free_internal;
1554		    }
1555
1556		  snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
1557			    sym_name);
1558
1559		  stub_changed = true;
1560		}
1561
1562	      /* We're done with the internal relocs, free them.  */
1563	      if (elf_section_data (section)->relocs == NULL)
1564		free (internal_relocs);
1565	    }
1566	}
1567
1568      if (!stub_changed)
1569	break;
1570
1571      _bfd_kvx_resize_stubs (htab);
1572
1573      /* Ask the linker to do its stuff.  */
1574      (*htab->layout_sections_again) ();
1575      stub_changed = false;
1576    }
1577
1578  return true;
1579
1580error_ret_free_local:
1581  return false;
1582
1583}
1584
1585/* Build all the stubs associated with the current output file.  The
1586   stubs are kept in a hash table attached to the main linker hash
1587   table.  We also set up the .plt entries for statically linked PIC
1588   functions here.  This function is called via kvx_elf_finish in the
1589   linker.  */
1590
1591bool
1592elfNN_kvx_build_stubs (struct bfd_link_info *info)
1593{
1594  asection *stub_sec;
1595  struct bfd_hash_table *table;
1596  struct elf_kvx_link_hash_table *htab;
1597
1598  htab = elf_kvx_hash_table (info);
1599
1600  for (stub_sec = htab->stub_bfd->sections;
1601       stub_sec != NULL; stub_sec = stub_sec->next)
1602    {
1603      bfd_size_type size;
1604
1605      /* Ignore non-stub sections.  */
1606      if (!strstr (stub_sec->name, STUB_SUFFIX))
1607	continue;
1608
1609      /* Allocate memory to hold the linker stubs.  */
1610      size = stub_sec->size;
1611      stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
1612      if (stub_sec->contents == NULL && size != 0)
1613	return false;
1614      stub_sec->size = 0;
1615    }
1616
1617  /* Build the stubs as directed by the stub hash table.  */
1618  table = &htab->stub_hash_table;
1619  bfd_hash_traverse (table, kvx_build_one_stub, info);
1620
1621  return true;
1622}
1623
1624static bfd_vma
1625kvx_calculate_got_entry_vma (struct elf_link_hash_entry *h,
1626				 struct elf_kvx_link_hash_table
1627				 *globals, struct bfd_link_info *info,
1628				 bfd_vma value, bfd *output_bfd,
1629				 bool *unresolved_reloc_p)
1630{
1631  bfd_vma off = (bfd_vma) - 1;
1632  asection *basegot = globals->root.sgot;
1633  bool dyn = globals->root.dynamic_sections_created;
1634
1635  if (h != NULL)
1636    {
1637      BFD_ASSERT (basegot != NULL);
1638      off = h->got.offset;
1639      BFD_ASSERT (off != (bfd_vma) - 1);
1640      if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
1641	  || (bfd_link_pic (info)
1642	      && SYMBOL_REFERENCES_LOCAL (info, h))
1643	  || (ELF_ST_VISIBILITY (h->other)
1644	      && h->root.type == bfd_link_hash_undefweak))
1645	{
1646	  /* This is actually a static link, or it is a -Bsymbolic link
1647	     and the symbol is defined locally.  We must initialize this
1648	     entry in the global offset table.  Since the offset must
1649	     always be a multiple of 8 (4 in the case of ILP32), we use
1650	     the least significant bit to record whether we have
1651	     initialized it already.
1652	     When doing a dynamic link, we create a .rel(a).got relocation
1653	     entry to initialize the value.  This is done in the
1654	     finish_dynamic_symbol routine.  */
1655	  if ((off & 1) != 0)
1656	    off &= ~1;
1657	  else
1658	    {
1659	      bfd_put_NN (output_bfd, value, basegot->contents + off);
1660	      h->got.offset |= 1;
1661	    }
1662	}
1663      else
1664	*unresolved_reloc_p = false;
1665    }
1666
1667  return off;
1668}
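
/* Note on the low-bit trick above: for a locally resolved symbol
   with h->got.offset == 0x18, the first call stores the value,
   changes the recorded offset to 0x19 and returns 0x18; later calls
   see bit 0 set, skip the bfd_put_NN and still return 0x18.  */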
1669
1670static unsigned int
1671kvx_reloc_got_type (bfd_reloc_code_real_type r_type)
1672{
1673  switch (r_type)
1674    {
1675      /* Extracted with:
1676	 awk 'match ($0, /HOWTO.*R_(KVX.*_GOT(OFF)?(64)?_.*),/,ary) \
1677	 {print "case BFD_RELOC_" ary[1] ":";}' elfxx-kvxc.def  */
1678    case BFD_RELOC_KVX_S37_GOTOFF_LO10:
1679    case BFD_RELOC_KVX_S37_GOTOFF_UP27:
1680
1681    case BFD_RELOC_KVX_S37_GOT_LO10:
1682    case BFD_RELOC_KVX_S37_GOT_UP27:
1683
1684    case BFD_RELOC_KVX_S43_GOTOFF_LO10:
1685    case BFD_RELOC_KVX_S43_GOTOFF_UP27:
1686    case BFD_RELOC_KVX_S43_GOTOFF_EX6:
1687
1688    case BFD_RELOC_KVX_S43_GOT_LO10:
1689    case BFD_RELOC_KVX_S43_GOT_UP27:
1690    case BFD_RELOC_KVX_S43_GOT_EX6:
1691      return GOT_NORMAL;
1692
1693    case BFD_RELOC_KVX_S37_TLS_GD_LO10:
1694    case BFD_RELOC_KVX_S37_TLS_GD_UP27:
1695    case BFD_RELOC_KVX_S43_TLS_GD_LO10:
1696    case BFD_RELOC_KVX_S43_TLS_GD_UP27:
1697    case BFD_RELOC_KVX_S43_TLS_GD_EX6:
1698      return GOT_TLS_GD;
1699
1700    case BFD_RELOC_KVX_S37_TLS_LD_LO10:
1701    case BFD_RELOC_KVX_S37_TLS_LD_UP27:
1702    case BFD_RELOC_KVX_S43_TLS_LD_LO10:
1703    case BFD_RELOC_KVX_S43_TLS_LD_UP27:
1704    case BFD_RELOC_KVX_S43_TLS_LD_EX6:
1705      return GOT_TLS_LD;
1706
1707    case BFD_RELOC_KVX_S37_TLS_IE_LO10:
1708    case BFD_RELOC_KVX_S37_TLS_IE_UP27:
1709    case BFD_RELOC_KVX_S43_TLS_IE_LO10:
1710    case BFD_RELOC_KVX_S43_TLS_IE_UP27:
1711    case BFD_RELOC_KVX_S43_TLS_IE_EX6:
1712      return GOT_TLS_IE;
1713
1714    default:
1715      break;
1716    }
1717  return GOT_UNKNOWN;
1718}
1719
1720static bool
1721kvx_can_relax_tls (bfd *input_bfd ATTRIBUTE_UNUSED,
1722		       struct bfd_link_info *info ATTRIBUTE_UNUSED,
1723		       bfd_reloc_code_real_type r_type ATTRIBUTE_UNUSED,
1724		       struct elf_link_hash_entry *h ATTRIBUTE_UNUSED,
1725		       unsigned long r_symndx ATTRIBUTE_UNUSED)
1726{
1727  if (! IS_KVX_TLS_RELAX_RELOC (r_type))
1728    return false;
1729
1730  /* Relaxing hook. Disabled on KVX. */
1731  /* See elfnn-aarch64.c */
1732  return true;
1733}
1734
1735/* Given the relocation code R_TYPE, return the relaxed bfd reloc
1736   enumerator.  */
1737
1738static bfd_reloc_code_real_type
1739kvx_tls_transition (bfd *input_bfd,
1740			struct bfd_link_info *info,
1741			unsigned int r_type,
1742			struct elf_link_hash_entry *h,
1743			unsigned long r_symndx)
1744{
1745  bfd_reloc_code_real_type bfd_r_type
1746    = elfNN_kvx_bfd_reloc_from_type (input_bfd, r_type);
1747
1748  if (! kvx_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx))
1749    return bfd_r_type;
1750
1751  return bfd_r_type;
1752}
1753
1754/* Return the base VMA address which should be subtracted from real addresses
1755   when resolving R_KVX_*_TLS_GD_* and R_KVX_*_TLS_LD_* relocation.  */
1756
1757static bfd_vma
1758dtpoff_base (struct bfd_link_info *info)
1759{
1760  /* If tls_sec is NULL, we should have signalled an error already.  */
1761  BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
1762  return elf_hash_table (info)->tls_sec->vma;
1763}
1764
1765/* Return the base VMA address which should be subtracted from real addresses
1766   when resolving R_KVX_*_TLS_IE_* and R_KVX_*_TLS_LE_* relocations.  */
1767
1768static bfd_vma
1769tpoff_base (struct bfd_link_info *info)
1770{
1771  struct elf_link_hash_table *htab = elf_hash_table (info);
1772
1773  /* If tls_sec is NULL, we should have signalled an error already.  */
1774  BFD_ASSERT (htab->tls_sec != NULL);
1775
1776  bfd_vma base = align_power ((bfd_vma) 0,
1777			      htab->tls_sec->alignment_power);
1778  return htab->tls_sec->vma - base;
1779}
1780
1781static bfd_vma *
1782symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
1783		       unsigned long r_symndx)
1784{
1785  /* Calculate the address of the GOT entry for symbol
1786     referred to in h.  */
1787  if (h != NULL)
1788    return &h->got.offset;
1789  else
1790    {
1791      /* local symbol */
1792      struct elf_kvx_local_symbol *l;
1793
1794      l = elf_kvx_locals (input_bfd);
1795      return &l[r_symndx].got_offset;
1796    }
1797}
1798
1799static void
1800symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
1801			unsigned long r_symndx)
1802{
1803  bfd_vma *p;
1804  p = symbol_got_offset_ref (input_bfd, h, r_symndx);
1805  *p |= 1;
1806}
1807
1808static int
1809symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
1810			  unsigned long r_symndx)
1811{
1812  bfd_vma value;
1813  value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
1814  return value & 1;
1815}
1816
1817static bfd_vma
1818symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
1819		   unsigned long r_symndx)
1820{
1821  bfd_vma value;
1822  value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
1823  value &= ~1;
1824  return value;
1825}
1826
1827/* N_ONES produces N one bits, without overflowing machine arithmetic.  */
1828#define N_ONES(n) (((((bfd_vma) 1 << ((n) -1)) - 1) << 1) | 1)
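/* For example, N_ONES (3) == 0x7.  Shifting by (n - 1) and then by 1
   avoids the undefined behaviour of shifting a bfd_vma by its full
   width when N equals the number of bits in bfd_vma.  */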
1829
/* This is a copy/paste + modification of reloc.c:_bfd_relocate_contents.
   Relocations are applied to 32-bit words, so the standard overflow
   checks would report an overflow for any value wider than 32 bits;
   here we check against the full logical width of the split immediate
   instead.  */
1834static bfd_reloc_status_type
1835check_signed_overflow (enum complain_overflow complain_on_overflow,
1836		       bfd_reloc_code_real_type bfd_r_type, bfd *input_bfd,
1837		       bfd_vma relocation)
1838{
1839  bfd_reloc_status_type flag = bfd_reloc_ok;
1840  bfd_vma addrmask, fieldmask, signmask, ss;
1841  bfd_vma a, b, sum;
1842  bfd_vma x = 0;
1843
  /* These usually come from the howto struct.  As we don't check for
     values fitting in bitfields or in subparts of words, we set them
     to values that check as if the field started at the first bit.  */
1848  unsigned int rightshift = 0;
1849  unsigned int bitpos = 0;
1850  unsigned int bitsize = 0;
1851  bfd_vma src_mask = -1;
1852
  /* Only regular symbol relocations are checked here.  Other
     relocations (GOT, TLS) could be checked if the need is
     confirmed.  At the moment, we keep the previous behaviour
     (i.e. unchecked) for those.  */
1857  switch (bfd_r_type)
1858    {
1859    case BFD_RELOC_KVX_S37_LO10:
1860    case BFD_RELOC_KVX_S37_UP27:
1861      bitsize = 37;
1862      break;
1863
1864    case BFD_RELOC_KVX_S32_LO5:
1865    case BFD_RELOC_KVX_S32_UP27:
1866      bitsize = 32;
1867      break;
1868
1869    case BFD_RELOC_KVX_S43_LO10:
1870    case BFD_RELOC_KVX_S43_UP27:
1871    case BFD_RELOC_KVX_S43_EX6:
1872      bitsize = 43;
1873      break;
1874
1875    case BFD_RELOC_KVX_S64_LO10:
1876    case BFD_RELOC_KVX_S64_UP27:
1877    case BFD_RELOC_KVX_S64_EX27:
1878      bitsize = 64;
1879      break;
1880
1881    default:
1882      return bfd_reloc_ok;
1883    }
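  /* The S37/S43/S64 immediates are split across LO10/UP27/EX* fields;
     the bitsize set above is the total width of the reassembled value
     (10 + 27, 10 + 27 + 6 and 10 + 27 + 27 bits respectively).  */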
1884
  /* Direct copy/paste from reloc.c below.  */
1886
1887  /* Get the values to be added together.  For signed and unsigned
1888     relocations, we assume that all values should be truncated to
1889     the size of an address.  For bitfields, all the bits matter.
1890     See also bfd_check_overflow.  */
1891  fieldmask = N_ONES (bitsize);
1892  signmask = ~fieldmask;
1893  addrmask = (N_ONES (bfd_arch_bits_per_address (input_bfd))
1894	      | (fieldmask << rightshift));
1895  a = (relocation & addrmask) >> rightshift;
1896  b = (x & src_mask & addrmask) >> bitpos;
1897  addrmask >>= rightshift;
1898
1899  switch (complain_on_overflow)
1900    {
1901    case complain_overflow_signed:
1902      /* If any sign bits are set, all sign bits must be set.
1903	 That is, A must be a valid negative address after
1904	 shifting.  */
1905      signmask = ~(fieldmask >> 1);
1906      /* Fall thru */
1907
1908    case complain_overflow_bitfield:
1909      /* Much like the signed check, but for a field one bit
1910	 wider.  We allow a bitfield to represent numbers in the
1911	 range -2**n to 2**n-1, where n is the number of bits in the
1912	 field.  Note that when bfd_vma is 32 bits, a 32-bit reloc
1913	 can't overflow, which is exactly what we want.  */
1914      ss = a & signmask;
1915      if (ss != 0 && ss != (addrmask & signmask))
1916	flag = bfd_reloc_overflow;
1917
1918      /* We only need this next bit of code if the sign bit of B
1919	 is below the sign bit of A.  This would only happen if
1920	 SRC_MASK had fewer bits than BITSIZE.  Note that if
1921	 SRC_MASK has more bits than BITSIZE, we can get into
1922	 trouble; we would need to verify that B is in range, as
1923	 we do for A above.  */
1924      ss = ((~src_mask) >> 1) & src_mask;
1925      ss >>= bitpos;
1926
1927      /* Set all the bits above the sign bit.  */
1928      b = (b ^ ss) - ss;
1929
1930      /* Now we can do the addition.  */
1931      sum = a + b;
1932
1933      /* See if the result has the correct sign.  Bits above the
1934	 sign bit are junk now; ignore them.  If the sum is
1935	 positive, make sure we did not have all negative inputs;
1936	 if the sum is negative, make sure we did not have all
1937	 positive inputs.  The test below looks only at the sign
	 bits, and it is really just
1939	 SIGN (A) == SIGN (B) && SIGN (A) != SIGN (SUM)
1940
1941	 We mask with addrmask here to explicitly allow an address
1942	 wrap-around.  The Linux kernel relies on it, and it is
1943	 the only way to write assembler code which can run when
1944	 loaded at a location 0x80000000 away from the location at
1945	 which it is linked.  */
1946      if (((~(a ^ b)) & (a ^ sum)) & signmask & addrmask)
1947	flag = bfd_reloc_overflow;
1948      break;
1949
1950    case complain_overflow_unsigned:
1951      /* Checking for an unsigned overflow is relatively easy:
1952	 trim the addresses and add, and trim the result as well.
1953	 Overflow is normally indicated when the result does not
1954	 fit in the field.  However, we also need to consider the
1955	 case when, e.g., fieldmask is 0x7fffffff or smaller, an
1956	 input is 0x80000000, and bfd_vma is only 32 bits; then we
1957	 will get sum == 0, but there is an overflow, since the
1958	 inputs did not fit in the field.  Instead of doing a
1959	 separate test, we can check for this by or-ing in the
1960	 operands when testing for the sum overflowing its final
1961	 field.  */
1962      sum = (a + b) & addrmask;
1963      if ((a | b | sum) & signmask)
1964	flag = bfd_reloc_overflow;
1965      break;
1966
1967    default:
1968      abort ();
1969    }
1970  return flag;
1971}
1972
1973/* Perform a relocation as part of a final link.  */
1974static bfd_reloc_status_type
1975elfNN_kvx_final_link_relocate (reloc_howto_type *howto,
1976			       bfd *input_bfd,
1977			       bfd *output_bfd,
1978			       asection *input_section,
1979			       bfd_byte *contents,
1980			       Elf_Internal_Rela *rel,
1981			       bfd_vma value,
1982			       struct bfd_link_info *info,
1983			       asection *sym_sec,
1984			       struct elf_link_hash_entry *h,
1985			       bool *unresolved_reloc_p,
1986			       bool save_addend,
1987			       bfd_vma *saved_addend,
1988			       Elf_Internal_Sym *sym)
1989{
1990  Elf_Internal_Shdr *symtab_hdr;
1991  unsigned int r_type = howto->type;
1992  bfd_reloc_code_real_type bfd_r_type
1993    = elfNN_kvx_bfd_reloc_from_howto (howto);
1994  bfd_reloc_code_real_type new_bfd_r_type;
1995  unsigned long r_symndx;
1996  bfd_byte *hit_data = contents + rel->r_offset;
1997  bfd_vma place, off;
1998  bfd_vma addend;
1999  struct elf_kvx_link_hash_table *globals;
2000  bool weak_undef_p;
2001  asection *base_got;
2002  bfd_reloc_status_type rret = bfd_reloc_ok;
2003  bool resolved_to_zero;
2004  globals = elf_kvx_hash_table (info);
2005
2006  symtab_hdr = &elf_symtab_hdr (input_bfd);
2007
2008  BFD_ASSERT (is_kvx_elf (input_bfd));
2009
2010  r_symndx = ELFNN_R_SYM (rel->r_info);
2011
2012  /* It is possible to have linker relaxations on some TLS access
2013     models.  Update our information here.  */
2014  new_bfd_r_type = kvx_tls_transition (input_bfd, info, r_type, h, r_symndx);
2015  if (new_bfd_r_type != bfd_r_type)
2016    {
2017      bfd_r_type = new_bfd_r_type;
2018      howto = elfNN_kvx_howto_from_bfd_reloc (bfd_r_type);
2019      BFD_ASSERT (howto != NULL);
2020      r_type = howto->type;
2021    }
2022
2023  place = input_section->output_section->vma
2024    + input_section->output_offset + rel->r_offset;
2025
2026  /* Get addend, accumulating the addend for consecutive relocs
2027     which refer to the same offset.  */
2028  addend = saved_addend ? *saved_addend : 0;
2029  addend += rel->r_addend;
2030
2031  weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
2032		  : bfd_is_und_section (sym_sec));
2033  resolved_to_zero = (h != NULL
2034		      && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h));
2035
2036  switch (bfd_r_type)
2037    {
2038    case BFD_RELOC_KVX_NN:
2039#if ARCH_SIZE == 64
2040    case BFD_RELOC_KVX_32:
2041#endif
2042    case BFD_RELOC_KVX_S37_LO10:
2043    case BFD_RELOC_KVX_S37_UP27:
2044
2045    case BFD_RELOC_KVX_S32_LO5:
2046    case BFD_RELOC_KVX_S32_UP27:
2047
2048    case BFD_RELOC_KVX_S43_LO10:
2049    case BFD_RELOC_KVX_S43_UP27:
2050    case BFD_RELOC_KVX_S43_EX6:
2051
2052    case BFD_RELOC_KVX_S64_LO10:
2053    case BFD_RELOC_KVX_S64_UP27:
2054    case BFD_RELOC_KVX_S64_EX27:
2055      /* When generating a shared library or PIE, these relocations
2056	 are copied into the output file to be resolved at run time.  */
2057      if (bfd_link_pic (info)
2058	  && (input_section->flags & SEC_ALLOC)
2059	  && (h == NULL
2060	      || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2061		  && !resolved_to_zero)
2062	      || h->root.type != bfd_link_hash_undefweak))
2063	{
2064	  Elf_Internal_Rela outrel;
2065	  bfd_byte *loc;
2066	  bool skip, relocate;
2067	  asection *sreloc;
2068
2069	  *unresolved_reloc_p = false;
2070
2071	  skip = false;
2072	  relocate = false;
2073
2074	  outrel.r_addend = addend;
2075	  outrel.r_offset =
2076	    _bfd_elf_section_offset (output_bfd, info, input_section,
2077				     rel->r_offset);
2078	  if (outrel.r_offset == (bfd_vma) - 1)
2079	    skip = true;
2080	  else if (outrel.r_offset == (bfd_vma) - 2)
2081	    {
2082	      skip = true;
2083	      relocate = true;
2084	    }
2085
2086	  outrel.r_offset += (input_section->output_section->vma
2087			      + input_section->output_offset);
2088
2089	  if (skip)
2090	    memset (&outrel, 0, sizeof outrel);
2091	  else if (h != NULL
2092		   && h->dynindx != -1
2093		   && (!bfd_link_pic (info) || !info->symbolic
2094		       || !h->def_regular))
2095	    outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
2096	  else if (bfd_r_type == BFD_RELOC_KVX_32
2097		   || bfd_r_type == BFD_RELOC_KVX_64)
2098	    {
2099	      int symbol;
2100
2101	      /* On SVR4-ish systems, the dynamic loader cannot
2102		 relocate the text and data segments independently,
2103		 so the symbol does not matter.  */
2104	      symbol = 0;
2105	      outrel.r_info = ELFNN_R_INFO (symbol, R_KVX_RELATIVE);
2106	      outrel.r_addend += value;
2107	    }
2108	  else if (bfd_link_pic (info) && info->symbolic)
2109	    {
2110	      goto skip_because_pic;
2111	    }
2112	  else
2113	    {
	      /* We may end up here from bad input code trying to
		 insert relocations against symbols within code.  We do
		 not want that currently; such code should use GOT +
		 KVX_32/64 relocs that translate into KVX_RELATIVE.  */
2118	      const char *name;
2119	      if (h && h->root.root.string)
2120		name = h->root.root.string;
2121	      else
2122		name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2123					 NULL);
2124
2125	      (*_bfd_error_handler)
2126		/* xgettext:c-format */
2127		(_("%pB(%pA+%#" PRIx64 "): "
2128		   "unresolvable %s relocation in section `%s'"),
2129		 input_bfd, input_section, (uint64_t) rel->r_offset, howto->name,
2130		 name);
2131	      return bfd_reloc_notsupported;
2132	    }
2133
2134	  sreloc = elf_section_data (input_section)->sreloc;
2135	  if (sreloc == NULL || sreloc->contents == NULL)
2136	    return bfd_reloc_notsupported;
2137
2138	  loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (globals);
2139	  bfd_elfNN_swap_reloca_out (output_bfd, &outrel, loc);
2140
2141	  if (sreloc->reloc_count * RELOC_SIZE (globals) > sreloc->size)
2142	    {
	      /* Sanity check that we have previously allocated
		 sufficient space in the relocation section for the
		 number of relocations we actually want to emit.  */
2146	      abort ();
2147	    }
2148
2149	  /* If this reloc is against an external symbol, we do not want to
2150	     fiddle with the addend.  Otherwise, we need to include the symbol
2151	     value so that it becomes an addend for the dynamic reloc.  */
2152	  if (!relocate)
2153	    return bfd_reloc_ok;
2154
2155	  rret = check_signed_overflow (complain_overflow_signed, bfd_r_type,
2156					input_bfd, value + addend);
2157	  if (rret != bfd_reloc_ok)
2158	    return rret;
2159
2160	  return _bfd_final_link_relocate (howto, input_bfd, input_section,
2161					   contents, rel->r_offset, value,
2162					   addend);
2163	}
2164
2165    skip_because_pic:
2166      rret = check_signed_overflow (complain_overflow_signed, bfd_r_type,
2167				    input_bfd, value + addend);
2168      if (rret != bfd_reloc_ok)
2169	return rret;
2170
2171      return _bfd_final_link_relocate (howto, input_bfd, input_section,
2172				       contents, rel->r_offset, value,
2173				       addend);
2174      break;
2175
2176    case BFD_RELOC_KVX_PCREL17:
2177    case BFD_RELOC_KVX_PCREL27:
2178      {
	/* BCU insns are always first in a bundle, so there is no need
	   to correct the address using the offset within the bundle.  */
2181
2182	asection *splt = globals->root.splt;
2183	bool via_plt_p =
2184	  splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
2185
2186	/* A call to an undefined weak symbol is converted to a jump to
2187	   the next instruction unless a PLT entry will be created.
2188	   The jump to the next instruction is optimized as a NOP.
2189	   Do the same for local undefined symbols.  */
2190	if (weak_undef_p && ! via_plt_p)
2191	  {
2192	    bfd_putl32 (INSN_NOP, hit_data);
2193	    return bfd_reloc_ok;
2194	  }
2195
2196	/* If the call goes through a PLT entry, make sure to
2197	   check distance to the right destination address.  */
2198	if (via_plt_p)
2199	  value = (splt->output_section->vma
2200		   + splt->output_offset + h->plt.offset);
2201
2202	/* Check if a stub has to be inserted because the destination
2203	   is too far away.  */
2204	struct elf_kvx_stub_hash_entry *stub_entry = NULL;
2205
	/* If the target symbol is global and marked as a function, the
	   relocation applies to a function call or a tail call.  In
	   this situation we can veneer out-of-range branches.  The
	   veneers use R16 and R17, so they cannot be used for arbitrary
	   out-of-range branches that occur within the body of a
	   function.  */
2211
2214	if (! kvx_valid_call_p (value, place))
2215	  {
2216	    /* The target is out of reach, so redirect the branch to
2217	       the local stub for this function.  */
2218	    stub_entry = elfNN_kvx_get_stub_entry (input_section,
2219						   sym_sec, h,
2220						   rel, globals);
2221	    if (stub_entry != NULL)
2222	      value = (stub_entry->stub_offset
2223		       + stub_entry->stub_sec->output_offset
2224		       + stub_entry->stub_sec->output_section->vma);
2225	    /* We have redirected the destination to stub entry address,
2226	       so ignore any addend record in the original rela entry.  */
2227	    addend = 0;
2228	  }
2229      }
2230      *unresolved_reloc_p = false;
2231
2232      /* FALLTHROUGH */
2233
      /* PCREL 32 relocations are used in DWARF2 tables for exception
	 handling.  */
2235    case BFD_RELOC_KVX_32_PCREL:
2236    case BFD_RELOC_KVX_S64_PCREL_LO10:
2237    case BFD_RELOC_KVX_S64_PCREL_UP27:
2238    case BFD_RELOC_KVX_S64_PCREL_EX27:
2239    case BFD_RELOC_KVX_S37_PCREL_LO10:
2240    case BFD_RELOC_KVX_S37_PCREL_UP27:
2241    case BFD_RELOC_KVX_S43_PCREL_LO10:
2242    case BFD_RELOC_KVX_S43_PCREL_UP27:
2243    case BFD_RELOC_KVX_S43_PCREL_EX6:
2244      return _bfd_final_link_relocate (howto, input_bfd, input_section,
2245				       contents, rel->r_offset, value,
2246				       addend);
2247      break;
2248
2249    case BFD_RELOC_KVX_S37_TLS_LE_LO10:
2250    case BFD_RELOC_KVX_S37_TLS_LE_UP27:
2251
2252    case BFD_RELOC_KVX_S43_TLS_LE_LO10:
2253    case BFD_RELOC_KVX_S43_TLS_LE_UP27:
2254    case BFD_RELOC_KVX_S43_TLS_LE_EX6:
2255      return _bfd_final_link_relocate (howto, input_bfd, input_section,
2256				       contents, rel->r_offset,
2257				       value - tpoff_base (info), addend);
2258      break;
2259
2260    case BFD_RELOC_KVX_S37_TLS_DTPOFF_LO10:
2261    case BFD_RELOC_KVX_S37_TLS_DTPOFF_UP27:
2262
2263    case BFD_RELOC_KVX_S43_TLS_DTPOFF_LO10:
2264    case BFD_RELOC_KVX_S43_TLS_DTPOFF_UP27:
2265    case BFD_RELOC_KVX_S43_TLS_DTPOFF_EX6:
2266      return _bfd_final_link_relocate (howto, input_bfd, input_section,
2267				       contents, rel->r_offset,
2268				       value - dtpoff_base (info), addend);
2269
2270    case BFD_RELOC_KVX_S37_TLS_GD_UP27:
2271    case BFD_RELOC_KVX_S37_TLS_GD_LO10:
2272
2273    case BFD_RELOC_KVX_S43_TLS_GD_UP27:
2274    case BFD_RELOC_KVX_S43_TLS_GD_EX6:
2275    case BFD_RELOC_KVX_S43_TLS_GD_LO10:
2276
2277    case BFD_RELOC_KVX_S37_TLS_IE_UP27:
2278    case BFD_RELOC_KVX_S37_TLS_IE_LO10:
2279
2280    case BFD_RELOC_KVX_S43_TLS_IE_UP27:
2281    case BFD_RELOC_KVX_S43_TLS_IE_EX6:
2282    case BFD_RELOC_KVX_S43_TLS_IE_LO10:
2283
2284    case BFD_RELOC_KVX_S37_TLS_LD_UP27:
2285    case BFD_RELOC_KVX_S37_TLS_LD_LO10:
2286
2287    case BFD_RELOC_KVX_S43_TLS_LD_UP27:
2288    case BFD_RELOC_KVX_S43_TLS_LD_EX6:
2289    case BFD_RELOC_KVX_S43_TLS_LD_LO10:
2290
2291      if (globals->root.sgot == NULL)
2292	return bfd_reloc_notsupported;
2293      value = symbol_got_offset (input_bfd, h, r_symndx);
2294
2295      _bfd_final_link_relocate (howto, input_bfd, input_section,
2296				contents, rel->r_offset, value, addend);
2297      *unresolved_reloc_p = false;
2298      break;
2299
2300    case BFD_RELOC_KVX_S37_GOTADDR_UP27:
2301    case BFD_RELOC_KVX_S37_GOTADDR_LO10:
2302
2303    case BFD_RELOC_KVX_S43_GOTADDR_UP27:
2304    case BFD_RELOC_KVX_S43_GOTADDR_EX6:
2305    case BFD_RELOC_KVX_S43_GOTADDR_LO10:
2306
2307    case BFD_RELOC_KVX_S64_GOTADDR_UP27:
2308    case BFD_RELOC_KVX_S64_GOTADDR_EX27:
2309    case BFD_RELOC_KVX_S64_GOTADDR_LO10:
2310      {
2311	if (globals->root.sgot == NULL)
2312	  BFD_ASSERT (h != NULL);
2313
2314	value = globals->root.sgot->output_section->vma
2315	  + globals->root.sgot->output_offset;
2316
2317	return _bfd_final_link_relocate (howto, input_bfd, input_section,
2318					 contents, rel->r_offset, value,
2319					 addend);
2320      }
2321      break;
2322
2323    case BFD_RELOC_KVX_S37_GOTOFF_LO10:
2324    case BFD_RELOC_KVX_S37_GOTOFF_UP27:
2325
2326    case BFD_RELOC_KVX_32_GOTOFF:
2327    case BFD_RELOC_KVX_64_GOTOFF:
2328
2329    case BFD_RELOC_KVX_S43_GOTOFF_LO10:
2330    case BFD_RELOC_KVX_S43_GOTOFF_UP27:
2331    case BFD_RELOC_KVX_S43_GOTOFF_EX6:
2332
2333      {
2334	asection *basegot = globals->root.sgot;
2335	/* BFD_ASSERT(h == NULL); */
2336	BFD_ASSERT(globals->root.sgot != NULL);
2337	value -= basegot->output_section->vma + basegot->output_offset;
2338	return _bfd_final_link_relocate (howto, input_bfd, input_section,
2339					 contents, rel->r_offset, value,
2340					 addend);
2341      }
2342      break;
2343
2344    case BFD_RELOC_KVX_S37_GOT_LO10:
2345    case BFD_RELOC_KVX_S37_GOT_UP27:
2346
2347    case BFD_RELOC_KVX_32_GOT:
2348    case BFD_RELOC_KVX_64_GOT:
2349
2350    case BFD_RELOC_KVX_S43_GOT_LO10:
2351    case BFD_RELOC_KVX_S43_GOT_UP27:
2352    case BFD_RELOC_KVX_S43_GOT_EX6:
2353
2354      if (globals->root.sgot == NULL)
2355	BFD_ASSERT (h != NULL);
2356
2357      if (h != NULL)
2358	{
2359	  value = kvx_calculate_got_entry_vma (h, globals, info, value,
2360					       output_bfd,
2361					       unresolved_reloc_p);
2362#ifdef UGLY_DEBUG
2363	  printf("GOT_LO/HI for %s, value %x\n", h->root.root.string, value);
2364#endif
2365
2366	  return _bfd_final_link_relocate (howto, input_bfd, input_section,
2367					   contents, rel->r_offset, value,
2368					   addend);
2369	}
2370      else
2371	{
2372#ifdef UGLY_DEBUG
2373	  printf("GOT_LO/HI with h NULL, initial value %x\n", value);
2374#endif
2375	  struct elf_kvx_local_symbol *locals = elf_kvx_locals (input_bfd);
2376
2377	  if (locals == NULL)
2378	    {
2379	      int howto_index = bfd_r_type - BFD_RELOC_KVX_RELOC_START;
2380	      _bfd_error_handler
2381		/* xgettext:c-format */
		(_("%pB: local symbol descriptor table is NULL when applying "
		   "relocation %s against local symbol"),
2384		 input_bfd, elf_kvx_howto_table[howto_index].name);
2385	      abort ();
2386	    }
2387
2388	  off = symbol_got_offset (input_bfd, h, r_symndx);
2389	  base_got = globals->root.sgot;
2390	  bfd_vma got_entry_addr = (base_got->output_section->vma
2391				    + base_got->output_offset + off);
2392
2393	  if (!symbol_got_offset_mark_p (input_bfd, h, r_symndx))
2394	    {
2395	      bfd_put_64 (output_bfd, value, base_got->contents + off);
2396
2397	      if (bfd_link_pic (info))
2398		{
2399		  asection *s;
2400		  Elf_Internal_Rela outrel;
2401
2402		  /* For PIC executables and shared libraries we need
2403		     to relocate the GOT entry at run time.  */
2404		  s = globals->root.srelgot;
2405		  if (s == NULL)
2406		    abort ();
2407
2408		  outrel.r_offset = got_entry_addr;
2409		  outrel.r_info = ELFNN_R_INFO (0, R_KVX_RELATIVE);
2410		  outrel.r_addend = value;
2411		  elf_append_rela (output_bfd, s, &outrel);
2412		}
2413
2414	      symbol_got_offset_mark (input_bfd, h, r_symndx);
2415	    }
2416
2417	  /* Update the relocation value to GOT entry addr as we have
2418	     transformed the direct data access into an indirect data
2419	     access through GOT.  */
2420	  value = got_entry_addr;
2421
2422	  return _bfd_final_link_relocate (howto, input_bfd, input_section,
2423					   contents, rel->r_offset, off, 0);
2424	}
2425      break;
2426
2427    default:
2428      return bfd_reloc_notsupported;
2429    }
2430
2431  if (saved_addend)
2432    *saved_addend = value;
2433
2434  /* Only apply the final relocation in a sequence.  */
2435  if (save_addend)
2436    return bfd_reloc_continue;
2437
2438  return _bfd_kvx_elf_put_addend (input_bfd, hit_data, bfd_r_type,
2439				  howto, value);
2440}
2441
2442
2443
2444/* Relocate a KVX ELF section.  */
2445
2446static int
2447elfNN_kvx_relocate_section (bfd *output_bfd,
2448			    struct bfd_link_info *info,
2449			    bfd *input_bfd,
2450			    asection *input_section,
2451			    bfd_byte *contents,
2452			    Elf_Internal_Rela *relocs,
2453			    Elf_Internal_Sym *local_syms,
2454			    asection **local_sections)
2455{
2456  Elf_Internal_Shdr *symtab_hdr;
2457  struct elf_link_hash_entry **sym_hashes;
2458  Elf_Internal_Rela *rel;
2459  Elf_Internal_Rela *relend;
2460  const char *name;
2461  struct elf_kvx_link_hash_table *globals;
2462  bool save_addend = false;
2463  bfd_vma addend = 0;
2464
2465  globals = elf_kvx_hash_table (info);
2466
2467  symtab_hdr = &elf_symtab_hdr (input_bfd);
2468  sym_hashes = elf_sym_hashes (input_bfd);
2469
2470  rel = relocs;
2471  relend = relocs + input_section->reloc_count;
2472  for (; rel < relend; rel++)
2473    {
2474      unsigned int r_type;
2475      bfd_reloc_code_real_type bfd_r_type;
2476      reloc_howto_type *howto;
2477      unsigned long r_symndx;
2478      Elf_Internal_Sym *sym;
2479      asection *sec;
2480      struct elf_link_hash_entry *h;
2481      bfd_vma relocation;
2482      bfd_reloc_status_type r;
2483      arelent bfd_reloc;
2484      char sym_type;
2485      bool unresolved_reloc = false;
2486      char *error_message = NULL;
2487
2488      r_symndx = ELFNN_R_SYM (rel->r_info);
2489      r_type = ELFNN_R_TYPE (rel->r_info);
2490
2491      bfd_reloc.howto = elfNN_kvx_howto_from_type (input_bfd, r_type);
2492      howto = bfd_reloc.howto;
2493
2494      if (howto == NULL)
2495	return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2496
2497      bfd_r_type = elfNN_kvx_bfd_reloc_from_howto (howto);
2498
2499      h = NULL;
2500      sym = NULL;
2501      sec = NULL;
2502
2503      if (r_symndx < symtab_hdr->sh_info) /* A local symbol. */
2504	{
2505	  sym = local_syms + r_symndx;
2506	  sym_type = ELFNN_ST_TYPE (sym->st_info);
2507	  sec = local_sections[r_symndx];
2508
	  /* An object file might have a reference to a local
	     undefined symbol.  This is a malformed object file, but we
	     should at least do something about it.  */
2512	  if (r_type != R_KVX_NONE
2513	      && r_type != R_KVX_S37_GOTADDR_LO10
2514	      && r_type != R_KVX_S37_GOTADDR_UP27
2515	      && r_type != R_KVX_S64_GOTADDR_LO10
2516	      && r_type != R_KVX_S64_GOTADDR_UP27
2517	      && r_type != R_KVX_S64_GOTADDR_EX27
2518	      && r_type != R_KVX_S43_GOTADDR_LO10
2519	      && r_type != R_KVX_S43_GOTADDR_UP27
2520	      && r_type != R_KVX_S43_GOTADDR_EX6
2521	      && bfd_is_und_section (sec)
2522	      && ELF_ST_BIND (sym->st_info) != STB_WEAK)
2523	    (*info->callbacks->undefined_symbol)
2524	      (info, bfd_elf_string_from_elf_section
2525	       (input_bfd, symtab_hdr->sh_link, sym->st_name),
2526	       input_bfd, input_section, rel->r_offset, true);
2527
2528	  relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
2529	}
2530      else
2531	{
2532	  bool warned, ignored;
2533
2534	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2535				   r_symndx, symtab_hdr, sym_hashes,
2536				   h, sec, relocation,
2537				   unresolved_reloc, warned, ignored);
2538
2539	  sym_type = h->type;
2540	}
2541
2542      if (sec != NULL && discarded_section (sec))
2543	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
2544					 rel, 1, relend, howto, 0, contents);
2545
2546      if (bfd_link_relocatable (info))
2547	continue;
2548
2549      if (h != NULL)
2550	name = h->root.root.string;
2551      else
2552	{
2553	  name = (bfd_elf_string_from_elf_section
2554		  (input_bfd, symtab_hdr->sh_link, sym->st_name));
2555	  if (name == NULL || *name == '\0')
2556	    name = bfd_section_name (sec);
2557	}
2558
2559      if (r_symndx != 0
2560	  && r_type != R_KVX_NONE
2561	  && (h == NULL
2562	      || h->root.type == bfd_link_hash_defined
2563	      || h->root.type == bfd_link_hash_defweak)
2564	  && IS_KVX_TLS_RELOC (bfd_r_type) != (sym_type == STT_TLS))
2565	{
2566	  (*_bfd_error_handler)
2567	    ((sym_type == STT_TLS
2568	      /* xgettext:c-format */
2569	      ? _("%pB(%pA+%#" PRIx64 "): %s used with TLS symbol %s")
2570	      /* xgettext:c-format */
2571	      : _("%pB(%pA+%#" PRIx64 "): %s used with non-TLS symbol %s")),
2572	     input_bfd,
2573	     input_section, (uint64_t) rel->r_offset, howto->name, name);
2574	}
2575
      /* The original aarch64 code has relaxation handling for TLS here.  */
2577      r = bfd_reloc_continue;
2578
2579      /* There may be multiple consecutive relocations for the
2580	 same offset.  In that case we are supposed to treat the
2581	 output of each relocation as the addend for the next.  */
2582      if (rel + 1 < relend
2583	  && rel->r_offset == rel[1].r_offset
2584	  && ELFNN_R_TYPE (rel[1].r_info) != R_KVX_NONE)
2585
2586	save_addend = true;
2587      else
2588	save_addend = false;
2589
2590      if (r == bfd_reloc_continue)
2591	r = elfNN_kvx_final_link_relocate (howto, input_bfd, output_bfd,
2592					   input_section, contents, rel,
2593					   relocation, info, sec,
2594					   h, &unresolved_reloc,
2595					   save_addend, &addend, sym);
2596
2597      switch (elfNN_kvx_bfd_reloc_from_type (input_bfd, r_type))
2598	{
2599	case BFD_RELOC_KVX_S37_TLS_GD_LO10:
2600	case BFD_RELOC_KVX_S37_TLS_GD_UP27:
2601
2602	case BFD_RELOC_KVX_S43_TLS_GD_LO10:
2603	case BFD_RELOC_KVX_S43_TLS_GD_UP27:
2604	case BFD_RELOC_KVX_S43_TLS_GD_EX6:
2605
2606	case BFD_RELOC_KVX_S37_TLS_LD_LO10:
2607	case BFD_RELOC_KVX_S37_TLS_LD_UP27:
2608
2609	case BFD_RELOC_KVX_S43_TLS_LD_LO10:
2610	case BFD_RELOC_KVX_S43_TLS_LD_UP27:
2611	case BFD_RELOC_KVX_S43_TLS_LD_EX6:
2612
2613	  if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
2614	    {
2615	      bool need_relocs = false;
2616	      bfd_byte *loc;
2617	      int indx;
2618	      bfd_vma off;
2619
2620	      off = symbol_got_offset (input_bfd, h, r_symndx);
2621	      indx = h && h->dynindx != -1 ? h->dynindx : 0;
2622
2623	      need_relocs =
2624		(bfd_link_pic (info) || indx != 0) &&
2625		(h == NULL
2626		 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2627		 || h->root.type != bfd_link_hash_undefweak);
2628
2629	      BFD_ASSERT (globals->root.srelgot != NULL);
2630
2631	      if (need_relocs)
2632		{
2633		  Elf_Internal_Rela rela;
2634		  rela.r_info = ELFNN_R_INFO (indx, R_KVX_64_DTPMOD);
2635		  rela.r_addend = 0;
2636		  rela.r_offset = globals->root.sgot->output_section->vma +
2637		    globals->root.sgot->output_offset + off;
2638
2639		  loc = globals->root.srelgot->contents;
2640		  loc += globals->root.srelgot->reloc_count++
		    * RELOC_SIZE (globals);
2642		  bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
2643
2644		  bfd_reloc_code_real_type real_type =
2645		    elfNN_kvx_bfd_reloc_from_type (input_bfd, r_type);
2646
2647		  if (real_type == BFD_RELOC_KVX_S37_TLS_LD_LO10
2648		      || real_type == BFD_RELOC_KVX_S37_TLS_LD_UP27
2649		      || real_type == BFD_RELOC_KVX_S43_TLS_LD_LO10
2650		      || real_type == BFD_RELOC_KVX_S43_TLS_LD_UP27
2651		      || real_type == BFD_RELOC_KVX_S43_TLS_LD_EX6)
2652		    {
2653		      /* For local dynamic, don't generate DTPOFF in any case.
2654			 Initialize the DTPOFF slot into zero, so we get module
2655			 base address when invoke runtime TLS resolver.  */
2656		      bfd_put_NN (output_bfd, 0,
2657				  globals->root.sgot->contents + off
2658				  + GOT_ENTRY_SIZE);
2659		    }
2660		  else if (indx == 0)
2661		    {
2662		      bfd_put_NN (output_bfd,
2663				  relocation - dtpoff_base (info),
2664				  globals->root.sgot->contents + off
2665				  + GOT_ENTRY_SIZE);
2666		    }
2667		  else
2668		    {
2669		      /* This TLS symbol is global. We emit a
2670			 relocation to fixup the tls offset at load
2671			 time.  */
2672		      rela.r_info =
2673			ELFNN_R_INFO (indx, R_KVX_64_DTPOFF);
2674		      rela.r_addend = 0;
2675		      rela.r_offset =
2676			(globals->root.sgot->output_section->vma
2677			 + globals->root.sgot->output_offset + off
2678			 + GOT_ENTRY_SIZE);
2679
2680		      loc = globals->root.srelgot->contents;
2681		      loc += globals->root.srelgot->reloc_count++
2682			* RELOC_SIZE (globals);
2683		      bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
2684		      bfd_put_NN (output_bfd, (bfd_vma) 0,
2685				  globals->root.sgot->contents + off
2686				  + GOT_ENTRY_SIZE);
2687		    }
2688		}
2689	      else
2690		{
2691		  bfd_put_NN (output_bfd, (bfd_vma) 1,
2692			      globals->root.sgot->contents + off);
2693		  bfd_put_NN (output_bfd,
2694			      relocation - dtpoff_base (info),
2695			      globals->root.sgot->contents + off
2696			      + GOT_ENTRY_SIZE);
2697		}
2698
2699	      symbol_got_offset_mark (input_bfd, h, r_symndx);
2700	    }
2701	  break;
2702
2703	case BFD_RELOC_KVX_S37_TLS_IE_LO10:
2704	case BFD_RELOC_KVX_S37_TLS_IE_UP27:
2705
2706	case BFD_RELOC_KVX_S43_TLS_IE_LO10:
2707	case BFD_RELOC_KVX_S43_TLS_IE_UP27:
2708	case BFD_RELOC_KVX_S43_TLS_IE_EX6:
2709	  if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
2710	    {
2711	      bool need_relocs = false;
2712	      bfd_byte *loc;
2713	      int indx;
2714	      bfd_vma off;
2715
2716	      off = symbol_got_offset (input_bfd, h, r_symndx);
2717
2718	      indx = h && h->dynindx != -1 ? h->dynindx : 0;
2719
2720	      need_relocs =
2721		(bfd_link_pic (info) || indx != 0) &&
2722		(h == NULL
2723		 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2724		 || h->root.type != bfd_link_hash_undefweak);
2725
2726	      BFD_ASSERT (globals->root.srelgot != NULL);
2727
2728	      if (need_relocs)
2729		{
2730		  Elf_Internal_Rela rela;
2731
2732		  if (indx == 0)
2733		    rela.r_addend = relocation - dtpoff_base (info);
2734		  else
2735		    rela.r_addend = 0;
2736
2737		  rela.r_info = ELFNN_R_INFO (indx, R_KVX_64_TPOFF);
2738		  rela.r_offset = globals->root.sgot->output_section->vma +
2739		    globals->root.sgot->output_offset + off;
2740
2741		  loc = globals->root.srelgot->contents;
2742		  loc += globals->root.srelgot->reloc_count++
		    * RELOC_SIZE (globals);
2744
2745		  bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
2746
2747		  bfd_put_NN (output_bfd, rela.r_addend,
2748			      globals->root.sgot->contents + off);
2749		}
2750	      else
2751		bfd_put_NN (output_bfd, relocation - tpoff_base (info),
2752			    globals->root.sgot->contents + off);
2753
2754	      symbol_got_offset_mark (input_bfd, h, r_symndx);
2755	    }
2756	  break;
2757
2758	default:
2759	  break;
2760	}
2761
2762      /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
2763	 because such sections are not SEC_ALLOC and thus ld.so will
2764	 not process them.  */
2765      if (unresolved_reloc
2766	  && !((input_section->flags & SEC_DEBUGGING) != 0
2767	       && h->def_dynamic)
2768	  && _bfd_elf_section_offset (output_bfd, info, input_section,
				      rel->r_offset) != (bfd_vma) - 1)
2770	{
2771	  (*_bfd_error_handler)
2772	    /* xgettext:c-format */
2773	    (_("%pB(%pA+%#" PRIx64 "): "
2774	       "unresolvable %s relocation against symbol `%s'"),
2775	     input_bfd, input_section, (uint64_t) rel->r_offset, howto->name,
2776	     h->root.root.string);
2777	  return false;
2778	}
2779
2780      if (r != bfd_reloc_ok && r != bfd_reloc_continue)
2781	{
2782	  switch (r)
2783	    {
2784	    case bfd_reloc_overflow:
2785	      (*info->callbacks->reloc_overflow)
2786		(info, (h ? &h->root : NULL), name, howto->name, (bfd_vma) 0,
2787		 input_bfd, input_section, rel->r_offset);
2788
	      /* The original aarch64 code had a check for alignment
		 correctness here.  */
2790	      break;
2791
2792	    case bfd_reloc_undefined:
2793	      (*info->callbacks->undefined_symbol)
2794		(info, name, input_bfd, input_section, rel->r_offset, true);
2795	      break;
2796
2797	    case bfd_reloc_outofrange:
2798	      error_message = _("out of range");
2799	      goto common_error;
2800
2801	    case bfd_reloc_notsupported:
2802	      error_message = _("unsupported relocation");
2803	      goto common_error;
2804
2805	    case bfd_reloc_dangerous:
2806	      /* error_message should already be set.  */
2807	      goto common_error;
2808
2809	    default:
2810	      error_message = _("unknown error");
2811	      /* Fall through.  */
2812
2813	    common_error:
2814	      BFD_ASSERT (error_message != NULL);
2815	      (*info->callbacks->reloc_dangerous)
2816		(info, error_message, input_bfd, input_section, rel->r_offset);
2817	      break;
2818	    }
2819	}
2820
2821      if (!save_addend)
2822	addend = 0;
2823    }
2824
2825  return true;
2826}
2827
2828/* Set the right machine number.  */
2829
2830static bool
2831elfNN_kvx_object_p (bfd *abfd)
2832{
  /* Must be coherent with the default arch in cpu-kvx.c.  */
2834  int e_set = bfd_mach_kv3_1;
2835
2836  if (elf_elfheader (abfd)->e_machine == EM_KVX)
2837    {
2838      int e_core = elf_elfheader (abfd)->e_flags & ELF_KVX_CORE_MASK;
2839      switch(e_core)
2840	{
2841#if ARCH_SIZE == 64
2842	case ELF_KVX_CORE_KV3_1 : e_set = bfd_mach_kv3_1_64; break;
2843	case ELF_KVX_CORE_KV3_2 : e_set = bfd_mach_kv3_2_64; break;
2844	case ELF_KVX_CORE_KV4_1 : e_set = bfd_mach_kv4_1_64; break;
2845#else
2846	case ELF_KVX_CORE_KV3_1 : e_set = bfd_mach_kv3_1; break;
2847	case ELF_KVX_CORE_KV3_2 : e_set = bfd_mach_kv3_2; break;
2848	case ELF_KVX_CORE_KV4_1 : e_set = bfd_mach_kv4_1; break;
2849#endif
2850	default:
2851	  (*_bfd_error_handler)(_("%s: Bad ELF id: `%d'"),
2852				abfd->filename, e_core);
2853	}
2854    }
2855  return bfd_default_set_arch_mach (abfd, bfd_arch_kvx, e_set);
2856}
2857
2858/* Function to keep KVX specific flags in the ELF header.  */
2859
2860static bool
2861elfNN_kvx_set_private_flags (bfd *abfd, flagword flags)
2862{
2863  if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
2864    {
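      /* The flags have already been initialised to a different value;
	 do not overwrite them here and leave any real incompatibility
	 to be reported when private BFD data is merged.  */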
2865    }
2866  else
2867    {
2868      elf_elfheader (abfd)->e_flags = flags;
2869      elf_flags_init (abfd) = true;
2870    }
2871
2872  return true;
2873}
2874
2875/* Merge backend specific data from an object file to the output
2876   object file when linking.  */
2877
2878static bool
2879elfNN_kvx_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
2880{
2881  bfd *obfd = info->output_bfd;
2882  flagword out_flags;
2883  flagword in_flags;
2884  bool flags_compatible = true;
2885  asection *sec;
2886
  /* Check if we have the same endianness.  */
2888  if (!_bfd_generic_verify_endian_match (ibfd, info))
2889    return false;
2890
2891  if (!is_kvx_elf (ibfd) || !is_kvx_elf (obfd))
2892    return true;
2893
2894  /* The input BFD must have had its flags initialised.  */
2895  /* The following seems bogus to me -- The flags are initialized in
2896     the assembler but I don't think an elf_flags_init field is
2897     written into the object.  */
2898  /* BFD_ASSERT (elf_flags_init (ibfd)); */
2899
2900  if (bfd_get_arch_size (ibfd) != bfd_get_arch_size (obfd))
2901    {
2902      const char *msg;
2903
2904      if (bfd_get_arch_size (ibfd) == 32
2905	  && bfd_get_arch_size (obfd) == 64)
2906	msg = _("%s: compiled as 32-bit object and %s is 64-bit");
2907      else if (bfd_get_arch_size (ibfd) == 64
2908	       && bfd_get_arch_size (obfd) == 32)
2909	msg = _("%s: compiled as 64-bit object and %s is 32-bit");
2910      else
2911	msg = _("%s: object size does not match that of target %s");
2912
2913      (*_bfd_error_handler) (msg, bfd_get_filename (ibfd),
2914			     bfd_get_filename (obfd));
2915      bfd_set_error (bfd_error_wrong_format);
2916      return false;
2917    }
2918
2919  in_flags = elf_elfheader (ibfd)->e_flags;
2920  out_flags = elf_elfheader (obfd)->e_flags;
2921
2922  if (!elf_flags_init (obfd))
2923    {
2924      /* If the input is the default architecture and had the default
2925	 flags then do not bother setting the flags for the output
2926	 architecture, instead allow future merges to do this.  If no
2927	 future merges ever set these flags then they will retain their
	 uninitialised values which, surprise surprise, correspond
2929	 to the default values.  */
2930      if (bfd_get_arch_info (ibfd)->the_default
2931	  && elf_elfheader (ibfd)->e_flags == 0)
2932	return true;
2933
2934      elf_flags_init (obfd) = true;
2935      elf_elfheader (obfd)->e_flags = in_flags;
2936
2937      if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
2938	  && bfd_get_arch_info (obfd)->the_default)
2939	return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
2940				  bfd_get_mach (ibfd));
2941
2942      return true;
2943    }
2944
2945  /* Identical flags must be compatible.  */
2946  if (in_flags == out_flags)
2947    return true;
2948
2949  /* Check to see if the input BFD actually contains any sections.  If
2950     not, its flags may not have been initialised either, but it
     cannot actually cause any incompatibility.  Do not short-circuit
2952     dynamic objects; their section list may be emptied by
2953     elf_link_add_object_symbols.
2954
2955     Also check to see if there are no code sections in the input.
2956     In this case there is no need to check for code specific flags.
     XXX - do we need to worry about floating-point format compatibility
2958     in data sections ?  */
2959  if (!(ibfd->flags & DYNAMIC))
2960    {
2961      bool null_input_bfd = true;
2962      bool only_data_sections = true;
2963
2964      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2965	{
2966	  if ((bfd_section_flags (sec)
2967	       & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
2968	      == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
2969	    only_data_sections = false;
2970
2971	  null_input_bfd = false;
2972	  break;
2973	}
2974
2975      if (null_input_bfd || only_data_sections)
2976	return true;
2977    }
2978  return flags_compatible;
2979}
2980
2981/* Display the flags field.  */
2982
2983static bool
2984elfNN_kvx_print_private_bfd_data (bfd *abfd, void *ptr)
2985{
2986  FILE *file = (FILE *) ptr;
2987  unsigned long flags;
2988
2989  BFD_ASSERT (abfd != NULL && ptr != NULL);
2990
2991  /* Print normal ELF private data.  */
2992  _bfd_elf_print_private_bfd_data (abfd, ptr);
2993
2994  flags = elf_elfheader (abfd)->e_flags;
2995  /* Ignore init flag - it may not be set, despite the flags field
2996     containing valid data.  */
2997
2998  /* xgettext:c-format */
2999  fprintf (file, _("Private flags = 0x%lx : "), elf_elfheader (abfd)->e_flags);
3000  if((flags & ELF_KVX_ABI_64B_ADDR_BIT) == ELF_KVX_ABI_64B_ADDR_BIT)
3001    {
3002      if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV3_1))
3003	fprintf (file, _("Coolidge (kv3) V1 64 bits"));
3004      else if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV3_2))
3005	fprintf (file, _("Coolidge (kv3) V2 64 bits"));
3006      else if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV4_1))
3007	fprintf (file, _("Coolidge (kv4) V1 64 bits"));
3008    }
3009  else
3010    {
3011      if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV3_1))
3012	fprintf (file, _("Coolidge (kv3) V1 32 bits"));
3013      else if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV3_2))
3014	fprintf (file, _("Coolidge (kv3) V2 32 bits"));
3015      else if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV4_1))
3016	fprintf (file, _("Coolidge (kv4) V1 32 bits"));
3017    }
3018
3019  fputc ('\n', file);
3020
3021  return true;
3022}
3023
3024/* Adjust a symbol defined by a dynamic object and referenced by a
3025   regular object.  The current definition is in some section of the
3026   dynamic object, but we're not including those sections.  We have to
3027   change the definition to something the rest of the link can
3028   understand.	*/
3029
3030static bool
3031elfNN_kvx_adjust_dynamic_symbol (struct bfd_link_info *info,
3032				 struct elf_link_hash_entry *h)
3033{
3034  struct elf_kvx_link_hash_table *htab;
3035  asection *s;
3036
3037  /* If this is a function, put it in the procedure linkage table.  We
3038     will fill in the contents of the procedure linkage table later,
3039     when we know the address of the .got section.  */
3040  if (h->type == STT_FUNC || h->needs_plt)
3041    {
3042      if (h->plt.refcount <= 0
3043	  || ((SYMBOL_CALLS_LOCAL (info, h)
3044	       || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
3045		   && h->root.type == bfd_link_hash_undefweak))))
3046	{
	  /* This case can occur if we saw a call relocation
	     (PCREL17/PCREL27) in an input file, but the symbol wasn't
	     referred to by a dynamic object or all references were
	     garbage collected.  In that case we can end up resolving
	     the call directly.  */
3052	  h->plt.offset = (bfd_vma) - 1;
3053	  h->needs_plt = 0;
3054	}
3055
3056      return true;
3057    }
3058  else
3059    /* Otherwise, reset to -1.  */
3060    h->plt.offset = (bfd_vma) - 1;
3061
3062
3063  /* If this is a weak symbol, and there is a real definition, the
3064     processor independent code will have arranged for us to see the
3065     real definition first, and we can just use the same value.  */
3066  if (h->is_weakalias)
3067    {
3068      struct elf_link_hash_entry *def = weakdef (h);
3069      BFD_ASSERT (def->root.type == bfd_link_hash_defined);
3070      h->root.u.def.section = def->root.u.def.section;
3071      h->root.u.def.value = def->root.u.def.value;
3072      if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
3073	h->non_got_ref = def->non_got_ref;
3074      return true;
3075    }
3076
3077  /* If we are creating a shared library, we must presume that the
3078     only references to the symbol are via the global offset table.
3079     For such cases we need not do anything here; the relocations will
3080     be handled correctly by relocate_section.  */
3081  if (bfd_link_pic (info))
3082    return true;
3083
3084  /* If there are no references to this symbol that do not use the
3085     GOT, we don't need to generate a copy reloc.  */
3086  if (!h->non_got_ref)
3087    return true;
3088
3089  /* If -z nocopyreloc was given, we won't generate them either.  */
3090  if (info->nocopyreloc)
3091    {
3092      h->non_got_ref = 0;
3093      return true;
3094    }
3095
3096  /* We must allocate the symbol in our .dynbss section, which will
3097     become part of the .bss section of the executable.  There will be
3098     an entry for this symbol in the .dynsym section.  The dynamic
3099     object will contain position independent code, so all references
3100     from the dynamic object to this symbol will go through the global
3101     offset table.  The dynamic linker will use the .dynsym entry to
3102     determine the address it must put in the global offset table, so
3103     both the dynamic object and the regular object will refer to the
3104     same memory location for the variable.  */
3105
3106  htab = elf_kvx_hash_table (info);
3107
3108  /* We must generate a R_KVX_COPY reloc to tell the dynamic linker
3109     to copy the initial value out of the dynamic object and into the
3110     runtime process image.  */
3111  if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
3112    {
3113      htab->srelbss->size += RELOC_SIZE (htab);
3114      h->needs_copy = 1;
3115    }
3116
3117  s = htab->sdynbss;
3118
3119  return _bfd_elf_adjust_dynamic_copy (info, h, s);
3120}
3121
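/* Make sure ABFD has a bookkeeping table for its NUMBER local symbols,
   allocating a zeroed one on first use.  */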
3122static bool
3123elfNN_kvx_allocate_local_symbols (bfd *abfd, unsigned number)
3124{
3125  struct elf_kvx_local_symbol *locals;
3126  locals = elf_kvx_locals (abfd);
3127  if (locals == NULL)
3128    {
3129      locals = (struct elf_kvx_local_symbol *)
3130	bfd_zalloc (abfd, number * sizeof (struct elf_kvx_local_symbol));
3131      if (locals == NULL)
3132	return false;
3133      elf_kvx_locals (abfd) = locals;
3134    }
3135  return true;
3136}
3137
3138/* Create the .got section to hold the global offset table.  */
3139
3140static bool
3141kvx_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
3142{
3143  const struct elf_backend_data *bed = get_elf_backend_data (abfd);
3144  flagword flags;
3145  asection *s;
3146  struct elf_link_hash_entry *h;
3147  struct elf_link_hash_table *htab = elf_hash_table (info);
3148
3149  /* This function may be called more than once.  */
3150  s = bfd_get_linker_section (abfd, ".got");
3151  if (s != NULL)
3152    return true;
3153
3154  flags = bed->dynamic_sec_flags;
3155
3156  s = bfd_make_section_anyway_with_flags (abfd,
3157					  (bed->rela_plts_and_copies_p
3158					   ? ".rela.got" : ".rel.got"),
3159					  (bed->dynamic_sec_flags
3160					   | SEC_READONLY));
  if (s == NULL
      || !bfd_set_section_alignment (s, bed->s->log_file_align))
    return false;
3165  htab->srelgot = s;
3166
3167  s = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
3168  if (s == NULL
3169      || !bfd_set_section_alignment (s, bed->s->log_file_align))
3170    return false;
3171  htab->sgot = s;
3172  htab->sgot->size += GOT_ENTRY_SIZE;
3173
3174  if (bed->want_got_sym)
3175    {
3176      /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
3177	 (or .got.plt) section.  We don't do this in the linker script
3178	 because we don't want to define the symbol if we are not creating
3179	 a global offset table.  */
3180      h = _bfd_elf_define_linkage_sym (abfd, info, s,
3181				       "_GLOBAL_OFFSET_TABLE_");
3182      elf_hash_table (info)->hgot = h;
3183      if (h == NULL)
3184	return false;
3185    }
3186
3187  if (bed->want_got_plt)
3188    {
3189      s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
3190      if (s == NULL
3191	  || !bfd_set_section_alignment (s,
3192					 bed->s->log_file_align))
3193	return false;
3194      htab->sgotplt = s;
3195    }
3196
3197  /* The first bit of the global offset table is the header.  */
3198  s->size += bed->got_header_size;
3199
  /* We still need to handle GOT content when doing a static link
     with PIC.  */
  if (bfd_link_executable (info) && !bfd_link_pic (info))
    htab->dynobj = abfd;
3204
3205  return true;
3206}
3207
3208/* Look through the relocs for a section during the first phase.  */
3209
3210static bool
3211elfNN_kvx_check_relocs (bfd *abfd, struct bfd_link_info *info,
3212			    asection *sec, const Elf_Internal_Rela *relocs)
3213{
3214  Elf_Internal_Shdr *symtab_hdr;
3215  struct elf_link_hash_entry **sym_hashes;
3216  const Elf_Internal_Rela *rel;
3217  const Elf_Internal_Rela *rel_end;
3218  asection *sreloc;
3219
3220  struct elf_kvx_link_hash_table *htab;
3221
3222  if (bfd_link_relocatable (info))
3223    return true;
3224
3225  BFD_ASSERT (is_kvx_elf (abfd));
3226
3227  htab = elf_kvx_hash_table (info);
3228  sreloc = NULL;
3229
3230  symtab_hdr = &elf_symtab_hdr (abfd);
3231  sym_hashes = elf_sym_hashes (abfd);
3232
3233  rel_end = relocs + sec->reloc_count;
3234  for (rel = relocs; rel < rel_end; rel++)
3235    {
3236      struct elf_link_hash_entry *h;
3237      unsigned int r_symndx;
3238      unsigned int r_type;
3239      bfd_reloc_code_real_type bfd_r_type;
3240      Elf_Internal_Sym *isym;
3241
3242      r_symndx = ELFNN_R_SYM (rel->r_info);
3243      r_type = ELFNN_R_TYPE (rel->r_info);
3244
3245      if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
3246	{
3247	  /* xgettext:c-format */
3248	  _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd, r_symndx);
3249	  return false;
3250	}
3251
3252      if (r_symndx < symtab_hdr->sh_info)
3253	{
3254	  /* A local symbol.  */
3255	  isym = bfd_sym_from_r_symndx (&htab->sym_cache,
3256					abfd, r_symndx);
3257	  if (isym == NULL)
3258	    return false;
3259
3260	  h = NULL;
3261	}
3262      else
3263	{
3264	  h = sym_hashes[r_symndx - symtab_hdr->sh_info];
3265	  while (h->root.type == bfd_link_hash_indirect
3266		 || h->root.type == bfd_link_hash_warning)
3267	    h = (struct elf_link_hash_entry *) h->root.u.i.link;
3268	}
3269
3270      /* Could be done earlier, if h were already available.  */
3271      bfd_r_type = kvx_tls_transition (abfd, info, r_type, h, r_symndx);
3272
3273      if (h != NULL)
3274	{
	  /* Create the ifunc sections for static executables.  If we
	     never see an indirect function symbol nor build a static
	     executable, those sections will be empty and won't appear
	     in the output.  */
3279	  switch (bfd_r_type)
3280	    {
3281	    default:
3282	      break;
3283	    }
3284
3285	  /* It is referenced by a non-shared object. */
3286	  h->ref_regular = 1;
3287	}
3288
3289      switch (bfd_r_type)
3290	{
3291
3292	case BFD_RELOC_KVX_S43_LO10:
3293	case BFD_RELOC_KVX_S43_UP27:
3294	case BFD_RELOC_KVX_S43_EX6:
3295
3296	case BFD_RELOC_KVX_S37_LO10:
3297	case BFD_RELOC_KVX_S37_UP27:
3298
3299	case BFD_RELOC_KVX_S64_LO10:
3300	case BFD_RELOC_KVX_S64_UP27:
3301	case BFD_RELOC_KVX_S64_EX27:
3302
3303	case BFD_RELOC_KVX_32:
3304	case BFD_RELOC_KVX_64:
3305
3306	  /* We don't need to handle relocs into sections not going into
3307	     the "real" output.  */
3308	  if ((sec->flags & SEC_ALLOC) == 0)
3309	    break;
3310
3311	  if (h != NULL)
3312	    {
3313	      if (!bfd_link_pic (info))
3314		h->non_got_ref = 1;
3315
3316	      h->plt.refcount += 1;
3317	      h->pointer_equality_needed = 1;
3318	    }
3319
3320	  /* No need to do anything if we're not creating a shared
3321	     object.  */
3322	  if (! bfd_link_pic (info))
3323	    break;
3324
3325	  {
3326	    struct elf_dyn_relocs *p;
3327	    struct elf_dyn_relocs **head;
3328
3329	    /* We must copy these reloc types into the output file.
3330	       Create a reloc section in dynobj and make room for
3331	       this reloc.  */
3332	    if (sreloc == NULL)
3333	      {
3334		if (htab->root.dynobj == NULL)
3335		  htab->root.dynobj = abfd;
3336
3337		sreloc = _bfd_elf_make_dynamic_reloc_section
3338		  (sec, htab->root.dynobj, LOG_FILE_ALIGN, abfd, /*rela? */ true);
3339
3340		if (sreloc == NULL)
3341		  return false;
3342	      }
3343
3344	    /* If this is a global symbol, we count the number of
3345	       relocations we need for this symbol.  */
3346	    if (h != NULL)
3347	      {
3348		head = &h->dyn_relocs;
3349	      }
3350	    else
3351	      {
3352		/* Track dynamic relocs needed for local syms too.
3353		   We really need local syms available to do this
3354		   easily.  Oh well.  */
3355
3356		asection *s;
3357		void **vpp;
3358
3359		isym = bfd_sym_from_r_symndx (&htab->sym_cache,
3360					      abfd, r_symndx);
3361		if (isym == NULL)
3362		  return false;
3363
3364		s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3365		if (s == NULL)
3366		  s = sec;
3367
3368		/* Beware of type punned pointers vs strict aliasing
3369		   rules.  */
3370		vpp = &(elf_section_data (s)->local_dynrel);
3371		head = (struct elf_dyn_relocs **) vpp;
3372	      }
3373
3374	    p = *head;
3375	    if (p == NULL || p->sec != sec)
3376	      {
3377		bfd_size_type amt = sizeof *p;
3378		p = ((struct elf_dyn_relocs *)
3379		     bfd_zalloc (htab->root.dynobj, amt));
3380		if (p == NULL)
3381		  return false;
3382		p->next = *head;
3383		*head = p;
3384		p->sec = sec;
3385	      }
3386
3387	    p->count += 1;
3388
3389	  }
3390	  break;
3391
3392	case BFD_RELOC_KVX_S37_GOT_LO10:
3393	case BFD_RELOC_KVX_S37_GOT_UP27:
3394
3395	case BFD_RELOC_KVX_S37_GOTOFF_LO10:
3396	case BFD_RELOC_KVX_S37_GOTOFF_UP27:
3397
3398	case BFD_RELOC_KVX_S43_GOT_LO10:
3399	case BFD_RELOC_KVX_S43_GOT_UP27:
3400	case BFD_RELOC_KVX_S43_GOT_EX6:
3401
3402	case BFD_RELOC_KVX_S43_GOTOFF_LO10:
3403	case BFD_RELOC_KVX_S43_GOTOFF_UP27:
3404	case BFD_RELOC_KVX_S43_GOTOFF_EX6:
3405
3406	case BFD_RELOC_KVX_S37_TLS_GD_LO10:
3407	case BFD_RELOC_KVX_S37_TLS_GD_UP27:
3408
3409	case BFD_RELOC_KVX_S43_TLS_GD_LO10:
3410	case BFD_RELOC_KVX_S43_TLS_GD_UP27:
3411	case BFD_RELOC_KVX_S43_TLS_GD_EX6:
3412
3413	case BFD_RELOC_KVX_S37_TLS_IE_LO10:
3414	case BFD_RELOC_KVX_S37_TLS_IE_UP27:
3415
3416	case BFD_RELOC_KVX_S43_TLS_IE_LO10:
3417	case BFD_RELOC_KVX_S43_TLS_IE_UP27:
3418	case BFD_RELOC_KVX_S43_TLS_IE_EX6:
3419
3420	case BFD_RELOC_KVX_S37_TLS_LD_LO10:
3421	case BFD_RELOC_KVX_S37_TLS_LD_UP27:
3422
3423	case BFD_RELOC_KVX_S43_TLS_LD_LO10:
3424	case BFD_RELOC_KVX_S43_TLS_LD_UP27:
3425	case BFD_RELOC_KVX_S43_TLS_LD_EX6:
3426	  {
3427	    unsigned got_type;
3428	    unsigned old_got_type;
3429
3430	    got_type = kvx_reloc_got_type (bfd_r_type);
3431
3432	    if (h)
3433	      {
3434		h->got.refcount += 1;
3435		old_got_type = elf_kvx_hash_entry (h)->got_type;
3436	      }
3437	    else
3438	      {
3439		struct elf_kvx_local_symbol *locals;
3440
3441		if (!elfNN_kvx_allocate_local_symbols
3442		    (abfd, symtab_hdr->sh_info))
3443		  return false;
3444
3445		locals = elf_kvx_locals (abfd);
3446		BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
3447		locals[r_symndx].got_refcount += 1;
3448		old_got_type = locals[r_symndx].got_type;
3449	      }
3450
3451	    /* We will already have issued an error message if there
3452	       is a TLS/non-TLS mismatch, based on the symbol type.
3453	       So just combine any TLS types needed.  */
3454	    if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
3455		&& got_type != GOT_NORMAL)
3456	      got_type |= old_got_type;
3457
3458	    /* If the symbol is accessed by both IE and GD methods, we
3459	       are able to relax.  Turn off the GD flag, without
3460	       messing up with any other kind of TLS types that may be
3461	       involved.  */
3462	    /* Disabled untested and unused TLS */
3463	    /* if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type)) */
3464	    /*   got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD); */
3465
3466	    if (old_got_type != got_type)
3467	      {
3468		if (h != NULL)
3469		  elf_kvx_hash_entry (h)->got_type = got_type;
3470		else
3471		  {
3472		    struct elf_kvx_local_symbol *locals;
3473		    locals = elf_kvx_locals (abfd);
3474		    BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
3475		    locals[r_symndx].got_type = got_type;
3476		  }
3477	      }
3478
3479	    if (htab->root.dynobj == NULL)
3480	      htab->root.dynobj = abfd;
3481	    if (! kvx_elf_create_got_section (htab->root.dynobj, info))
3482	      return false;
3483	    break;
3484	  }
3485
3486	case BFD_RELOC_KVX_S64_GOTADDR_LO10:
3487	case BFD_RELOC_KVX_S64_GOTADDR_UP27:
3488	case BFD_RELOC_KVX_S64_GOTADDR_EX27:
3489
3490	case BFD_RELOC_KVX_S43_GOTADDR_LO10:
3491	case BFD_RELOC_KVX_S43_GOTADDR_UP27:
3492	case BFD_RELOC_KVX_S43_GOTADDR_EX6:
3493
3494	case BFD_RELOC_KVX_S37_GOTADDR_LO10:
3495	case BFD_RELOC_KVX_S37_GOTADDR_UP27:
3496
3497	  if (htab->root.dynobj == NULL)
3498	    htab->root.dynobj = abfd;
3499	  if (! kvx_elf_create_got_section (htab->root.dynobj, info))
3500	    return false;
3501	  break;
3502
3503	case BFD_RELOC_KVX_PCREL27:
3504	case BFD_RELOC_KVX_PCREL17:
3505	  /* If this is a local symbol then we resolve it
3506	     directly without creating a PLT entry.  */
3507	  if (h == NULL)
3508	    continue;
3509
3510	  h->needs_plt = 1;
3511	  if (h->plt.refcount <= 0)
3512	    h->plt.refcount = 1;
3513	  else
3514	    h->plt.refcount += 1;
3515	  break;
3516
3517	default:
3518	  break;
3519	}
3520    }
3521
3522  return true;
3523}
3524
3525static bool
3526elfNN_kvx_init_file_header (bfd *abfd, struct bfd_link_info *link_info)
3527{
3528  Elf_Internal_Ehdr *i_ehdrp;	/* ELF file header, internal form.  */
3529
3530  if (!_bfd_elf_init_file_header (abfd, link_info))
3531    return false;
3532
3533  i_ehdrp = elf_elfheader (abfd);
3534  i_ehdrp->e_ident[EI_ABIVERSION] = KVX_ELF_ABI_VERSION;
3535  return true;
3536}
3537
3538static enum elf_reloc_type_class
3539elfNN_kvx_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
3540				const asection *rel_sec ATTRIBUTE_UNUSED,
3541				const Elf_Internal_Rela *rela)
3542{
3543  switch ((int) ELFNN_R_TYPE (rela->r_info))
3544    {
3545    case R_KVX_RELATIVE:
3546      return reloc_class_relative;
3547    case R_KVX_JMP_SLOT:
3548      return reloc_class_plt;
3549    case R_KVX_COPY:
3550      return reloc_class_copy;
3551    default:
3552      return reloc_class_normal;
3553    }
3554}
3555
3556/* A structure used to record a list of sections, independently
3557   of the next and prev fields in the asection structure.  */
3558typedef struct section_list
3559{
3560  asection *sec;
3561  struct section_list *next;
3562  struct section_list *prev;
3563}
3564section_list;
3565
3566typedef struct
3567{
3568  void *finfo;
3569  struct bfd_link_info *info;
3570  asection *sec;
3571  int sec_shndx;
3572  int (*func) (void *, const char *, Elf_Internal_Sym *,
3573	       asection *, struct elf_link_hash_entry *);
3574} output_arch_syminfo;
3575
3576/* Output a single local symbol for a generated stub.  */
3577
3578static bool
3579elfNN_kvx_output_stub_sym (output_arch_syminfo *osi, const char *name,
3580			       bfd_vma offset, bfd_vma size)
3581{
3582  Elf_Internal_Sym sym;
3583
3584  sym.st_value = (osi->sec->output_section->vma
3585		  + osi->sec->output_offset + offset);
3586  sym.st_size = size;
3587  sym.st_other = 0;
3588  sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
3589  sym.st_shndx = osi->sec_shndx;
3590  return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
3591}
3592
3593static bool
3594kvx_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
3595{
3596  struct elf_kvx_stub_hash_entry *stub_entry;
3597  asection *stub_sec;
3598  bfd_vma addr;
3599  char *stub_name;
3600  output_arch_syminfo *osi;
3601
3602  /* Massage our args to the form they really have.  */
3603  stub_entry = (struct elf_kvx_stub_hash_entry *) gen_entry;
3604  osi = (output_arch_syminfo *) in_arg;
3605
3606  stub_sec = stub_entry->stub_sec;
3607
3608  /* Ensure this stub is attached to the current section being
3609     processed.  */
3610  if (stub_sec != osi->sec)
3611    return true;
3612
3613  addr = (bfd_vma) stub_entry->stub_offset;
3614
3615  stub_name = stub_entry->output_name;
3616
3617  switch (stub_entry->stub_type)
3618    {
3619    case kvx_stub_long_branch:
3620      if (!elfNN_kvx_output_stub_sym
3621	  (osi, stub_name, addr, sizeof (elfNN_kvx_long_branch_stub)))
3622	return false;
3623      break;
3624
3625    default:
3626      abort ();
3627    }
3628
3629  return true;
3630}
3631
3632/* Output mapping symbols for linker generated sections.  */
3633
3634static bool
3635elfNN_kvx_output_arch_local_syms (bfd *output_bfd,
3636				  struct bfd_link_info *info,
3637				  void *finfo,
3638				  int (*func) (void *, const char *,
3639					       Elf_Internal_Sym *,
3640					       asection *,
3641					       struct elf_link_hash_entry *))
3642{
3643  output_arch_syminfo osi;
3644  struct elf_kvx_link_hash_table *htab;
3645
3646  htab = elf_kvx_hash_table (info);
3647
3648  osi.finfo = finfo;
3649  osi.info = info;
3650  osi.func = func;
3651
3652  /* Long call stubs.  */
3653  if (htab->stub_bfd && htab->stub_bfd->sections)
3654    {
3655      asection *stub_sec;
3656
3657      for (stub_sec = htab->stub_bfd->sections;
3658	   stub_sec != NULL; stub_sec = stub_sec->next)
3659	{
3660	  /* Ignore non-stub sections.  */
3661	  if (!strstr (stub_sec->name, STUB_SUFFIX))
3662	    continue;
3663
3664	  osi.sec = stub_sec;
3665
3666	  osi.sec_shndx = _bfd_elf_section_from_bfd_section
3667	    (output_bfd, osi.sec->output_section);
3668
3669	  bfd_hash_traverse (&htab->stub_hash_table, kvx_map_one_stub,
3670			     &osi);
3671	}
3672    }
3673
3674  /* Finally, output mapping symbols for the PLT.  */
3675  if (!htab->root.splt || htab->root.splt->size == 0)
3676    return true;
3677
3678  osi.sec_shndx = _bfd_elf_section_from_bfd_section
3679    (output_bfd, htab->root.splt->output_section);
3680  osi.sec = htab->root.splt;
3681
3682  return true;
3683
3684}
3685
3686/* Allocate target specific section data.  */
3687
3688static bool
3689elfNN_kvx_new_section_hook (bfd *abfd, asection *sec)
3690{
3691  if (!sec->used_by_bfd)
3692    {
3693      _kvx_elf_section_data *sdata;
3694      bfd_size_type amt = sizeof (*sdata);
3695
3696      sdata = bfd_zalloc (abfd, amt);
3697      if (sdata == NULL)
3698	return false;
3699      sec->used_by_bfd = sdata;
3700    }
3701
3702  return _bfd_elf_new_section_hook (abfd, sec);
3703}
3704
3705/* Create dynamic sections. This is different from the ARM backend in that
3706   the got, plt, gotplt and their relocation sections are all created in the
3707   standard part of the bfd elf backend.  */
3708
3709static bool
3710elfNN_kvx_create_dynamic_sections (bfd *dynobj,
3711				   struct bfd_link_info *info)
3712{
3713  struct elf_kvx_link_hash_table *htab;
3714
3715  /* We need to create .got section.  */
3716  if (!kvx_elf_create_got_section (dynobj, info))
3717    return false;
3718
3719  if (!_bfd_elf_create_dynamic_sections (dynobj, info))
3720    return false;
3721
3722  htab = elf_kvx_hash_table (info);
3723  htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
3724  if (!bfd_link_pic (info))
3725    htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
3726
3727  if (!htab->sdynbss || (!bfd_link_pic (info) && !htab->srelbss))
3728    abort ();
3729
3730  return true;
3731}
3732
3733
3734/* Allocate space in .plt, .got and associated reloc sections for
3735   dynamic relocs.  */
3736
3737static bool
3738elfNN_kvx_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
3739{
3740  struct bfd_link_info *info;
3741  struct elf_kvx_link_hash_table *htab;
3742  struct elf_dyn_relocs *p;
3743
3744  /* An example of a bfd_link_hash_indirect symbol is a versioned
3745     symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
3746     -> __gxx_personality_v0(bfd_link_hash_defined)
3747
3748     There is no need to process bfd_link_hash_indirect symbols here
3749     because we will also be presented with the concrete instance of
3750     the symbol and elfNN_kvx_copy_indirect_symbol () will have been
3751     called to copy all relevant data from the generic to the concrete
3752     symbol instance.  */
3753  if (h->root.type == bfd_link_hash_indirect)
3754    return true;
3755
3756  if (h->root.type == bfd_link_hash_warning)
3757    h = (struct elf_link_hash_entry *) h->root.u.i.link;
3758
3759  info = (struct bfd_link_info *) inf;
3760  htab = elf_kvx_hash_table (info);
3761
3762  if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
3763    {
3764      /* Make sure this symbol is output as a dynamic symbol.
3765	 Undefined weak syms won't yet be marked as dynamic.  */
3766      if (h->dynindx == -1 && !h->forced_local)
3767	{
3768	  if (!bfd_elf_link_record_dynamic_symbol (info, h))
3769	    return false;
3770	}
3771
3772      if (bfd_link_pic (info) || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
3773	{
3774	  asection *s = htab->root.splt;
3775
3776	  /* If this is the first .plt entry, make room for the special
3777	     first entry.  */
3778	  if (s->size == 0)
3779	    s->size += htab->plt_header_size;
3780
3781	  h->plt.offset = s->size;
3782
3783	  /* If this symbol is not defined in a regular file, and we are
3784	     not generating a shared library, then set the symbol to this
3785	     location in the .plt.  This is required to make function
3786	     pointers compare as equal between the normal executable and
3787	     the shared library.  */
3788	  if (!bfd_link_pic (info) && !h->def_regular)
3789	    {
3790	      h->root.u.def.section = s;
3791	      h->root.u.def.value = h->plt.offset;
3792	    }
3793
3794	  /* Make room for this entry. For now we only create the
3795	     small model PLT entries. We later need to find a way
3796	     of relaxing into these from the large model PLT entries.  */
3797	  s->size += PLT_SMALL_ENTRY_SIZE;
3798
3799	  /* We also need to make an entry in the .got.plt section, which
3800	     will be placed in the .got section by the linker script.  */
3801	  htab->root.sgotplt->size += GOT_ENTRY_SIZE;
3802
3803	  /* We also need to make an entry in the .rela.plt section.  */
3804	  htab->root.srelplt->size += RELOC_SIZE (htab);
3805
3806	  /* We need to ensure that all GOT entries that serve the PLT
3807	     are consecutive with the special GOT slots [0] [1] and
3808	     [2].  Any additional relocations must be placed after the
3809	     PLT related entries.  We abuse the reloc_count such that
3810	     during sizing we adjust reloc_count to indicate the
3811	     number of PLT related reserved entries.  In subsequent
3812	     phases, when filling in the contents of the reloc entries,
3813	     PLT related entries are placed by computing their PLT
3814	     index (0 .. reloc_count), while other non-PLT relocs are
3815	     placed at the slot indicated by reloc_count, and
3816	     reloc_count is then updated.  */
3817
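	  /* Sketch of the resulting layout (assuming the small PLT model):
	       .got.plt : [0] [1] [2] | slot for PLT#0 | slot for PLT#1 | ...
	       .rela.plt: JMP_SLOT reloc for PLT#0, PLT#1, ... with any
	       other relocs appended at the slot given by reloc_count.  */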
3818	  htab->root.srelplt->reloc_count++;
3819	}
3820      else
3821	{
3822	  h->plt.offset = (bfd_vma) - 1;
3823	  h->needs_plt = 0;
3824	}
3825    }
3826  else
3827    {
3828      h->plt.offset = (bfd_vma) - 1;
3829      h->needs_plt = 0;
3830    }
3831
3832  if (h->got.refcount > 0)
3833    {
3834      bool dyn;
3835      unsigned got_type = elf_kvx_hash_entry (h)->got_type;
3836
3837      h->got.offset = (bfd_vma) - 1;
3838
3839      dyn = htab->root.dynamic_sections_created;
3840
3841      /* Make sure this symbol is output as a dynamic symbol.
3842	 Undefined weak syms won't yet be marked as dynamic.  */
3843      if (dyn && h->dynindx == -1 && !h->forced_local)
3844	{
3845	  if (!bfd_elf_link_record_dynamic_symbol (info, h))
3846	    return false;
3847	}
3848
3849      if (got_type == GOT_UNKNOWN)
3850	{
3851	  (*_bfd_error_handler)
3852	    (_("relocation against `%s' has faulty GOT type "),
3853	     (h) ? h->root.root.string : "a local symbol");
3854	  bfd_set_error (bfd_error_bad_value);
3855	  return false;
3856	}
3857      else if (got_type == GOT_NORMAL)
3858	{
3859	  h->got.offset = htab->root.sgot->size;
3860	  htab->root.sgot->size += GOT_ENTRY_SIZE;
3861	  if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3862	       || h->root.type != bfd_link_hash_undefweak)
3863	      && (bfd_link_pic (info)
3864		  || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
3865	    {
3866	      htab->root.srelgot->size += RELOC_SIZE (htab);
3867	    }
3868	}
3869      else
3870	{
3871	  int indx;
3872
3873	  /* Any of these will require 2 GOT slots because
3874	   * they use __tls_get_addr() */
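	  /* Presumably the two slots hold the module index and the offset
	     handed to __tls_get_addr(), as on other ELF targets.  */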
3875	  if (got_type & (GOT_TLS_GD | GOT_TLS_LD))
3876	    {
3877	      h->got.offset = htab->root.sgot->size;
3878	      htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
3879	    }
3880
3881	  if (got_type & GOT_TLS_IE)
3882	    {
3883	      h->got.offset = htab->root.sgot->size;
3884	      htab->root.sgot->size += GOT_ENTRY_SIZE;
3885	    }
3886
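	  /* Dynamic symbol table index of H, or 0 when H has none.  */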
3887	  indx = h && h->dynindx != -1 ? h->dynindx : 0;
3888	  if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3889	       || h->root.type != bfd_link_hash_undefweak)
3890	      && (bfd_link_pic (info)
3891		  || indx != 0
3892		  || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
3893	    {
3894	      /* Only the GD case requires 2 relocations. */
3895	      if (got_type & GOT_TLS_GD)
3896		htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
3897
3898	      /* LD needs a DTPMOD reloc, IE needs a DTPOFF. */
3899	      if (got_type & (GOT_TLS_LD | GOT_TLS_IE))
3900		htab->root.srelgot->size += RELOC_SIZE (htab);
3901	    }
3902	}
3903    }
3904  else
3905    {
3906      h->got.offset = (bfd_vma) - 1;
3907    }
3908
3909  if (h->dyn_relocs == NULL)
3910    return true;
3911
3912  /* In the shared -Bsymbolic case, discard space allocated for
3913     dynamic pc-relative relocs against symbols which turn out to be
3914     defined in regular objects.  For the normal shared case, discard
3915     space for pc-relative relocs that have become local due to symbol
3916     visibility changes.  */
3917
3918  if (bfd_link_pic (info))
3919    {
3920      /* Relocs that use pc_count are those that appear on a call
3921	 insn, or certain REL relocs that can be generated via assembly.
3922	 We want calls to protected symbols to resolve directly to the
3923	 function rather than going via the plt.  If people want
3924	 function pointer comparisons to work as expected then they
3925	 should avoid writing weird assembly.  */
3926      if (SYMBOL_CALLS_LOCAL (info, h))
3927	{
3928	  struct elf_dyn_relocs **pp;
3929
3930	  for (pp = &h->dyn_relocs; (p = *pp) != NULL;)
3931	    {
3932	      p->count -= p->pc_count;
3933	      p->pc_count = 0;
3934	      if (p->count == 0)
3935		*pp = p->next;
3936	      else
3937		pp = &p->next;
3938	    }
3939	}
3940
3941      /* Also discard relocs on undefined weak syms with non-default
3942	 visibility.  */
3943      if (h->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
3944	{
3945	  if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
3946	      || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
3947	    h->dyn_relocs = NULL;
3948
3949	  /* Make sure undefined weak symbols are output as a dynamic
3950	     symbol in PIEs.  */
3951	  else if (h->dynindx == -1
3952		   && !h->forced_local
3953		   && !bfd_elf_link_record_dynamic_symbol (info, h))
3954	    return false;
3955	}
3956
3957    }
3958  else if (ELIMINATE_COPY_RELOCS)
3959    {
3960      /* For the non-shared case, discard space for relocs against
3961	 symbols which turn out to need copy relocs or are not
3962	 dynamic.  */
3963
3964      if (!h->non_got_ref
3965	  && ((h->def_dynamic
3966	       && !h->def_regular)
3967	      || (htab->root.dynamic_sections_created
3968		  && (h->root.type == bfd_link_hash_undefweak
3969		      || h->root.type == bfd_link_hash_undefined))))
3970	{
3971	  /* Make sure this symbol is output as a dynamic symbol.
3972	     Undefined weak syms won't yet be marked as dynamic.  */
3973	  if (h->dynindx == -1
3974	      && !h->forced_local
3975	      && !bfd_elf_link_record_dynamic_symbol (info, h))
3976	    return false;
3977
3978	  /* If that succeeded, we know we'll be keeping all the
3979	     relocs.  */
3980	  if (h->dynindx != -1)
3981	    goto keep;
3982	}
3983
3984      h->dyn_relocs = NULL;
3985
3986    keep:;
3987    }
3988
3989  /* Finally, allocate space.  */
3990  for (p = h->dyn_relocs; p != NULL; p = p->next)
3991    {
3992      asection *sreloc;
3993
3994      sreloc = elf_section_data (p->sec)->sreloc;
3995
3996      BFD_ASSERT (sreloc != NULL);
3997
3998      sreloc->size += p->count * RELOC_SIZE (htab);
3999    }
4000
4001  return true;
4002}
4003
4004/* Find any dynamic relocs that apply to read-only sections.  */
4005
4006static bool
4007kvx_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
4008{
4009  struct elf_dyn_relocs * p;
4010
4011  for (p = h->dyn_relocs; p != NULL; p = p->next)
4012    {
4013      asection *s = p->sec;
4014
4015      if (s != NULL && (s->flags & SEC_READONLY) != 0)
4016	{
4017	  struct bfd_link_info *info = (struct bfd_link_info *) inf;
4018
4019	  info->flags |= DF_TEXTREL;
4020	  info->callbacks->minfo (_("%pB: dynamic relocation against `%pT' in "
4021				    "read-only section `%pA'\n"),
4022				  s->owner, h->root.root.string, s);
4023
4024	  /* Not an error, just cut short the traversal.  */
4025	  return false;
4026	}
4027    }
4028  return true;
4029}
4030
4031/* This is the most important function of all.  Innocuously named,
4032   though!  */
4033static bool
4034elfNN_kvx_late_size_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
4035			      struct bfd_link_info *info)
4036{
4037  struct elf_kvx_link_hash_table *htab;
4038  bfd *dynobj;
4039  asection *s;
4040  bool relocs;
4041  bfd *ibfd;
4042
4043  htab = elf_kvx_hash_table ((info));
4044  dynobj = htab->root.dynobj;
4045  if (dynobj == NULL)
4046    return true;
4047
4048  if (htab->root.dynamic_sections_created)
4049    {
4050      if (bfd_link_executable (info) && !info->nointerp)
4051	{
4052	  s = bfd_get_linker_section (dynobj, ".interp");
4053	  if (s == NULL)
4054	    abort ();
4055	  s->size = sizeof ELF_DYNAMIC_INTERPRETER;
4056	  s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
4057	}
4058    }
4059
4060  /* Set up .got offsets for local syms, and space for local dynamic
4061     relocs.  */
4062  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
4063    {
4064      struct elf_kvx_local_symbol *locals = NULL;
4065      Elf_Internal_Shdr *symtab_hdr;
4066      asection *srel;
4067      unsigned int i;
4068
4069      if (!is_kvx_elf (ibfd))
4070	continue;
4071
4072      for (s = ibfd->sections; s != NULL; s = s->next)
4073	{
4074	  struct elf_dyn_relocs *p;
4075
4076	  for (p = (struct elf_dyn_relocs *)
4077		 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
4078	    {
4079	      if (!bfd_is_abs_section (p->sec)
4080		  && bfd_is_abs_section (p->sec->output_section))
4081		{
4082		  /* Input section has been discarded, either because
4083		     it is a copy of a linkonce section or due to
4084		     linker script /DISCARD/, so we'll be discarding
4085		     the relocs too.  */
4086		}
4087	      else if (p->count != 0)
4088		{
4089		  srel = elf_section_data (p->sec)->sreloc;
4090		  srel->size += p->count * RELOC_SIZE (htab);
4091		  if ((p->sec->output_section->flags & SEC_READONLY) != 0)
4092		    info->flags |= DF_TEXTREL;
4093		}
4094	    }
4095	}
4096
4097      locals = elf_kvx_locals (ibfd);
4098      if (!locals)
4099	continue;
4100
4101      symtab_hdr = &elf_symtab_hdr (ibfd);
4102      srel = htab->root.srelgot;
4103      for (i = 0; i < symtab_hdr->sh_info; i++)
4104	{
4105	  locals[i].got_offset = (bfd_vma) - 1;
4106	  if (locals[i].got_refcount > 0)
4107	    {
4108	      unsigned got_type = locals[i].got_type;
4109	      if (got_type & (GOT_TLS_GD | GOT_TLS_LD))
4110		{
4111		  locals[i].got_offset = htab->root.sgot->size;
4112		  htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
4113		}
4114
4115	      if (got_type & (GOT_NORMAL | GOT_TLS_IE ))
4116		{
4117		  locals[i].got_offset = htab->root.sgot->size;
4118		  htab->root.sgot->size += GOT_ENTRY_SIZE;
4119		}
4120
4121	      if (got_type == GOT_UNKNOWN)
4122		{
4123		}
4124
4125	      if (bfd_link_pic (info))
4126		{
4127		  if (got_type & GOT_TLS_GD)
4128		    htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
4129
4130		  if (got_type & GOT_TLS_IE
4131		      || got_type & GOT_TLS_LD
4132		      || got_type & GOT_NORMAL)
4133		    htab->root.srelgot->size += RELOC_SIZE (htab);
4134		}
4135	    }
4136	  else
4137	    {
4138	      locals[i].got_refcount = (bfd_vma) - 1;
4139	    }
4140	}
4141    }
4142
4143
4144  /* Allocate global sym .plt and .got entries, and space for global
4145     sym dynamic relocs.  */
4146  elf_link_hash_traverse (&htab->root, elfNN_kvx_allocate_dynrelocs,
4147			  info);
4148
4149  /* For every jump slot reserved in the sgotplt, reloc_count is
4150     incremented.  However, when we reserve space for TLS descriptors,
4151     it's not incremented, so in order to compute the space reserved
4152     for them, it suffices to multiply the reloc count by the jump
4153     slot size.  */
4154
4155  if (htab->root.srelplt)
4156    htab->sgotplt_jump_table_size = kvx_compute_jump_table_size (htab);
4157
4158  /* We now have determined the sizes of the various dynamic sections.
4159     Allocate memory for them.  */
4160  relocs = false;
4161  for (s = dynobj->sections; s != NULL; s = s->next)
4162    {
4163      if ((s->flags & SEC_LINKER_CREATED) == 0)
4164	continue;
4165
4166      if (s == htab->root.splt
4167	  || s == htab->root.sgot
4168	  || s == htab->root.sgotplt
4169	  || s == htab->root.iplt
4170	  || s == htab->root.igotplt || s == htab->sdynbss)
4171	{
4172	  /* Strip this section if we don't need it; see the
4173	     comment below.  */
4174	}
4175      else if (startswith (bfd_section_name (s), ".rela"))
4176	{
4177	  if (s->size != 0 && s != htab->root.srelplt)
4178	    relocs = true;
4179
4180	  /* We use the reloc_count field as a counter if we need
4181	     to copy relocs into the output file.  */
4182	  if (s != htab->root.srelplt)
4183	    s->reloc_count = 0;
4184	}
4185      else
4186	{
4187	  /* It's not one of our sections, so don't allocate space.  */
4188	  continue;
4189	}
4190
4191      if (s->size == 0)
4192	{
4193	  /* If we don't need this section, strip it from the
4194	     output file.  This is mostly to handle .rela.bss and
4195	     .rela.plt.  We must create both sections in
4196	     create_dynamic_sections, because they must be created
4197	     before the linker maps input sections to output
4198	     sections.  The linker does that before
4199	     adjust_dynamic_symbol is called, and it is that
4200	     function which decides whether anything needs to go
4201	     into these sections.  */
4202
4203	  s->flags |= SEC_EXCLUDE;
4204	  continue;
4205	}
4206
4207      if ((s->flags & SEC_HAS_CONTENTS) == 0)
4208	continue;
4209
4210      /* Allocate memory for the section contents.  We use bfd_zalloc
4211	 here in case unused entries are not reclaimed before the
4212	 section's contents are written out.  This should not happen,
4213	 but this way if it does, we get a R_KVX_NONE reloc instead
4214	 of garbage.  */
4215      s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
4216      if (s->contents == NULL)
4217	return false;
4218    }
4219
4220  if (htab->root.dynamic_sections_created)
4221    {
4222      /* Add some entries to the .dynamic section.  We fill in the
4223	 values later, in elfNN_kvx_finish_dynamic_sections, but we
4224	 must add the entries now so that we get the correct size for
4225	 the .dynamic section.  The DT_DEBUG entry is filled in by the
4226	 dynamic linker and used by the debugger.  */
4227#define add_dynamic_entry(TAG, VAL)			\
4228      _bfd_elf_add_dynamic_entry (info, TAG, VAL)
4229
4230      if (bfd_link_executable (info))
4231	{
4232	  if (!add_dynamic_entry (DT_DEBUG, 0))
4233	    return false;
4234	}
4235
4236      if (htab->root.splt->size != 0)
4237	{
4238	  if (!add_dynamic_entry (DT_PLTGOT, 0)
4239	      || !add_dynamic_entry (DT_PLTRELSZ, 0)
4240	      || !add_dynamic_entry (DT_PLTREL, DT_RELA)
4241	      || !add_dynamic_entry (DT_JMPREL, 0))
4242	    return false;
4243	}
4244
4245      if (relocs)
4246	{
4247	  if (!add_dynamic_entry (DT_RELA, 0)
4248	      || !add_dynamic_entry (DT_RELASZ, 0)
4249	      || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
4250	    return false;
4251
4252	  /* If any dynamic relocs apply to a read-only section,
4253	     then we need a DT_TEXTREL entry.  */
4254	  if ((info->flags & DF_TEXTREL) == 0)
4255	    elf_link_hash_traverse (&htab->root, kvx_readonly_dynrelocs,
4256				    info);
4257
4258	  if ((info->flags & DF_TEXTREL) != 0)
4259	    {
4260	      if (!add_dynamic_entry (DT_TEXTREL, 0))
4261		return false;
4262	    }
4263	}
4264    }
4265#undef add_dynamic_entry
4266
4267  return true;
4268}
4269
4270static inline void
4271elf_kvx_update_plt_entry (bfd *output_bfd,
4272			  bfd_reloc_code_real_type r_type,
4273			  bfd_byte *plt_entry, bfd_vma value)
4274{
4275  reloc_howto_type *howto = elfNN_kvx_howto_from_bfd_reloc (r_type);
4276  BFD_ASSERT(howto != NULL);
4277  _bfd_kvx_elf_put_addend (output_bfd, plt_entry, r_type, howto, value);
4278}
4279
4280static void
4281elfNN_kvx_create_small_pltn_entry (struct elf_link_hash_entry *h,
4282				   struct elf_kvx_link_hash_table *htab,
4283				   bfd *output_bfd)
4284{
4285  bfd_byte *plt_entry;
4286  bfd_vma plt_index;
4287  bfd_vma got_offset;
4288  bfd_vma gotplt_entry_address;
4289  bfd_vma plt_entry_address;
4290  Elf_Internal_Rela rela;
4291  bfd_byte *loc;
4292  asection *plt, *gotplt, *relplt;
4293
4294  plt = htab->root.splt;
4295  gotplt = htab->root.sgotplt;
4296  relplt = htab->root.srelplt;
4297
4298  /* Get the index in the procedure linkage table which
4299     corresponds to this symbol.  This is the index of this symbol
4300     in all the symbols for which we are making plt entries.  The
4301     first entry in the procedure linkage table is reserved.
4302
4303     Get the offset into the .got table of the entry that
4304     corresponds to this function.  Each .got entry is GOT_ENTRY_SIZE
4305     bytes. The first three are reserved for the dynamic linker.
4306
4307     For static executables, we don't reserve anything.  */
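  /* Purely as an illustration: with GOT_ENTRY_SIZE == 8 (ELF64), the first
     PLT slot (plt_index 0) gets got_offset (0 + 3) * 8 = 24, i.e. the
     fourth .got.plt entry.  */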
4308
4309  if (plt == htab->root.splt)
4310    {
4311      plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
4312      got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
4313    }
4314  else
4315    {
4316      plt_index = h->plt.offset / htab->plt_entry_size;
4317      got_offset = plt_index * GOT_ENTRY_SIZE;
4318    }
4319
4320  plt_entry = plt->contents + h->plt.offset;
4321  plt_entry_address = plt->output_section->vma
4322    + plt->output_offset + h->plt.offset;
4323  gotplt_entry_address = gotplt->output_section->vma +
4324    gotplt->output_offset + got_offset;
4325
4326  /* Copy in the boiler-plate for the PLTn entry.  */
4327  memcpy (plt_entry, elfNN_kvx_small_plt_entry, PLT_SMALL_ENTRY_SIZE);
4328
4329  /* Patch the loading of the GOT entry, relative to the PLT entry
4330     address. */
4331
4332  /* Use a 37-bit offset for both 32-bit and 64-bit modes.
4333     Fill the LO10 of the lw $r9 = 0[$r14] instruction.  */
4334  elf_kvx_update_plt_entry(output_bfd, BFD_RELOC_KVX_S37_LO10,
4335			   plt_entry+4,
4336			   gotplt_entry_address - plt_entry_address);
4337
4338  /* Fill the UP27 of the lw $r9 = 0[$r14] instruction.  */
4339  elf_kvx_update_plt_entry(output_bfd, BFD_RELOC_KVX_S37_UP27,
4340			   plt_entry+8,
4341			   gotplt_entry_address - plt_entry_address);
4342
4343  rela.r_offset = gotplt_entry_address;
4344
4345  /* Fill in the entry in the .rela.plt section.  */
4346  rela.r_info = ELFNN_R_INFO (h->dynindx, R_KVX_JMP_SLOT);
4347  rela.r_addend = 0;
4348
4349  /* Compute the relocation entry to use based on the PLT index, and do
4350     not adjust reloc_count; it has already been adjusted to account
4351     for this entry.  */
4352  loc = relplt->contents + plt_index * RELOC_SIZE (htab);
4353  bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
4354}
4355
4356/* Size sections even though they're not dynamic.  We use this hook to
4357   set up _TLS_MODULE_BASE_, if needed.  */
4358
4359static bool
4360elfNN_kvx_early_size_sections (bfd *output_bfd, struct bfd_link_info *info)
4361{
4362  asection *tls_sec;
4363
4364  if (bfd_link_relocatable (info))
4365    return true;
4366
4367  tls_sec = elf_hash_table (info)->tls_sec;
4368
4369  if (tls_sec)
4370    {
4371      struct elf_link_hash_entry *tlsbase;
4372
4373      tlsbase = elf_link_hash_lookup (elf_hash_table (info),
4374				      "_TLS_MODULE_BASE_", true, true, false);
4375
4376      if (tlsbase)
4377	{
4378	  struct bfd_link_hash_entry *h = NULL;
4379	  const struct elf_backend_data *bed =
4380	    get_elf_backend_data (output_bfd);
4381
4382	  if (!(_bfd_generic_link_add_one_symbol
4383		(info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
4384		 tls_sec, 0, NULL, false, bed->collect, &h)))
4385	    return false;
4386
4387	  tlsbase->type = STT_TLS;
4388	  tlsbase = (struct elf_link_hash_entry *) h;
4389	  tlsbase->def_regular = 1;
4390	  tlsbase->other = STV_HIDDEN;
4391	  (*bed->elf_backend_hide_symbol) (info, tlsbase, true);
4392	}
4393    }
4394
4395  return true;
4396}
4397
4398/* Finish up dynamic symbol handling.  We set the contents of various
4399   dynamic sections here.  */
4400static bool
4401elfNN_kvx_finish_dynamic_symbol (bfd *output_bfd,
4402				 struct bfd_link_info *info,
4403				 struct elf_link_hash_entry *h,
4404				 Elf_Internal_Sym *sym)
4405{
4406  struct elf_kvx_link_hash_table *htab;
4407  htab = elf_kvx_hash_table (info);
4408
4409  if (h->plt.offset != (bfd_vma) - 1)
4410    {
4411      asection *plt = NULL, *gotplt = NULL, *relplt = NULL;
4412
4413      /* This symbol has an entry in the procedure linkage table.  Set
4414	 it up.  */
4415
4416      if (htab->root.splt != NULL)
4417	{
4418	  plt = htab->root.splt;
4419	  gotplt = htab->root.sgotplt;
4420	  relplt = htab->root.srelplt;
4421	}
4422
4423      /* Sanity check: the PLT related sections must exist, and the
4424	 symbol must be eligible for a PLT entry.  */
4425      if ((h->dynindx == -1
4426	   && !((h->forced_local || bfd_link_executable (info))
4427		&& h->def_regular
4428		&& h->type == STT_GNU_IFUNC))
4429	  || plt == NULL
4430	  || gotplt == NULL
4431	  || relplt == NULL)
4432	abort ();
4433
4434      elfNN_kvx_create_small_pltn_entry (h, htab, output_bfd);
4435      if (!h->def_regular)
4436	{
4437	  /* Mark the symbol as undefined, rather than as defined in
4438	     the .plt section.  */
4439	  sym->st_shndx = SHN_UNDEF;
4440	  /* If the symbol is weak we need to clear the value.
4441	     Otherwise, the PLT entry would provide a definition for
4442	     the symbol even if the symbol wasn't defined anywhere,
4443	     and so the symbol would never be NULL.  Leave the value if
4444	     there were any relocations where pointer equality matters
4445	     (this is a clue for the dynamic linker, to make function
4446	     pointer comparisons work between an application and shared
4447	     library).  */
4448	  if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
4449	    sym->st_value = 0;
4450	}
4451    }
4452
4453  if (h->got.offset != (bfd_vma) - 1
4454      && elf_kvx_hash_entry (h)->got_type == GOT_NORMAL)
4455    {
4456      Elf_Internal_Rela rela;
4457      bfd_byte *loc;
4458
4459      /* This symbol has an entry in the global offset table.  Set it
4460	 up.  */
4461      if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
4462	abort ();
4463
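      /* The low bit of got.offset appears to be reserved as a marker bit
	 (see the commented-out assertion below); mask it off to form the
	 byte offset into .got.  */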
4464      rela.r_offset = (htab->root.sgot->output_section->vma
4465		       + htab->root.sgot->output_offset
4466		       + (h->got.offset & ~(bfd_vma) 1));
4467
4468#ifdef UGLY_DEBUG
4469      printf("setting rela at offset 0x%x(0x%x + 0x%x + 0x%x) for %s\n",
4470	     rela.r_offset,
4471	     htab->root.sgot->output_section->vma,
4472	     htab->root.sgot->output_offset,
4473	     h->got.offset,
4474	     h->root.root.string);
4475#endif
4476
4477      if (bfd_link_pic (info) && SYMBOL_REFERENCES_LOCAL (info, h))
4478	{
4479	  if (!h->def_regular)
4480	    return false;
4481
4482	  /* In the case of a PLT related GOT entry, it is not clear who is
4483	     supposed to set the LSB of the GOT entry.
4484	     kvx_calculate_got_entry_vma() would be a good candidate,
4485	     but it is not called currently, so the assertion below is
4486	     commented out for now.  */
4487	  // BFD_ASSERT ((h->got.offset & 1) != 0);
4488	  rela.r_info = ELFNN_R_INFO (0, R_KVX_RELATIVE);
4489	  rela.r_addend = (h->root.u.def.value
4490			   + h->root.u.def.section->output_section->vma
4491			   + h->root.u.def.section->output_offset);
4492	}
4493      else
4494	{
4495	  BFD_ASSERT ((h->got.offset & 1) == 0);
4496	  bfd_put_NN (output_bfd, (bfd_vma) 0,
4497		      htab->root.sgot->contents + h->got.offset);
4498	  rela.r_info = ELFNN_R_INFO (h->dynindx, R_KVX_GLOB_DAT);
4499	  rela.r_addend = 0;
4500	}
4501
4502      loc = htab->root.srelgot->contents;
4503      loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
4504      bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
4505    }
4506
4507  if (h->needs_copy)
4508    {
4509      Elf_Internal_Rela rela;
4510      bfd_byte *loc;
4511
4512      /* This symbol needs a copy reloc.  Set it up.  */
4513
4514      if (h->dynindx == -1
4515	  || (h->root.type != bfd_link_hash_defined
4516	      && h->root.type != bfd_link_hash_defweak)
4517	  || htab->srelbss == NULL)
4518	abort ();
4519
4520      rela.r_offset = (h->root.u.def.value
4521		       + h->root.u.def.section->output_section->vma
4522		       + h->root.u.def.section->output_offset);
4523      rela.r_info = ELFNN_R_INFO (h->dynindx, R_KVX_COPY);
4524      rela.r_addend = 0;
4525      loc = htab->srelbss->contents;
4526      loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab);
4527      bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
4528    }
4529
4530  /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute.  SYM may
4531     be NULL for local symbols.  */
4532  if (sym != NULL
4533      && (h == elf_hash_table (info)->hdynamic
4534	  || h == elf_hash_table (info)->hgot))
4535    sym->st_shndx = SHN_ABS;
4536
4537  return true;
4538}
4539
4540static void
4541elfNN_kvx_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
4542				 struct elf_kvx_link_hash_table *htab)
4543{
4544  memcpy (htab->root.splt->contents, elfNN_kvx_small_plt0_entry,
4545	  PLT_ENTRY_SIZE);
4546  elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
4547    PLT_ENTRY_SIZE;
4548}
4549
4550static bool
4551elfNN_kvx_finish_dynamic_sections (bfd *output_bfd,
4552				   struct bfd_link_info *info)
4553{
4554  struct elf_kvx_link_hash_table *htab;
4555  bfd *dynobj;
4556  asection *sdyn;
4557
4558  htab = elf_kvx_hash_table (info);
4559  dynobj = htab->root.dynobj;
4560  sdyn = bfd_get_linker_section (dynobj, ".dynamic");
4561
4562  if (htab->root.dynamic_sections_created)
4563    {
4564      ElfNN_External_Dyn *dyncon, *dynconend;
4565
4566      if (sdyn == NULL || htab->root.sgot == NULL)
4567	abort ();
4568
4569      dyncon = (ElfNN_External_Dyn *) sdyn->contents;
4570      dynconend = (ElfNN_External_Dyn *) (sdyn->contents + sdyn->size);
4571      for (; dyncon < dynconend; dyncon++)
4572	{
4573	  Elf_Internal_Dyn dyn;
4574	  asection *s;
4575
4576	  bfd_elfNN_swap_dyn_in (dynobj, dyncon, &dyn);
4577
4578	  switch (dyn.d_tag)
4579	    {
4580	    default:
4581	      continue;
4582
4583	    case DT_PLTGOT:
4584	      s = htab->root.sgotplt;
4585	      dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
4586	      break;
4587
4588	    case DT_JMPREL:
4589	      s = htab->root.srelplt;
4590	      dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
4591	      break;
4592
4593	    case DT_PLTRELSZ:
4594	      s = htab->root.srelplt;
4595	      dyn.d_un.d_val = s->size;
4596	      break;
4597
4598	    case DT_RELASZ:
4599	      /* The procedure linkage table relocs (DT_JMPREL) should
4600		 not be included in the overall relocs (DT_RELA).
4601		 Therefore, we override the DT_RELASZ entry here to
4602		 make it not include the JMPREL relocs.  Since the
4603		 linker script arranges for .rela.plt to follow all
4604		 other relocation sections, we don't have to worry
4605		 about changing the DT_RELA entry.  */
4606	      if (htab->root.srelplt != NULL)
4607		{
4608		  s = htab->root.srelplt;
4609		  dyn.d_un.d_val -= s->size;
4610		}
4611	      break;
4612	    }
4613
4614	  bfd_elfNN_swap_dyn_out (output_bfd, &dyn, dyncon);
4615	}
4616
4617    }
4618
4619  /* Fill in the special first entry in the procedure linkage table.  */
4620  if (htab->root.splt && htab->root.splt->size > 0)
4621    {
4622      elfNN_kvx_init_small_plt0_entry (output_bfd, htab);
4623
4624      elf_section_data (htab->root.splt->output_section)->
4625	this_hdr.sh_entsize = htab->plt_entry_size;
4626    }
4627
4628  if (htab->root.sgotplt)
4629    {
4630      if (bfd_is_abs_section (htab->root.sgotplt->output_section))
4631	{
4632	  (*_bfd_error_handler)
4633	    (_("discarded output section: `%pA'"), htab->root.sgotplt);
4634	  return false;
4635	}
4636
4637      /* Fill in the first three entries in the global offset table.  */
4638      if (htab->root.sgotplt->size > 0)
4639	{
4640	  bfd_put_NN (output_bfd, (bfd_vma) 0, htab->root.sgotplt->contents);
4641
4642	  /* Write GOT[1] and GOT[2], needed for the dynamic linker.  */
4643	  bfd_put_NN (output_bfd,
4644		      (bfd_vma) 0,
4645		      htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
4646	  bfd_put_NN (output_bfd,
4647		      (bfd_vma) 0,
4648		      htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
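	  /* The slots are zeroed here; conventionally the dynamic linker
	     fills them in at run time (typically with its link map and
	     resolver entry point).  */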
4649	}
4650
4651      if (htab->root.sgot)
4652	{
4653	  if (htab->root.sgot->size > 0)
4654	    {
4655	      bfd_vma addr =
4656		sdyn ? sdyn->output_section->vma + sdyn->output_offset : 0;
4657	      bfd_put_NN (output_bfd, addr, htab->root.sgot->contents);
4658	    }
4659	}
4660
4661      elf_section_data (htab->root.sgotplt->output_section)->
4662	this_hdr.sh_entsize = GOT_ENTRY_SIZE;
4663    }
4664
4665  if (htab->root.sgot && htab->root.sgot->size > 0)
4666    elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
4667      = GOT_ENTRY_SIZE;
4668
4669  return true;
4670}
4671
4672/* Return the address of the Ith PLT stub in section PLT, for relocation
4673   REL, or (bfd_vma) -1 if it should not be included.  */
4674
4675static bfd_vma
4676elfNN_kvx_plt_sym_val (bfd_vma i, const asection *plt,
4677		       const arelent *rel ATTRIBUTE_UNUSED)
4678{
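  /* Skip the reserved first PLT entry; each subsequent stub occupies one
     small PLT entry.  */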
4679  return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
4680}
4681
4682#define ELF_ARCH			bfd_arch_kvx
4683#define ELF_MACHINE_CODE		EM_KVX
4684#define ELF_MAXPAGESIZE			0x10000
4685#define ELF_MINPAGESIZE			0x1000
4686#define ELF_COMMONPAGESIZE		0x1000
4687
4688#define bfd_elfNN_bfd_link_hash_table_create    \
4689  elfNN_kvx_link_hash_table_create
4690
4691#define bfd_elfNN_bfd_merge_private_bfd_data	\
4692  elfNN_kvx_merge_private_bfd_data
4693
4694#define bfd_elfNN_bfd_print_private_bfd_data	\
4695  elfNN_kvx_print_private_bfd_data
4696
4697#define bfd_elfNN_bfd_reloc_type_lookup		\
4698  elfNN_kvx_reloc_type_lookup
4699
4700#define bfd_elfNN_bfd_reloc_name_lookup		\
4701  elfNN_kvx_reloc_name_lookup
4702
4703#define bfd_elfNN_bfd_set_private_flags		\
4704  elfNN_kvx_set_private_flags
4705
4706#define bfd_elfNN_mkobject			\
4707  elfNN_kvx_mkobject
4708
4709#define bfd_elfNN_new_section_hook		\
4710  elfNN_kvx_new_section_hook
4711
4712#define elf_backend_adjust_dynamic_symbol	\
4713  elfNN_kvx_adjust_dynamic_symbol
4714
4715#define elf_backend_early_size_sections		\
4716  elfNN_kvx_early_size_sections
4717
4718#define elf_backend_check_relocs		\
4719  elfNN_kvx_check_relocs
4720
4721#define elf_backend_copy_indirect_symbol	\
4722  elfNN_kvx_copy_indirect_symbol
4723
4724/* Create the .dynbss and .rela.bss sections in DYNOBJ, and set up
4725   shortcuts to them in our hash table.  */
4726#define elf_backend_create_dynamic_sections	\
4727  elfNN_kvx_create_dynamic_sections
4728
4729#define elf_backend_init_index_section		\
4730  _bfd_elf_init_2_index_sections
4731
4732#define elf_backend_finish_dynamic_sections	\
4733  elfNN_kvx_finish_dynamic_sections
4734
4735#define elf_backend_finish_dynamic_symbol	\
4736  elfNN_kvx_finish_dynamic_symbol
4737
4738#define elf_backend_object_p			\
4739  elfNN_kvx_object_p
4740
4741#define elf_backend_output_arch_local_syms      \
4742  elfNN_kvx_output_arch_local_syms
4743
4744#define elf_backend_plt_sym_val			\
4745  elfNN_kvx_plt_sym_val
4746
4747#define elf_backend_init_file_header		\
4748  elfNN_kvx_init_file_header
4749
4750#define elf_backend_init_process_headers	\
4751  elfNN_kvx_init_process_headers
4752
4753#define elf_backend_relocate_section		\
4754  elfNN_kvx_relocate_section
4755
4756#define elf_backend_reloc_type_class		\
4757  elfNN_kvx_reloc_type_class
4758
4759#define elf_backend_late_size_sections	\
4760  elfNN_kvx_late_size_sections
4761
4762#define elf_backend_can_refcount       1
4763#define elf_backend_can_gc_sections    1
4764#define elf_backend_plt_readonly       1
4765#define elf_backend_want_got_plt       1
4766#define elf_backend_want_plt_sym       0
4767#define elf_backend_may_use_rel_p      0
4768#define elf_backend_may_use_rela_p     1
4769#define elf_backend_default_use_rela_p 1
4770#define elf_backend_rela_normal        1
4771#define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
4772#define elf_backend_default_execstack  0
4773#define elf_backend_extern_protected_data 1
4774#define elf_backend_hash_symbol elf_kvx_hash_symbol
4775
4776#include "elfNN-target.h"
4777