1/* 32-bit ELF support for ARM
2   Copyright (C) 1998-2022 Free Software Foundation, Inc.
3
4   This file is part of BFD, the Binary File Descriptor library.
5
6   This program is free software; you can redistribute it and/or modify
7   it under the terms of the GNU General Public License as published by
8   the Free Software Foundation; either version 3 of the License, or
9   (at your option) any later version.
10
11   This program is distributed in the hope that it will be useful,
12   but WITHOUT ANY WARRANTY; without even the implied warranty of
13   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14   GNU General Public License for more details.
15
16   You should have received a copy of the GNU General Public License
17   along with this program; if not, write to the Free Software
18   Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
19   MA 02110-1301, USA.  */
20
21#include "sysdep.h"
22#include <limits.h>
23
24#include "bfd.h"
25#include "libiberty.h"
26#include "libbfd.h"
27#include "elf-bfd.h"
28#include "elf-nacl.h"
29#include "elf-vxworks.h"
30#include "elf/arm.h"
31#include "elf32-arm.h"
32#include "cpu-arm.h"
33
34/* Return the relocation section associated with NAME.  HTAB is the
35   bfd's elf32_arm_link_hash_entry.  */
36#define RELOC_SECTION(HTAB, NAME) \
37  ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
38
39/* Return size of a relocation entry.  HTAB is the bfd's
40   elf32_arm_link_hash_entry.  */
41#define RELOC_SIZE(HTAB) \
42  ((HTAB)->use_rel \
43   ? sizeof (Elf32_External_Rel) \
44   : sizeof (Elf32_External_Rela))
45
46/* Return function to swap relocations in.  HTAB is the bfd's
47   elf32_arm_link_hash_entry.  */
48#define SWAP_RELOC_IN(HTAB) \
49  ((HTAB)->use_rel \
50   ? bfd_elf32_swap_reloc_in \
51   : bfd_elf32_swap_reloca_in)
52
53/* Return function to swap relocations out.  HTAB is the bfd's
54   elf32_arm_link_hash_entry.  */
55#define SWAP_RELOC_OUT(HTAB) \
56  ((HTAB)->use_rel \
57   ? bfd_elf32_swap_reloc_out \
58   : bfd_elf32_swap_reloca_out)
59
60#define elf_info_to_howto		NULL
61#define elf_info_to_howto_rel		elf32_arm_info_to_howto
62
63#define ARM_ELF_ABI_VERSION		0
64#define ARM_ELF_OS_ABI_VERSION		ELFOSABI_ARM
65
66/* The Adjusted Place, as defined by AAELF.  */
67#define Pa(X) ((X) & 0xfffffffc)
68
69static bool elf32_arm_write_section (bfd *output_bfd,
70				     struct bfd_link_info *link_info,
71				     asection *sec,
72				     bfd_byte *contents);
73
74/* Note: code such as elf32_arm_reloc_type_lookup expect to use e.g.
75   R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
76   in that slot.  */
77
78static reloc_howto_type elf32_arm_howto_table_1[] =
79{
80  /* No relocation.  */
81  HOWTO (R_ARM_NONE,		/* type */
82	 0,			/* rightshift */
83	 0,			/* size */
84	 0,			/* bitsize */
85	 false,			/* pc_relative */
86	 0,			/* bitpos */
87	 complain_overflow_dont,/* complain_on_overflow */
88	 bfd_elf_generic_reloc,	/* special_function */
89	 "R_ARM_NONE",		/* name */
90	 false,			/* partial_inplace */
91	 0,			/* src_mask */
92	 0,			/* dst_mask */
93	 false),		/* pcrel_offset */
94
95  HOWTO (R_ARM_PC24,		/* type */
96	 2,			/* rightshift */
97	 4,			/* size */
98	 24,			/* bitsize */
99	 true,			/* pc_relative */
100	 0,			/* bitpos */
101	 complain_overflow_signed,/* complain_on_overflow */
102	 bfd_elf_generic_reloc,	/* special_function */
103	 "R_ARM_PC24",		/* name */
104	 false,			/* partial_inplace */
105	 0x00ffffff,		/* src_mask */
106	 0x00ffffff,		/* dst_mask */
107	 true),			/* pcrel_offset */
108
109  /* 32 bit absolute */
110  HOWTO (R_ARM_ABS32,		/* type */
111	 0,			/* rightshift */
112	 4,			/* size */
113	 32,			/* bitsize */
114	 false,			/* pc_relative */
115	 0,			/* bitpos */
116	 complain_overflow_bitfield,/* complain_on_overflow */
117	 bfd_elf_generic_reloc,	/* special_function */
118	 "R_ARM_ABS32",		/* name */
119	 false,			/* partial_inplace */
120	 0xffffffff,		/* src_mask */
121	 0xffffffff,		/* dst_mask */
122	 false),		/* pcrel_offset */
123
124  /* standard 32bit pc-relative reloc */
125  HOWTO (R_ARM_REL32,		/* type */
126	 0,			/* rightshift */
127	 4,			/* size */
128	 32,			/* bitsize */
129	 true,			/* pc_relative */
130	 0,			/* bitpos */
131	 complain_overflow_bitfield,/* complain_on_overflow */
132	 bfd_elf_generic_reloc,	/* special_function */
133	 "R_ARM_REL32",		/* name */
134	 false,			/* partial_inplace */
135	 0xffffffff,		/* src_mask */
136	 0xffffffff,		/* dst_mask */
137	 true),			/* pcrel_offset */
138
139  /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
140  HOWTO (R_ARM_LDR_PC_G0,	/* type */
141	 0,			/* rightshift */
142	 1,			/* size */
143	 32,			/* bitsize */
144	 true,			/* pc_relative */
145	 0,			/* bitpos */
146	 complain_overflow_dont,/* complain_on_overflow */
147	 bfd_elf_generic_reloc,	/* special_function */
148	 "R_ARM_LDR_PC_G0",     /* name */
149	 false,			/* partial_inplace */
150	 0xffffffff,		/* src_mask */
151	 0xffffffff,		/* dst_mask */
152	 true),			/* pcrel_offset */
153
154   /* 16 bit absolute */
155  HOWTO (R_ARM_ABS16,		/* type */
156	 0,			/* rightshift */
157	 2,			/* size */
158	 16,			/* bitsize */
159	 false,			/* pc_relative */
160	 0,			/* bitpos */
161	 complain_overflow_bitfield,/* complain_on_overflow */
162	 bfd_elf_generic_reloc,	/* special_function */
163	 "R_ARM_ABS16",		/* name */
164	 false,			/* partial_inplace */
165	 0x0000ffff,		/* src_mask */
166	 0x0000ffff,		/* dst_mask */
167	 false),		/* pcrel_offset */
168
169  /* 12 bit absolute */
170  HOWTO (R_ARM_ABS12,		/* type */
171	 0,			/* rightshift */
172	 4,			/* size */
173	 12,			/* bitsize */
174	 false,			/* pc_relative */
175	 0,			/* bitpos */
176	 complain_overflow_bitfield,/* complain_on_overflow */
177	 bfd_elf_generic_reloc,	/* special_function */
178	 "R_ARM_ABS12",		/* name */
179	 false,			/* partial_inplace */
180	 0x00000fff,		/* src_mask */
181	 0x00000fff,		/* dst_mask */
182	 false),		/* pcrel_offset */
183
184  HOWTO (R_ARM_THM_ABS5,	/* type */
185	 6,			/* rightshift */
186	 2,			/* size */
187	 5,			/* bitsize */
188	 false,			/* pc_relative */
189	 0,			/* bitpos */
190	 complain_overflow_bitfield,/* complain_on_overflow */
191	 bfd_elf_generic_reloc,	/* special_function */
192	 "R_ARM_THM_ABS5",	/* name */
193	 false,			/* partial_inplace */
194	 0x000007e0,		/* src_mask */
195	 0x000007e0,		/* dst_mask */
196	 false),		/* pcrel_offset */
197
198  /* 8 bit absolute */
199  HOWTO (R_ARM_ABS8,		/* type */
200	 0,			/* rightshift */
201	 1,			/* size */
202	 8,			/* bitsize */
203	 false,			/* pc_relative */
204	 0,			/* bitpos */
205	 complain_overflow_bitfield,/* complain_on_overflow */
206	 bfd_elf_generic_reloc,	/* special_function */
207	 "R_ARM_ABS8",		/* name */
208	 false,			/* partial_inplace */
209	 0x000000ff,		/* src_mask */
210	 0x000000ff,		/* dst_mask */
211	 false),		/* pcrel_offset */
212
213  HOWTO (R_ARM_SBREL32,		/* type */
214	 0,			/* rightshift */
215	 4,			/* size */
216	 32,			/* bitsize */
217	 false,			/* pc_relative */
218	 0,			/* bitpos */
219	 complain_overflow_dont,/* complain_on_overflow */
220	 bfd_elf_generic_reloc,	/* special_function */
221	 "R_ARM_SBREL32",	/* name */
222	 false,			/* partial_inplace */
223	 0xffffffff,		/* src_mask */
224	 0xffffffff,		/* dst_mask */
225	 false),		/* pcrel_offset */
226
227  HOWTO (R_ARM_THM_CALL,	/* type */
228	 1,			/* rightshift */
229	 4,			/* size */
230	 24,			/* bitsize */
231	 true,			/* pc_relative */
232	 0,			/* bitpos */
233	 complain_overflow_signed,/* complain_on_overflow */
234	 bfd_elf_generic_reloc,	/* special_function */
235	 "R_ARM_THM_CALL",	/* name */
236	 false,			/* partial_inplace */
237	 0x07ff2fff,		/* src_mask */
238	 0x07ff2fff,		/* dst_mask */
239	 true),			/* pcrel_offset */
240
241  HOWTO (R_ARM_THM_PC8,		/* type */
242	 1,			/* rightshift */
243	 2,			/* size */
244	 8,			/* bitsize */
245	 true,			/* pc_relative */
246	 0,			/* bitpos */
247	 complain_overflow_signed,/* complain_on_overflow */
248	 bfd_elf_generic_reloc,	/* special_function */
249	 "R_ARM_THM_PC8",	/* name */
250	 false,			/* partial_inplace */
251	 0x000000ff,		/* src_mask */
252	 0x000000ff,		/* dst_mask */
253	 true),			/* pcrel_offset */
254
255  HOWTO (R_ARM_BREL_ADJ,	/* type */
256	 1,			/* rightshift */
257	 2,			/* size */
258	 32,			/* bitsize */
259	 false,			/* pc_relative */
260	 0,			/* bitpos */
261	 complain_overflow_signed,/* complain_on_overflow */
262	 bfd_elf_generic_reloc,	/* special_function */
263	 "R_ARM_BREL_ADJ",	/* name */
264	 false,			/* partial_inplace */
265	 0xffffffff,		/* src_mask */
266	 0xffffffff,		/* dst_mask */
267	 false),		/* pcrel_offset */
268
269  HOWTO (R_ARM_TLS_DESC,	/* type */
270	 0,			/* rightshift */
271	 4,			/* size */
272	 32,			/* bitsize */
273	 false,			/* pc_relative */
274	 0,			/* bitpos */
275	 complain_overflow_bitfield,/* complain_on_overflow */
276	 bfd_elf_generic_reloc,	/* special_function */
277	 "R_ARM_TLS_DESC",	/* name */
278	 false,			/* partial_inplace */
279	 0xffffffff,		/* src_mask */
280	 0xffffffff,		/* dst_mask */
281	 false),		/* pcrel_offset */
282
283  HOWTO (R_ARM_THM_SWI8,	/* type */
284	 0,			/* rightshift */
285	 0,			/* size */
286	 0,			/* bitsize */
287	 false,			/* pc_relative */
288	 0,			/* bitpos */
289	 complain_overflow_signed,/* complain_on_overflow */
290	 bfd_elf_generic_reloc,	/* special_function */
291	 "R_ARM_SWI8",		/* name */
292	 false,			/* partial_inplace */
293	 0x00000000,		/* src_mask */
294	 0x00000000,		/* dst_mask */
295	 false),		/* pcrel_offset */
296
297  /* BLX instruction for the ARM.  */
298  HOWTO (R_ARM_XPC25,		/* type */
299	 2,			/* rightshift */
300	 4,			/* size */
301	 24,			/* bitsize */
302	 true,			/* pc_relative */
303	 0,			/* bitpos */
304	 complain_overflow_signed,/* complain_on_overflow */
305	 bfd_elf_generic_reloc,	/* special_function */
306	 "R_ARM_XPC25",		/* name */
307	 false,			/* partial_inplace */
308	 0x00ffffff,		/* src_mask */
309	 0x00ffffff,		/* dst_mask */
310	 true),			/* pcrel_offset */
311
312  /* BLX instruction for the Thumb.  */
313  HOWTO (R_ARM_THM_XPC22,	/* type */
314	 2,			/* rightshift */
315	 4,			/* size */
316	 24,			/* bitsize */
317	 true,			/* pc_relative */
318	 0,			/* bitpos */
319	 complain_overflow_signed,/* complain_on_overflow */
320	 bfd_elf_generic_reloc,	/* special_function */
321	 "R_ARM_THM_XPC22",	/* name */
322	 false,			/* partial_inplace */
323	 0x07ff2fff,		/* src_mask */
324	 0x07ff2fff,		/* dst_mask */
325	 true),			/* pcrel_offset */
326
327  /* Dynamic TLS relocations.  */
328
329  HOWTO (R_ARM_TLS_DTPMOD32,	/* type */
330	 0,			/* rightshift */
331	 4,			/* size */
332	 32,			/* bitsize */
333	 false,			/* pc_relative */
334	 0,			/* bitpos */
335	 complain_overflow_bitfield,/* complain_on_overflow */
336	 bfd_elf_generic_reloc, /* special_function */
337	 "R_ARM_TLS_DTPMOD32",	/* name */
338	 true,			/* partial_inplace */
339	 0xffffffff,		/* src_mask */
340	 0xffffffff,		/* dst_mask */
341	 false),		/* pcrel_offset */
342
343  HOWTO (R_ARM_TLS_DTPOFF32,	/* type */
344	 0,			/* rightshift */
345	 4,			/* size */
346	 32,			/* bitsize */
347	 false,			/* pc_relative */
348	 0,			/* bitpos */
349	 complain_overflow_bitfield,/* complain_on_overflow */
350	 bfd_elf_generic_reloc, /* special_function */
351	 "R_ARM_TLS_DTPOFF32",	/* name */
352	 true,			/* partial_inplace */
353	 0xffffffff,		/* src_mask */
354	 0xffffffff,		/* dst_mask */
355	 false),		/* pcrel_offset */
356
357  HOWTO (R_ARM_TLS_TPOFF32,	/* type */
358	 0,			/* rightshift */
359	 4,			/* size */
360	 32,			/* bitsize */
361	 false,			/* pc_relative */
362	 0,			/* bitpos */
363	 complain_overflow_bitfield,/* complain_on_overflow */
364	 bfd_elf_generic_reloc, /* special_function */
365	 "R_ARM_TLS_TPOFF32",	/* name */
366	 true,			/* partial_inplace */
367	 0xffffffff,		/* src_mask */
368	 0xffffffff,		/* dst_mask */
369	 false),		/* pcrel_offset */
370
371  /* Relocs used in ARM Linux */
372
373  HOWTO (R_ARM_COPY,		/* type */
374	 0,			/* rightshift */
375	 4,			/* size */
376	 32,			/* bitsize */
377	 false,			/* pc_relative */
378	 0,			/* bitpos */
379	 complain_overflow_bitfield,/* complain_on_overflow */
380	 bfd_elf_generic_reloc, /* special_function */
381	 "R_ARM_COPY",		/* name */
382	 true,			/* partial_inplace */
383	 0xffffffff,		/* src_mask */
384	 0xffffffff,		/* dst_mask */
385	 false),		/* pcrel_offset */
386
387  HOWTO (R_ARM_GLOB_DAT,	/* type */
388	 0,			/* rightshift */
389	 4,			/* size */
390	 32,			/* bitsize */
391	 false,			/* pc_relative */
392	 0,			/* bitpos */
393	 complain_overflow_bitfield,/* complain_on_overflow */
394	 bfd_elf_generic_reloc, /* special_function */
395	 "R_ARM_GLOB_DAT",	/* name */
396	 true,			/* partial_inplace */
397	 0xffffffff,		/* src_mask */
398	 0xffffffff,		/* dst_mask */
399	 false),		/* pcrel_offset */
400
401  HOWTO (R_ARM_JUMP_SLOT,	/* type */
402	 0,			/* rightshift */
403	 4,			/* size */
404	 32,			/* bitsize */
405	 false,			/* pc_relative */
406	 0,			/* bitpos */
407	 complain_overflow_bitfield,/* complain_on_overflow */
408	 bfd_elf_generic_reloc, /* special_function */
409	 "R_ARM_JUMP_SLOT",	/* name */
410	 true,			/* partial_inplace */
411	 0xffffffff,		/* src_mask */
412	 0xffffffff,		/* dst_mask */
413	 false),		/* pcrel_offset */
414
415  HOWTO (R_ARM_RELATIVE,	/* type */
416	 0,			/* rightshift */
417	 4,			/* size */
418	 32,			/* bitsize */
419	 false,			/* pc_relative */
420	 0,			/* bitpos */
421	 complain_overflow_bitfield,/* complain_on_overflow */
422	 bfd_elf_generic_reloc, /* special_function */
423	 "R_ARM_RELATIVE",	/* name */
424	 true,			/* partial_inplace */
425	 0xffffffff,		/* src_mask */
426	 0xffffffff,		/* dst_mask */
427	 false),		/* pcrel_offset */
428
429  HOWTO (R_ARM_GOTOFF32,	/* type */
430	 0,			/* rightshift */
431	 4,			/* size */
432	 32,			/* bitsize */
433	 false,			/* pc_relative */
434	 0,			/* bitpos */
435	 complain_overflow_bitfield,/* complain_on_overflow */
436	 bfd_elf_generic_reloc, /* special_function */
437	 "R_ARM_GOTOFF32",	/* name */
438	 true,			/* partial_inplace */
439	 0xffffffff,		/* src_mask */
440	 0xffffffff,		/* dst_mask */
441	 false),		/* pcrel_offset */
442
443  HOWTO (R_ARM_GOTPC,		/* type */
444	 0,			/* rightshift */
445	 4,			/* size */
446	 32,			/* bitsize */
447	 true,			/* pc_relative */
448	 0,			/* bitpos */
449	 complain_overflow_bitfield,/* complain_on_overflow */
450	 bfd_elf_generic_reloc, /* special_function */
451	 "R_ARM_GOTPC",		/* name */
452	 true,			/* partial_inplace */
453	 0xffffffff,		/* src_mask */
454	 0xffffffff,		/* dst_mask */
455	 true),			/* pcrel_offset */
456
457  HOWTO (R_ARM_GOT32,		/* type */
458	 0,			/* rightshift */
459	 4,			/* size */
460	 32,			/* bitsize */
461	 false,			/* pc_relative */
462	 0,			/* bitpos */
463	 complain_overflow_bitfield,/* complain_on_overflow */
464	 bfd_elf_generic_reloc, /* special_function */
465	 "R_ARM_GOT32",		/* name */
466	 true,			/* partial_inplace */
467	 0xffffffff,		/* src_mask */
468	 0xffffffff,		/* dst_mask */
469	 false),		/* pcrel_offset */
470
471  HOWTO (R_ARM_PLT32,		/* type */
472	 2,			/* rightshift */
473	 4,			/* size */
474	 24,			/* bitsize */
475	 true,			/* pc_relative */
476	 0,			/* bitpos */
477	 complain_overflow_bitfield,/* complain_on_overflow */
478	 bfd_elf_generic_reloc, /* special_function */
479	 "R_ARM_PLT32",		/* name */
480	 false,			/* partial_inplace */
481	 0x00ffffff,		/* src_mask */
482	 0x00ffffff,		/* dst_mask */
483	 true),			/* pcrel_offset */
484
485  HOWTO (R_ARM_CALL,		/* type */
486	 2,			/* rightshift */
487	 4,			/* size */
488	 24,			/* bitsize */
489	 true,			/* pc_relative */
490	 0,			/* bitpos */
491	 complain_overflow_signed,/* complain_on_overflow */
492	 bfd_elf_generic_reloc,	/* special_function */
493	 "R_ARM_CALL",		/* name */
494	 false,			/* partial_inplace */
495	 0x00ffffff,		/* src_mask */
496	 0x00ffffff,		/* dst_mask */
497	 true),			/* pcrel_offset */
498
499  HOWTO (R_ARM_JUMP24,		/* type */
500	 2,			/* rightshift */
501	 4,			/* size */
502	 24,			/* bitsize */
503	 true,			/* pc_relative */
504	 0,			/* bitpos */
505	 complain_overflow_signed,/* complain_on_overflow */
506	 bfd_elf_generic_reloc,	/* special_function */
507	 "R_ARM_JUMP24",	/* name */
508	 false,			/* partial_inplace */
509	 0x00ffffff,		/* src_mask */
510	 0x00ffffff,		/* dst_mask */
511	 true),			/* pcrel_offset */
512
513  HOWTO (R_ARM_THM_JUMP24,	/* type */
514	 1,			/* rightshift */
515	 4,			/* size */
516	 24,			/* bitsize */
517	 true,			/* pc_relative */
518	 0,			/* bitpos */
519	 complain_overflow_signed,/* complain_on_overflow */
520	 bfd_elf_generic_reloc,	/* special_function */
521	 "R_ARM_THM_JUMP24",	/* name */
522	 false,			/* partial_inplace */
523	 0x07ff2fff,		/* src_mask */
524	 0x07ff2fff,		/* dst_mask */
525	 true),			/* pcrel_offset */
526
527  HOWTO (R_ARM_BASE_ABS,	/* type */
528	 0,			/* rightshift */
529	 4,			/* size */
530	 32,			/* bitsize */
531	 false,			/* pc_relative */
532	 0,			/* bitpos */
533	 complain_overflow_dont,/* complain_on_overflow */
534	 bfd_elf_generic_reloc,	/* special_function */
535	 "R_ARM_BASE_ABS",	/* name */
536	 false,			/* partial_inplace */
537	 0xffffffff,		/* src_mask */
538	 0xffffffff,		/* dst_mask */
539	 false),		/* pcrel_offset */
540
541  HOWTO (R_ARM_ALU_PCREL7_0,	/* type */
542	 0,			/* rightshift */
543	 4,			/* size */
544	 12,			/* bitsize */
545	 true,			/* pc_relative */
546	 0,			/* bitpos */
547	 complain_overflow_dont,/* complain_on_overflow */
548	 bfd_elf_generic_reloc,	/* special_function */
549	 "R_ARM_ALU_PCREL_7_0",	/* name */
550	 false,			/* partial_inplace */
551	 0x00000fff,		/* src_mask */
552	 0x00000fff,		/* dst_mask */
553	 true),			/* pcrel_offset */
554
555  HOWTO (R_ARM_ALU_PCREL15_8,	/* type */
556	 0,			/* rightshift */
557	 4,			/* size */
558	 12,			/* bitsize */
559	 true,			/* pc_relative */
560	 8,			/* bitpos */
561	 complain_overflow_dont,/* complain_on_overflow */
562	 bfd_elf_generic_reloc,	/* special_function */
563	 "R_ARM_ALU_PCREL_15_8",/* name */
564	 false,			/* partial_inplace */
565	 0x00000fff,		/* src_mask */
566	 0x00000fff,		/* dst_mask */
567	 true),			/* pcrel_offset */
568
569  HOWTO (R_ARM_ALU_PCREL23_15,	/* type */
570	 0,			/* rightshift */
571	 4,			/* size */
572	 12,			/* bitsize */
573	 true,			/* pc_relative */
574	 16,			/* bitpos */
575	 complain_overflow_dont,/* complain_on_overflow */
576	 bfd_elf_generic_reloc,	/* special_function */
577	 "R_ARM_ALU_PCREL_23_15",/* name */
578	 false,			/* partial_inplace */
579	 0x00000fff,		/* src_mask */
580	 0x00000fff,		/* dst_mask */
581	 true),			/* pcrel_offset */
582
583  HOWTO (R_ARM_LDR_SBREL_11_0,	/* type */
584	 0,			/* rightshift */
585	 4,			/* size */
586	 12,			/* bitsize */
587	 false,			/* pc_relative */
588	 0,			/* bitpos */
589	 complain_overflow_dont,/* complain_on_overflow */
590	 bfd_elf_generic_reloc,	/* special_function */
591	 "R_ARM_LDR_SBREL_11_0",/* name */
592	 false,			/* partial_inplace */
593	 0x00000fff,		/* src_mask */
594	 0x00000fff,		/* dst_mask */
595	 false),		/* pcrel_offset */
596
597  HOWTO (R_ARM_ALU_SBREL_19_12,	/* type */
598	 0,			/* rightshift */
599	 4,			/* size */
600	 8,			/* bitsize */
601	 false,			/* pc_relative */
602	 12,			/* bitpos */
603	 complain_overflow_dont,/* complain_on_overflow */
604	 bfd_elf_generic_reloc,	/* special_function */
605	 "R_ARM_ALU_SBREL_19_12",/* name */
606	 false,			/* partial_inplace */
607	 0x000ff000,		/* src_mask */
608	 0x000ff000,		/* dst_mask */
609	 false),		/* pcrel_offset */
610
611  HOWTO (R_ARM_ALU_SBREL_27_20,	/* type */
612	 0,			/* rightshift */
613	 4,			/* size */
614	 8,			/* bitsize */
615	 false,			/* pc_relative */
616	 20,			/* bitpos */
617	 complain_overflow_dont,/* complain_on_overflow */
618	 bfd_elf_generic_reloc,	/* special_function */
619	 "R_ARM_ALU_SBREL_27_20",/* name */
620	 false,			/* partial_inplace */
621	 0x0ff00000,		/* src_mask */
622	 0x0ff00000,		/* dst_mask */
623	 false),		/* pcrel_offset */
624
625  HOWTO (R_ARM_TARGET1,		/* type */
626	 0,			/* rightshift */
627	 4,			/* size */
628	 32,			/* bitsize */
629	 false,			/* pc_relative */
630	 0,			/* bitpos */
631	 complain_overflow_dont,/* complain_on_overflow */
632	 bfd_elf_generic_reloc,	/* special_function */
633	 "R_ARM_TARGET1",	/* name */
634	 false,			/* partial_inplace */
635	 0xffffffff,		/* src_mask */
636	 0xffffffff,		/* dst_mask */
637	 false),		/* pcrel_offset */
638
639  HOWTO (R_ARM_ROSEGREL32,	/* type */
640	 0,			/* rightshift */
641	 4,			/* size */
642	 32,			/* bitsize */
643	 false,			/* pc_relative */
644	 0,			/* bitpos */
645	 complain_overflow_dont,/* complain_on_overflow */
646	 bfd_elf_generic_reloc,	/* special_function */
647	 "R_ARM_ROSEGREL32",	/* name */
648	 false,			/* partial_inplace */
649	 0xffffffff,		/* src_mask */
650	 0xffffffff,		/* dst_mask */
651	 false),		/* pcrel_offset */
652
653  HOWTO (R_ARM_V4BX,		/* type */
654	 0,			/* rightshift */
655	 4,			/* size */
656	 32,			/* bitsize */
657	 false,			/* pc_relative */
658	 0,			/* bitpos */
659	 complain_overflow_dont,/* complain_on_overflow */
660	 bfd_elf_generic_reloc,	/* special_function */
661	 "R_ARM_V4BX",		/* name */
662	 false,			/* partial_inplace */
663	 0xffffffff,		/* src_mask */
664	 0xffffffff,		/* dst_mask */
665	 false),		/* pcrel_offset */
666
667  HOWTO (R_ARM_TARGET2,		/* type */
668	 0,			/* rightshift */
669	 4,			/* size */
670	 32,			/* bitsize */
671	 false,			/* pc_relative */
672	 0,			/* bitpos */
673	 complain_overflow_signed,/* complain_on_overflow */
674	 bfd_elf_generic_reloc,	/* special_function */
675	 "R_ARM_TARGET2",	/* name */
676	 false,			/* partial_inplace */
677	 0xffffffff,		/* src_mask */
678	 0xffffffff,		/* dst_mask */
679	 true),			/* pcrel_offset */
680
681  HOWTO (R_ARM_PREL31,		/* type */
682	 0,			/* rightshift */
683	 4,			/* size */
684	 31,			/* bitsize */
685	 true,			/* pc_relative */
686	 0,			/* bitpos */
687	 complain_overflow_signed,/* complain_on_overflow */
688	 bfd_elf_generic_reloc,	/* special_function */
689	 "R_ARM_PREL31",	/* name */
690	 false,			/* partial_inplace */
691	 0x7fffffff,		/* src_mask */
692	 0x7fffffff,		/* dst_mask */
693	 true),			/* pcrel_offset */
694
695  HOWTO (R_ARM_MOVW_ABS_NC,	/* type */
696	 0,			/* rightshift */
697	 4,			/* size */
698	 16,			/* bitsize */
699	 false,			/* pc_relative */
700	 0,			/* bitpos */
701	 complain_overflow_dont,/* complain_on_overflow */
702	 bfd_elf_generic_reloc,	/* special_function */
703	 "R_ARM_MOVW_ABS_NC",	/* name */
704	 false,			/* partial_inplace */
705	 0x000f0fff,		/* src_mask */
706	 0x000f0fff,		/* dst_mask */
707	 false),		/* pcrel_offset */
708
709  HOWTO (R_ARM_MOVT_ABS,	/* type */
710	 0,			/* rightshift */
711	 4,			/* size */
712	 16,			/* bitsize */
713	 false,			/* pc_relative */
714	 0,			/* bitpos */
715	 complain_overflow_bitfield,/* complain_on_overflow */
716	 bfd_elf_generic_reloc,	/* special_function */
717	 "R_ARM_MOVT_ABS",	/* name */
718	 false,			/* partial_inplace */
719	 0x000f0fff,		/* src_mask */
720	 0x000f0fff,		/* dst_mask */
721	 false),		/* pcrel_offset */
722
723  HOWTO (R_ARM_MOVW_PREL_NC,	/* type */
724	 0,			/* rightshift */
725	 4,			/* size */
726	 16,			/* bitsize */
727	 true,			/* pc_relative */
728	 0,			/* bitpos */
729	 complain_overflow_dont,/* complain_on_overflow */
730	 bfd_elf_generic_reloc,	/* special_function */
731	 "R_ARM_MOVW_PREL_NC",	/* name */
732	 false,			/* partial_inplace */
733	 0x000f0fff,		/* src_mask */
734	 0x000f0fff,		/* dst_mask */
735	 true),			/* pcrel_offset */
736
737  HOWTO (R_ARM_MOVT_PREL,	/* type */
738	 0,			/* rightshift */
739	 4,			/* size */
740	 16,			/* bitsize */
741	 true,			/* pc_relative */
742	 0,			/* bitpos */
743	 complain_overflow_bitfield,/* complain_on_overflow */
744	 bfd_elf_generic_reloc,	/* special_function */
745	 "R_ARM_MOVT_PREL",	/* name */
746	 false,			/* partial_inplace */
747	 0x000f0fff,		/* src_mask */
748	 0x000f0fff,		/* dst_mask */
749	 true),			/* pcrel_offset */
750
751  HOWTO (R_ARM_THM_MOVW_ABS_NC,	/* type */
752	 0,			/* rightshift */
753	 4,			/* size */
754	 16,			/* bitsize */
755	 false,			/* pc_relative */
756	 0,			/* bitpos */
757	 complain_overflow_dont,/* complain_on_overflow */
758	 bfd_elf_generic_reloc,	/* special_function */
759	 "R_ARM_THM_MOVW_ABS_NC",/* name */
760	 false,			/* partial_inplace */
761	 0x040f70ff,		/* src_mask */
762	 0x040f70ff,		/* dst_mask */
763	 false),		/* pcrel_offset */
764
765  HOWTO (R_ARM_THM_MOVT_ABS,	/* type */
766	 0,			/* rightshift */
767	 4,			/* size */
768	 16,			/* bitsize */
769	 false,			/* pc_relative */
770	 0,			/* bitpos */
771	 complain_overflow_bitfield,/* complain_on_overflow */
772	 bfd_elf_generic_reloc,	/* special_function */
773	 "R_ARM_THM_MOVT_ABS",	/* name */
774	 false,			/* partial_inplace */
775	 0x040f70ff,		/* src_mask */
776	 0x040f70ff,		/* dst_mask */
777	 false),		/* pcrel_offset */
778
779  HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
780	 0,			/* rightshift */
781	 4,			/* size */
782	 16,			/* bitsize */
783	 true,			/* pc_relative */
784	 0,			/* bitpos */
785	 complain_overflow_dont,/* complain_on_overflow */
786	 bfd_elf_generic_reloc,	/* special_function */
787	 "R_ARM_THM_MOVW_PREL_NC",/* name */
788	 false,			/* partial_inplace */
789	 0x040f70ff,		/* src_mask */
790	 0x040f70ff,		/* dst_mask */
791	 true),			/* pcrel_offset */
792
793  HOWTO (R_ARM_THM_MOVT_PREL,	/* type */
794	 0,			/* rightshift */
795	 4,			/* size */
796	 16,			/* bitsize */
797	 true,			/* pc_relative */
798	 0,			/* bitpos */
799	 complain_overflow_bitfield,/* complain_on_overflow */
800	 bfd_elf_generic_reloc,	/* special_function */
801	 "R_ARM_THM_MOVT_PREL",	/* name */
802	 false,			/* partial_inplace */
803	 0x040f70ff,		/* src_mask */
804	 0x040f70ff,		/* dst_mask */
805	 true),			/* pcrel_offset */
806
807  HOWTO (R_ARM_THM_JUMP19,	/* type */
808	 1,			/* rightshift */
809	 4,			/* size */
810	 19,			/* bitsize */
811	 true,			/* pc_relative */
812	 0,			/* bitpos */
813	 complain_overflow_signed,/* complain_on_overflow */
814	 bfd_elf_generic_reloc, /* special_function */
815	 "R_ARM_THM_JUMP19",	/* name */
816	 false,			/* partial_inplace */
817	 0x043f2fff,		/* src_mask */
818	 0x043f2fff,		/* dst_mask */
819	 true),			/* pcrel_offset */
820
821  HOWTO (R_ARM_THM_JUMP6,	/* type */
822	 1,			/* rightshift */
823	 2,			/* size */
824	 6,			/* bitsize */
825	 true,			/* pc_relative */
826	 0,			/* bitpos */
827	 complain_overflow_unsigned,/* complain_on_overflow */
828	 bfd_elf_generic_reloc,	/* special_function */
829	 "R_ARM_THM_JUMP6",	/* name */
830	 false,			/* partial_inplace */
831	 0x02f8,		/* src_mask */
832	 0x02f8,		/* dst_mask */
833	 true),			/* pcrel_offset */
834
835  /* These are declared as 13-bit signed relocations because we can
836     address -4095 .. 4095(base) by altering ADDW to SUBW or vice
837     versa.  */
838  HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
839	 0,			/* rightshift */
840	 4,			/* size */
841	 13,			/* bitsize */
842	 true,			/* pc_relative */
843	 0,			/* bitpos */
844	 complain_overflow_dont,/* complain_on_overflow */
845	 bfd_elf_generic_reloc,	/* special_function */
846	 "R_ARM_THM_ALU_PREL_11_0",/* name */
847	 false,			/* partial_inplace */
848	 0xffffffff,		/* src_mask */
849	 0xffffffff,		/* dst_mask */
850	 true),			/* pcrel_offset */
851
852  HOWTO (R_ARM_THM_PC12,	/* type */
853	 0,			/* rightshift */
854	 4,			/* size */
855	 13,			/* bitsize */
856	 true,			/* pc_relative */
857	 0,			/* bitpos */
858	 complain_overflow_dont,/* complain_on_overflow */
859	 bfd_elf_generic_reloc,	/* special_function */
860	 "R_ARM_THM_PC12",	/* name */
861	 false,			/* partial_inplace */
862	 0xffffffff,		/* src_mask */
863	 0xffffffff,		/* dst_mask */
864	 true),			/* pcrel_offset */
865
866  HOWTO (R_ARM_ABS32_NOI,	/* type */
867	 0,			/* rightshift */
868	 4,			/* size */
869	 32,			/* bitsize */
870	 false,			/* pc_relative */
871	 0,			/* bitpos */
872	 complain_overflow_dont,/* complain_on_overflow */
873	 bfd_elf_generic_reloc,	/* special_function */
874	 "R_ARM_ABS32_NOI",	/* name */
875	 false,			/* partial_inplace */
876	 0xffffffff,		/* src_mask */
877	 0xffffffff,		/* dst_mask */
878	 false),		/* pcrel_offset */
879
880  HOWTO (R_ARM_REL32_NOI,	/* type */
881	 0,			/* rightshift */
882	 4,			/* size */
883	 32,			/* bitsize */
884	 true,			/* pc_relative */
885	 0,			/* bitpos */
886	 complain_overflow_dont,/* complain_on_overflow */
887	 bfd_elf_generic_reloc,	/* special_function */
888	 "R_ARM_REL32_NOI",	/* name */
889	 false,			/* partial_inplace */
890	 0xffffffff,		/* src_mask */
891	 0xffffffff,		/* dst_mask */
892	 false),		/* pcrel_offset */
893
894  /* Group relocations.  */
895
896  HOWTO (R_ARM_ALU_PC_G0_NC,	/* type */
897	 0,			/* rightshift */
898	 4,			/* size */
899	 32,			/* bitsize */
900	 true,			/* pc_relative */
901	 0,			/* bitpos */
902	 complain_overflow_dont,/* complain_on_overflow */
903	 bfd_elf_generic_reloc,	/* special_function */
904	 "R_ARM_ALU_PC_G0_NC",	/* name */
905	 false,			/* partial_inplace */
906	 0xffffffff,		/* src_mask */
907	 0xffffffff,		/* dst_mask */
908	 true),			/* pcrel_offset */
909
910  HOWTO (R_ARM_ALU_PC_G0,	/* type */
911	 0,			/* rightshift */
912	 4,			/* size */
913	 32,			/* bitsize */
914	 true,			/* pc_relative */
915	 0,			/* bitpos */
916	 complain_overflow_dont,/* complain_on_overflow */
917	 bfd_elf_generic_reloc,	/* special_function */
918	 "R_ARM_ALU_PC_G0",	/* name */
919	 false,			/* partial_inplace */
920	 0xffffffff,		/* src_mask */
921	 0xffffffff,		/* dst_mask */
922	 true),			/* pcrel_offset */
923
924  HOWTO (R_ARM_ALU_PC_G1_NC,	/* type */
925	 0,			/* rightshift */
926	 4,			/* size */
927	 32,			/* bitsize */
928	 true,			/* pc_relative */
929	 0,			/* bitpos */
930	 complain_overflow_dont,/* complain_on_overflow */
931	 bfd_elf_generic_reloc,	/* special_function */
932	 "R_ARM_ALU_PC_G1_NC",	/* name */
933	 false,			/* partial_inplace */
934	 0xffffffff,		/* src_mask */
935	 0xffffffff,		/* dst_mask */
936	 true),			/* pcrel_offset */
937
938  HOWTO (R_ARM_ALU_PC_G1,	/* type */
939	 0,			/* rightshift */
940	 4,			/* size */
941	 32,			/* bitsize */
942	 true,			/* pc_relative */
943	 0,			/* bitpos */
944	 complain_overflow_dont,/* complain_on_overflow */
945	 bfd_elf_generic_reloc,	/* special_function */
946	 "R_ARM_ALU_PC_G1",	/* name */
947	 false,			/* partial_inplace */
948	 0xffffffff,		/* src_mask */
949	 0xffffffff,		/* dst_mask */
950	 true),			/* pcrel_offset */
951
952  HOWTO (R_ARM_ALU_PC_G2,	/* type */
953	 0,			/* rightshift */
954	 4,			/* size */
955	 32,			/* bitsize */
956	 true,			/* pc_relative */
957	 0,			/* bitpos */
958	 complain_overflow_dont,/* complain_on_overflow */
959	 bfd_elf_generic_reloc,	/* special_function */
960	 "R_ARM_ALU_PC_G2",	/* name */
961	 false,			/* partial_inplace */
962	 0xffffffff,		/* src_mask */
963	 0xffffffff,		/* dst_mask */
964	 true),			/* pcrel_offset */
965
966  HOWTO (R_ARM_LDR_PC_G1,	/* type */
967	 0,			/* rightshift */
968	 4,			/* size */
969	 32,			/* bitsize */
970	 true,			/* pc_relative */
971	 0,			/* bitpos */
972	 complain_overflow_dont,/* complain_on_overflow */
973	 bfd_elf_generic_reloc,	/* special_function */
974	 "R_ARM_LDR_PC_G1",	/* name */
975	 false,			/* partial_inplace */
976	 0xffffffff,		/* src_mask */
977	 0xffffffff,		/* dst_mask */
978	 true),			/* pcrel_offset */
979
980  HOWTO (R_ARM_LDR_PC_G2,	/* type */
981	 0,			/* rightshift */
982	 4,			/* size */
983	 32,			/* bitsize */
984	 true,			/* pc_relative */
985	 0,			/* bitpos */
986	 complain_overflow_dont,/* complain_on_overflow */
987	 bfd_elf_generic_reloc,	/* special_function */
988	 "R_ARM_LDR_PC_G2",	/* name */
989	 false,			/* partial_inplace */
990	 0xffffffff,		/* src_mask */
991	 0xffffffff,		/* dst_mask */
992	 true),			/* pcrel_offset */
993
994  HOWTO (R_ARM_LDRS_PC_G0,	/* type */
995	 0,			/* rightshift */
996	 4,			/* size */
997	 32,			/* bitsize */
998	 true,			/* pc_relative */
999	 0,			/* bitpos */
1000	 complain_overflow_dont,/* complain_on_overflow */
1001	 bfd_elf_generic_reloc,	/* special_function */
1002	 "R_ARM_LDRS_PC_G0",	/* name */
1003	 false,			/* partial_inplace */
1004	 0xffffffff,		/* src_mask */
1005	 0xffffffff,		/* dst_mask */
1006	 true),			/* pcrel_offset */
1007
1008  HOWTO (R_ARM_LDRS_PC_G1,	/* type */
1009	 0,			/* rightshift */
1010	 4,			/* size */
1011	 32,			/* bitsize */
1012	 true,			/* pc_relative */
1013	 0,			/* bitpos */
1014	 complain_overflow_dont,/* complain_on_overflow */
1015	 bfd_elf_generic_reloc,	/* special_function */
1016	 "R_ARM_LDRS_PC_G1",	/* name */
1017	 false,			/* partial_inplace */
1018	 0xffffffff,		/* src_mask */
1019	 0xffffffff,		/* dst_mask */
1020	 true),			/* pcrel_offset */
1021
1022  HOWTO (R_ARM_LDRS_PC_G2,	/* type */
1023	 0,			/* rightshift */
1024	 4,			/* size */
1025	 32,			/* bitsize */
1026	 true,			/* pc_relative */
1027	 0,			/* bitpos */
1028	 complain_overflow_dont,/* complain_on_overflow */
1029	 bfd_elf_generic_reloc,	/* special_function */
1030	 "R_ARM_LDRS_PC_G2",	/* name */
1031	 false,			/* partial_inplace */
1032	 0xffffffff,		/* src_mask */
1033	 0xffffffff,		/* dst_mask */
1034	 true),			/* pcrel_offset */
1035
1036  HOWTO (R_ARM_LDC_PC_G0,	/* type */
1037	 0,			/* rightshift */
1038	 4,			/* size */
1039	 32,			/* bitsize */
1040	 true,			/* pc_relative */
1041	 0,			/* bitpos */
1042	 complain_overflow_dont,/* complain_on_overflow */
1043	 bfd_elf_generic_reloc,	/* special_function */
1044	 "R_ARM_LDC_PC_G0",	/* name */
1045	 false,			/* partial_inplace */
1046	 0xffffffff,		/* src_mask */
1047	 0xffffffff,		/* dst_mask */
1048	 true),			/* pcrel_offset */
1049
1050  HOWTO (R_ARM_LDC_PC_G1,	/* type */
1051	 0,			/* rightshift */
1052	 4,			/* size */
1053	 32,			/* bitsize */
1054	 true,			/* pc_relative */
1055	 0,			/* bitpos */
1056	 complain_overflow_dont,/* complain_on_overflow */
1057	 bfd_elf_generic_reloc,	/* special_function */
1058	 "R_ARM_LDC_PC_G1",	/* name */
1059	 false,			/* partial_inplace */
1060	 0xffffffff,		/* src_mask */
1061	 0xffffffff,		/* dst_mask */
1062	 true),			/* pcrel_offset */
1063
1064  HOWTO (R_ARM_LDC_PC_G2,	/* type */
1065	 0,			/* rightshift */
1066	 4,			/* size */
1067	 32,			/* bitsize */
1068	 true,			/* pc_relative */
1069	 0,			/* bitpos */
1070	 complain_overflow_dont,/* complain_on_overflow */
1071	 bfd_elf_generic_reloc,	/* special_function */
1072	 "R_ARM_LDC_PC_G2",	/* name */
1073	 false,			/* partial_inplace */
1074	 0xffffffff,		/* src_mask */
1075	 0xffffffff,		/* dst_mask */
1076	 true),			/* pcrel_offset */
1077
1078  HOWTO (R_ARM_ALU_SB_G0_NC,	/* type */
1079	 0,			/* rightshift */
1080	 4,			/* size */
1081	 32,			/* bitsize */
1082	 true,			/* pc_relative */
1083	 0,			/* bitpos */
1084	 complain_overflow_dont,/* complain_on_overflow */
1085	 bfd_elf_generic_reloc,	/* special_function */
1086	 "R_ARM_ALU_SB_G0_NC",	/* name */
1087	 false,			/* partial_inplace */
1088	 0xffffffff,		/* src_mask */
1089	 0xffffffff,		/* dst_mask */
1090	 true),			/* pcrel_offset */
1091
1092  HOWTO (R_ARM_ALU_SB_G0,	/* type */
1093	 0,			/* rightshift */
1094	 4,			/* size */
1095	 32,			/* bitsize */
1096	 true,			/* pc_relative */
1097	 0,			/* bitpos */
1098	 complain_overflow_dont,/* complain_on_overflow */
1099	 bfd_elf_generic_reloc,	/* special_function */
1100	 "R_ARM_ALU_SB_G0",	/* name */
1101	 false,			/* partial_inplace */
1102	 0xffffffff,		/* src_mask */
1103	 0xffffffff,		/* dst_mask */
1104	 true),			/* pcrel_offset */
1105
1106  HOWTO (R_ARM_ALU_SB_G1_NC,	/* type */
1107	 0,			/* rightshift */
1108	 4,			/* size */
1109	 32,			/* bitsize */
1110	 true,			/* pc_relative */
1111	 0,			/* bitpos */
1112	 complain_overflow_dont,/* complain_on_overflow */
1113	 bfd_elf_generic_reloc,	/* special_function */
1114	 "R_ARM_ALU_SB_G1_NC",	/* name */
1115	 false,			/* partial_inplace */
1116	 0xffffffff,		/* src_mask */
1117	 0xffffffff,		/* dst_mask */
1118	 true),			/* pcrel_offset */
1119
1120  HOWTO (R_ARM_ALU_SB_G1,	/* type */
1121	 0,			/* rightshift */
1122	 4,			/* size */
1123	 32,			/* bitsize */
1124	 true,			/* pc_relative */
1125	 0,			/* bitpos */
1126	 complain_overflow_dont,/* complain_on_overflow */
1127	 bfd_elf_generic_reloc,	/* special_function */
1128	 "R_ARM_ALU_SB_G1",	/* name */
1129	 false,			/* partial_inplace */
1130	 0xffffffff,		/* src_mask */
1131	 0xffffffff,		/* dst_mask */
1132	 true),			/* pcrel_offset */
1133
1134  HOWTO (R_ARM_ALU_SB_G2,	/* type */
1135	 0,			/* rightshift */
1136	 4,			/* size */
1137	 32,			/* bitsize */
1138	 true,			/* pc_relative */
1139	 0,			/* bitpos */
1140	 complain_overflow_dont,/* complain_on_overflow */
1141	 bfd_elf_generic_reloc,	/* special_function */
1142	 "R_ARM_ALU_SB_G2",	/* name */
1143	 false,			/* partial_inplace */
1144	 0xffffffff,		/* src_mask */
1145	 0xffffffff,		/* dst_mask */
1146	 true),			/* pcrel_offset */
1147
1148  HOWTO (R_ARM_LDR_SB_G0,	/* type */
1149	 0,			/* rightshift */
1150	 4,			/* size */
1151	 32,			/* bitsize */
1152	 true,			/* pc_relative */
1153	 0,			/* bitpos */
1154	 complain_overflow_dont,/* complain_on_overflow */
1155	 bfd_elf_generic_reloc,	/* special_function */
1156	 "R_ARM_LDR_SB_G0",	/* name */
1157	 false,			/* partial_inplace */
1158	 0xffffffff,		/* src_mask */
1159	 0xffffffff,		/* dst_mask */
1160	 true),			/* pcrel_offset */
1161
1162  HOWTO (R_ARM_LDR_SB_G1,	/* type */
1163	 0,			/* rightshift */
1164	 4,			/* size */
1165	 32,			/* bitsize */
1166	 true,			/* pc_relative */
1167	 0,			/* bitpos */
1168	 complain_overflow_dont,/* complain_on_overflow */
1169	 bfd_elf_generic_reloc,	/* special_function */
1170	 "R_ARM_LDR_SB_G1",	/* name */
1171	 false,			/* partial_inplace */
1172	 0xffffffff,		/* src_mask */
1173	 0xffffffff,		/* dst_mask */
1174	 true),			/* pcrel_offset */
1175
1176  HOWTO (R_ARM_LDR_SB_G2,	/* type */
1177	 0,			/* rightshift */
1178	 4,			/* size */
1179	 32,			/* bitsize */
1180	 true,			/* pc_relative */
1181	 0,			/* bitpos */
1182	 complain_overflow_dont,/* complain_on_overflow */
1183	 bfd_elf_generic_reloc,	/* special_function */
1184	 "R_ARM_LDR_SB_G2",	/* name */
1185	 false,			/* partial_inplace */
1186	 0xffffffff,		/* src_mask */
1187	 0xffffffff,		/* dst_mask */
1188	 true),			/* pcrel_offset */
1189
1190  HOWTO (R_ARM_LDRS_SB_G0,	/* type */
1191	 0,			/* rightshift */
1192	 4,			/* size */
1193	 32,			/* bitsize */
1194	 true,			/* pc_relative */
1195	 0,			/* bitpos */
1196	 complain_overflow_dont,/* complain_on_overflow */
1197	 bfd_elf_generic_reloc,	/* special_function */
1198	 "R_ARM_LDRS_SB_G0",	/* name */
1199	 false,			/* partial_inplace */
1200	 0xffffffff,		/* src_mask */
1201	 0xffffffff,		/* dst_mask */
1202	 true),			/* pcrel_offset */
1203
1204  HOWTO (R_ARM_LDRS_SB_G1,	/* type */
1205	 0,			/* rightshift */
1206	 4,			/* size */
1207	 32,			/* bitsize */
1208	 true,			/* pc_relative */
1209	 0,			/* bitpos */
1210	 complain_overflow_dont,/* complain_on_overflow */
1211	 bfd_elf_generic_reloc,	/* special_function */
1212	 "R_ARM_LDRS_SB_G1",	/* name */
1213	 false,			/* partial_inplace */
1214	 0xffffffff,		/* src_mask */
1215	 0xffffffff,		/* dst_mask */
1216	 true),			/* pcrel_offset */
1217
1218  HOWTO (R_ARM_LDRS_SB_G2,	/* type */
1219	 0,			/* rightshift */
1220	 4,			/* size */
1221	 32,			/* bitsize */
1222	 true,			/* pc_relative */
1223	 0,			/* bitpos */
1224	 complain_overflow_dont,/* complain_on_overflow */
1225	 bfd_elf_generic_reloc,	/* special_function */
1226	 "R_ARM_LDRS_SB_G2",	/* name */
1227	 false,			/* partial_inplace */
1228	 0xffffffff,		/* src_mask */
1229	 0xffffffff,		/* dst_mask */
1230	 true),			/* pcrel_offset */
1231
1232  HOWTO (R_ARM_LDC_SB_G0,	/* type */
1233	 0,			/* rightshift */
1234	 4,			/* size */
1235	 32,			/* bitsize */
1236	 true,			/* pc_relative */
1237	 0,			/* bitpos */
1238	 complain_overflow_dont,/* complain_on_overflow */
1239	 bfd_elf_generic_reloc,	/* special_function */
1240	 "R_ARM_LDC_SB_G0",	/* name */
1241	 false,			/* partial_inplace */
1242	 0xffffffff,		/* src_mask */
1243	 0xffffffff,		/* dst_mask */
1244	 true),			/* pcrel_offset */
1245
1246  HOWTO (R_ARM_LDC_SB_G1,	/* type */
1247	 0,			/* rightshift */
1248	 4,			/* size */
1249	 32,			/* bitsize */
1250	 true,			/* pc_relative */
1251	 0,			/* bitpos */
1252	 complain_overflow_dont,/* complain_on_overflow */
1253	 bfd_elf_generic_reloc,	/* special_function */
1254	 "R_ARM_LDC_SB_G1",	/* name */
1255	 false,			/* partial_inplace */
1256	 0xffffffff,		/* src_mask */
1257	 0xffffffff,		/* dst_mask */
1258	 true),			/* pcrel_offset */
1259
1260  HOWTO (R_ARM_LDC_SB_G2,	/* type */
1261	 0,			/* rightshift */
1262	 4,			/* size */
1263	 32,			/* bitsize */
1264	 true,			/* pc_relative */
1265	 0,			/* bitpos */
1266	 complain_overflow_dont,/* complain_on_overflow */
1267	 bfd_elf_generic_reloc,	/* special_function */
1268	 "R_ARM_LDC_SB_G2",	/* name */
1269	 false,			/* partial_inplace */
1270	 0xffffffff,		/* src_mask */
1271	 0xffffffff,		/* dst_mask */
1272	 true),			/* pcrel_offset */
1273
1274  /* End of group relocations.  */
1275
1276  HOWTO (R_ARM_MOVW_BREL_NC,	/* type */
1277	 0,			/* rightshift */
1278	 4,			/* size */
1279	 16,			/* bitsize */
1280	 false,			/* pc_relative */
1281	 0,			/* bitpos */
1282	 complain_overflow_dont,/* complain_on_overflow */
1283	 bfd_elf_generic_reloc,	/* special_function */
1284	 "R_ARM_MOVW_BREL_NC",	/* name */
1285	 false,			/* partial_inplace */
1286	 0x0000ffff,		/* src_mask */
1287	 0x0000ffff,		/* dst_mask */
1288	 false),		/* pcrel_offset */
1289
1290  HOWTO (R_ARM_MOVT_BREL,	/* type */
1291	 0,			/* rightshift */
1292	 4,			/* size */
1293	 16,			/* bitsize */
1294	 false,			/* pc_relative */
1295	 0,			/* bitpos */
1296	 complain_overflow_bitfield,/* complain_on_overflow */
1297	 bfd_elf_generic_reloc,	/* special_function */
1298	 "R_ARM_MOVT_BREL",	/* name */
1299	 false,			/* partial_inplace */
1300	 0x0000ffff,		/* src_mask */
1301	 0x0000ffff,		/* dst_mask */
1302	 false),		/* pcrel_offset */
1303
1304  HOWTO (R_ARM_MOVW_BREL,	/* type */
1305	 0,			/* rightshift */
1306	 4,			/* size */
1307	 16,			/* bitsize */
1308	 false,			/* pc_relative */
1309	 0,			/* bitpos */
1310	 complain_overflow_dont,/* complain_on_overflow */
1311	 bfd_elf_generic_reloc,	/* special_function */
1312	 "R_ARM_MOVW_BREL",	/* name */
1313	 false,			/* partial_inplace */
1314	 0x0000ffff,		/* src_mask */
1315	 0x0000ffff,		/* dst_mask */
1316	 false),		/* pcrel_offset */
1317
1318  HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1319	 0,			/* rightshift */
1320	 4,			/* size */
1321	 16,			/* bitsize */
1322	 false,			/* pc_relative */
1323	 0,			/* bitpos */
1324	 complain_overflow_dont,/* complain_on_overflow */
1325	 bfd_elf_generic_reloc,	/* special_function */
1326	 "R_ARM_THM_MOVW_BREL_NC",/* name */
1327	 false,			/* partial_inplace */
1328	 0x040f70ff,		/* src_mask */
1329	 0x040f70ff,		/* dst_mask */
1330	 false),		/* pcrel_offset */
1331
1332  HOWTO (R_ARM_THM_MOVT_BREL,	/* type */
1333	 0,			/* rightshift */
1334	 4,			/* size */
1335	 16,			/* bitsize */
1336	 false,			/* pc_relative */
1337	 0,			/* bitpos */
1338	 complain_overflow_bitfield,/* complain_on_overflow */
1339	 bfd_elf_generic_reloc,	/* special_function */
1340	 "R_ARM_THM_MOVT_BREL",	/* name */
1341	 false,			/* partial_inplace */
1342	 0x040f70ff,		/* src_mask */
1343	 0x040f70ff,		/* dst_mask */
1344	 false),		/* pcrel_offset */
1345
1346  HOWTO (R_ARM_THM_MOVW_BREL,	/* type */
1347	 0,			/* rightshift */
1348	 4,			/* size */
1349	 16,			/* bitsize */
1350	 false,			/* pc_relative */
1351	 0,			/* bitpos */
1352	 complain_overflow_dont,/* complain_on_overflow */
1353	 bfd_elf_generic_reloc,	/* special_function */
1354	 "R_ARM_THM_MOVW_BREL",	/* name */
1355	 false,			/* partial_inplace */
1356	 0x040f70ff,		/* src_mask */
1357	 0x040f70ff,		/* dst_mask */
1358	 false),		/* pcrel_offset */
1359
1360  HOWTO (R_ARM_TLS_GOTDESC,	/* type */
1361	 0,			/* rightshift */
1362	 4,			/* size */
1363	 32,			/* bitsize */
1364	 false,			/* pc_relative */
1365	 0,			/* bitpos */
1366	 complain_overflow_bitfield,/* complain_on_overflow */
1367	 NULL,			/* special_function */
1368	 "R_ARM_TLS_GOTDESC",	/* name */
1369	 true,			/* partial_inplace */
1370	 0xffffffff,		/* src_mask */
1371	 0xffffffff,		/* dst_mask */
1372	 false),		/* pcrel_offset */
1373
1374  HOWTO (R_ARM_TLS_CALL,	/* type */
1375	 0,			/* rightshift */
1376	 4,			/* size */
1377	 24,			/* bitsize */
1378	 false,			/* pc_relative */
1379	 0,			/* bitpos */
1380	 complain_overflow_dont,/* complain_on_overflow */
1381	 bfd_elf_generic_reloc,	/* special_function */
1382	 "R_ARM_TLS_CALL",	/* name */
1383	 false,			/* partial_inplace */
1384	 0x00ffffff,		/* src_mask */
1385	 0x00ffffff,		/* dst_mask */
1386	 false),		/* pcrel_offset */
1387
1388  HOWTO (R_ARM_TLS_DESCSEQ,	/* type */
1389	 0,			/* rightshift */
1390	 4,			/* size */
1391	 0,			/* bitsize */
1392	 false,			/* pc_relative */
1393	 0,			/* bitpos */
1394	 complain_overflow_dont,/* complain_on_overflow */
1395	 bfd_elf_generic_reloc,	/* special_function */
1396	 "R_ARM_TLS_DESCSEQ",	/* name */
1397	 false,			/* partial_inplace */
1398	 0x00000000,		/* src_mask */
1399	 0x00000000,		/* dst_mask */
1400	 false),		/* pcrel_offset */
1401
1402  HOWTO (R_ARM_THM_TLS_CALL,	/* type */
1403	 0,			/* rightshift */
1404	 4,			/* size */
1405	 24,			/* bitsize */
1406	 false,			/* pc_relative */
1407	 0,			/* bitpos */
1408	 complain_overflow_dont,/* complain_on_overflow */
1409	 bfd_elf_generic_reloc,	/* special_function */
1410	 "R_ARM_THM_TLS_CALL",	/* name */
1411	 false,			/* partial_inplace */
1412	 0x07ff07ff,		/* src_mask */
1413	 0x07ff07ff,		/* dst_mask */
1414	 false),		/* pcrel_offset */
1415
1416  HOWTO (R_ARM_PLT32_ABS,	/* type */
1417	 0,			/* rightshift */
1418	 4,			/* size */
1419	 32,			/* bitsize */
1420	 false,			/* pc_relative */
1421	 0,			/* bitpos */
1422	 complain_overflow_dont,/* complain_on_overflow */
1423	 bfd_elf_generic_reloc,	/* special_function */
1424	 "R_ARM_PLT32_ABS",	/* name */
1425	 false,			/* partial_inplace */
1426	 0xffffffff,		/* src_mask */
1427	 0xffffffff,		/* dst_mask */
1428	 false),		/* pcrel_offset */
1429
1430  HOWTO (R_ARM_GOT_ABS,		/* type */
1431	 0,			/* rightshift */
1432	 4,			/* size */
1433	 32,			/* bitsize */
1434	 false,			/* pc_relative */
1435	 0,			/* bitpos */
1436	 complain_overflow_dont,/* complain_on_overflow */
1437	 bfd_elf_generic_reloc,	/* special_function */
1438	 "R_ARM_GOT_ABS",	/* name */
1439	 false,			/* partial_inplace */
1440	 0xffffffff,		/* src_mask */
1441	 0xffffffff,		/* dst_mask */
1442	 false),			/* pcrel_offset */
1443
1444  HOWTO (R_ARM_GOT_PREL,	/* type */
1445	 0,			/* rightshift */
1446	 4,			/* size */
1447	 32,			/* bitsize */
1448	 true,			/* pc_relative */
1449	 0,			/* bitpos */
1450	 complain_overflow_dont,	/* complain_on_overflow */
1451	 bfd_elf_generic_reloc,	/* special_function */
1452	 "R_ARM_GOT_PREL",	/* name */
1453	 false,			/* partial_inplace */
1454	 0xffffffff,		/* src_mask */
1455	 0xffffffff,		/* dst_mask */
1456	 true),			/* pcrel_offset */
1457
1458  HOWTO (R_ARM_GOT_BREL12,	/* type */
1459	 0,			/* rightshift */
1460	 4,			/* size */
1461	 12,			/* bitsize */
1462	 false,			/* pc_relative */
1463	 0,			/* bitpos */
1464	 complain_overflow_bitfield,/* complain_on_overflow */
1465	 bfd_elf_generic_reloc,	/* special_function */
1466	 "R_ARM_GOT_BREL12",	/* name */
1467	 false,			/* partial_inplace */
1468	 0x00000fff,		/* src_mask */
1469	 0x00000fff,		/* dst_mask */
1470	 false),		/* pcrel_offset */
1471
1472  HOWTO (R_ARM_GOTOFF12,	/* type */
1473	 0,			/* rightshift */
1474	 4,			/* size */
1475	 12,			/* bitsize */
1476	 false,			/* pc_relative */
1477	 0,			/* bitpos */
1478	 complain_overflow_bitfield,/* complain_on_overflow */
1479	 bfd_elf_generic_reloc,	/* special_function */
1480	 "R_ARM_GOTOFF12",	/* name */
1481	 false,			/* partial_inplace */
1482	 0x00000fff,		/* src_mask */
1483	 0x00000fff,		/* dst_mask */
1484	 false),		/* pcrel_offset */
1485
1486  EMPTY_HOWTO (R_ARM_GOTRELAX),	 /* reserved for future GOT-load optimizations */
1487
1488  /* GNU extension to record C++ vtable member usage */
1489  HOWTO (R_ARM_GNU_VTENTRY,	/* type */
1490	 0,			/* rightshift */
1491	 4,			/* size */
1492	 0,			/* bitsize */
1493	 false,			/* pc_relative */
1494	 0,			/* bitpos */
1495	 complain_overflow_dont, /* complain_on_overflow */
1496	 _bfd_elf_rel_vtable_reloc_fn,	/* special_function */
1497	 "R_ARM_GNU_VTENTRY",	/* name */
1498	 false,			/* partial_inplace */
1499	 0,			/* src_mask */
1500	 0,			/* dst_mask */
1501	 false),		/* pcrel_offset */
1502
1503  /* GNU extension to record C++ vtable hierarchy */
1504  HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1505	 0,			/* rightshift */
1506	 4,			/* size */
1507	 0,			/* bitsize */
1508	 false,			/* pc_relative */
1509	 0,			/* bitpos */
1510	 complain_overflow_dont, /* complain_on_overflow */
1511	 NULL,			/* special_function */
1512	 "R_ARM_GNU_VTINHERIT", /* name */
1513	 false,			/* partial_inplace */
1514	 0,			/* src_mask */
1515	 0,			/* dst_mask */
1516	 false),		/* pcrel_offset */
1517
1518  HOWTO (R_ARM_THM_JUMP11,	/* type */
1519	 1,			/* rightshift */
1520	 2,			/* size */
1521	 11,			/* bitsize */
1522	 true,			/* pc_relative */
1523	 0,			/* bitpos */
1524	 complain_overflow_signed,	/* complain_on_overflow */
1525	 bfd_elf_generic_reloc,	/* special_function */
1526	 "R_ARM_THM_JUMP11",	/* name */
1527	 false,			/* partial_inplace */
1528	 0x000007ff,		/* src_mask */
1529	 0x000007ff,		/* dst_mask */
1530	 true),			/* pcrel_offset */
1531
1532  HOWTO (R_ARM_THM_JUMP8,	/* type */
1533	 1,			/* rightshift */
1534	 2,			/* size */
1535	 8,			/* bitsize */
1536	 true,			/* pc_relative */
1537	 0,			/* bitpos */
1538	 complain_overflow_signed,	/* complain_on_overflow */
1539	 bfd_elf_generic_reloc,	/* special_function */
1540	 "R_ARM_THM_JUMP8",	/* name */
1541	 false,			/* partial_inplace */
1542	 0x000000ff,		/* src_mask */
1543	 0x000000ff,		/* dst_mask */
1544	 true),			/* pcrel_offset */
1545
1546  /* TLS relocations */
1547  HOWTO (R_ARM_TLS_GD32,	/* type */
1548	 0,			/* rightshift */
1549	 4,			/* size */
1550	 32,			/* bitsize */
1551	 false,			/* pc_relative */
1552	 0,			/* bitpos */
1553	 complain_overflow_bitfield,/* complain_on_overflow */
1554	 NULL,			/* special_function */
1555	 "R_ARM_TLS_GD32",	/* name */
1556	 true,			/* partial_inplace */
1557	 0xffffffff,		/* src_mask */
1558	 0xffffffff,		/* dst_mask */
1559	 false),		/* pcrel_offset */
1560
1561  HOWTO (R_ARM_TLS_LDM32,	/* type */
1562	 0,			/* rightshift */
1563	 4,			/* size */
1564	 32,			/* bitsize */
1565	 false,			/* pc_relative */
1566	 0,			/* bitpos */
1567	 complain_overflow_bitfield,/* complain_on_overflow */
1568	 bfd_elf_generic_reloc, /* special_function */
1569	 "R_ARM_TLS_LDM32",	/* name */
1570	 true,			/* partial_inplace */
1571	 0xffffffff,		/* src_mask */
1572	 0xffffffff,		/* dst_mask */
1573	 false),		/* pcrel_offset */
1574
1575  HOWTO (R_ARM_TLS_LDO32,	/* type */
1576	 0,			/* rightshift */
1577	 4,			/* size */
1578	 32,			/* bitsize */
1579	 false,			/* pc_relative */
1580	 0,			/* bitpos */
1581	 complain_overflow_bitfield,/* complain_on_overflow */
1582	 bfd_elf_generic_reloc, /* special_function */
1583	 "R_ARM_TLS_LDO32",	/* name */
1584	 true,			/* partial_inplace */
1585	 0xffffffff,		/* src_mask */
1586	 0xffffffff,		/* dst_mask */
1587	 false),		/* pcrel_offset */
1588
1589  HOWTO (R_ARM_TLS_IE32,	/* type */
1590	 0,			/* rightshift */
1591	 4,			/* size */
1592	 32,			/* bitsize */
1593	 false,			 /* pc_relative */
1594	 0,			/* bitpos */
1595	 complain_overflow_bitfield,/* complain_on_overflow */
1596	 NULL,			/* special_function */
1597	 "R_ARM_TLS_IE32",	/* name */
1598	 true,			/* partial_inplace */
1599	 0xffffffff,		/* src_mask */
1600	 0xffffffff,		/* dst_mask */
1601	 false),		/* pcrel_offset */
1602
1603  HOWTO (R_ARM_TLS_LE32,	/* type */
1604	 0,			/* rightshift */
1605	 4,			/* size */
1606	 32,			/* bitsize */
1607	 false,			/* pc_relative */
1608	 0,			/* bitpos */
1609	 complain_overflow_bitfield,/* complain_on_overflow */
1610	 NULL,			/* special_function */
1611	 "R_ARM_TLS_LE32",	/* name */
1612	 true,			/* partial_inplace */
1613	 0xffffffff,		/* src_mask */
1614	 0xffffffff,		/* dst_mask */
1615	 false),		/* pcrel_offset */
1616
1617  HOWTO (R_ARM_TLS_LDO12,	/* type */
1618	 0,			/* rightshift */
1619	 4,			/* size */
1620	 12,			/* bitsize */
1621	 false,			/* pc_relative */
1622	 0,			/* bitpos */
1623	 complain_overflow_bitfield,/* complain_on_overflow */
1624	 bfd_elf_generic_reloc,	/* special_function */
1625	 "R_ARM_TLS_LDO12",	/* name */
1626	 false,			/* partial_inplace */
1627	 0x00000fff,		/* src_mask */
1628	 0x00000fff,		/* dst_mask */
1629	 false),		/* pcrel_offset */
1630
1631  HOWTO (R_ARM_TLS_LE12,	/* type */
1632	 0,			/* rightshift */
1633	 4,			/* size */
1634	 12,			/* bitsize */
1635	 false,			/* pc_relative */
1636	 0,			/* bitpos */
1637	 complain_overflow_bitfield,/* complain_on_overflow */
1638	 bfd_elf_generic_reloc,	/* special_function */
1639	 "R_ARM_TLS_LE12",	/* name */
1640	 false,			/* partial_inplace */
1641	 0x00000fff,		/* src_mask */
1642	 0x00000fff,		/* dst_mask */
1643	 false),		/* pcrel_offset */
1644
1645  HOWTO (R_ARM_TLS_IE12GP,	/* type */
1646	 0,			/* rightshift */
1647	 4,			/* size */
1648	 12,			/* bitsize */
1649	 false,			/* pc_relative */
1650	 0,			/* bitpos */
1651	 complain_overflow_bitfield,/* complain_on_overflow */
1652	 bfd_elf_generic_reloc,	/* special_function */
1653	 "R_ARM_TLS_IE12GP",	/* name */
1654	 false,			/* partial_inplace */
1655	 0x00000fff,		/* src_mask */
1656	 0x00000fff,		/* dst_mask */
1657	 false),		/* pcrel_offset */
1658
1659  /* 112-127 private relocations.  */
1660  EMPTY_HOWTO (112),
1661  EMPTY_HOWTO (113),
1662  EMPTY_HOWTO (114),
1663  EMPTY_HOWTO (115),
1664  EMPTY_HOWTO (116),
1665  EMPTY_HOWTO (117),
1666  EMPTY_HOWTO (118),
1667  EMPTY_HOWTO (119),
1668  EMPTY_HOWTO (120),
1669  EMPTY_HOWTO (121),
1670  EMPTY_HOWTO (122),
1671  EMPTY_HOWTO (123),
1672  EMPTY_HOWTO (124),
1673  EMPTY_HOWTO (125),
1674  EMPTY_HOWTO (126),
1675  EMPTY_HOWTO (127),
1676
1677  /* R_ARM_ME_TOO, obsolete.  */
1678  EMPTY_HOWTO (128),
1679
1680  HOWTO (R_ARM_THM_TLS_DESCSEQ,	/* type */
1681	 0,			/* rightshift */
1682	 2,			/* size */
1683	 0,			/* bitsize */
1684	 false,			/* pc_relative */
1685	 0,			/* bitpos */
1686	 complain_overflow_dont,/* complain_on_overflow */
1687	 bfd_elf_generic_reloc,	/* special_function */
1688	 "R_ARM_THM_TLS_DESCSEQ",/* name */
1689	 false,			/* partial_inplace */
1690	 0x00000000,		/* src_mask */
1691	 0x00000000,		/* dst_mask */
1692	 false),		/* pcrel_offset */
1693  EMPTY_HOWTO (130),
1694  EMPTY_HOWTO (131),
1695  HOWTO (R_ARM_THM_ALU_ABS_G0_NC,/* type.  */
1696	 0,			/* rightshift.  */
1697	 2,			/* size.  */
1698	 16,			/* bitsize.  */
1699	 false,			/* pc_relative.  */
1700	 0,			/* bitpos.  */
1701	 complain_overflow_bitfield,/* complain_on_overflow.  */
1702	 bfd_elf_generic_reloc,	/* special_function.  */
1703	 "R_ARM_THM_ALU_ABS_G0_NC",/* name.  */
1704	 false,			/* partial_inplace.  */
1705	 0x00000000,		/* src_mask.  */
1706	 0x00000000,		/* dst_mask.  */
1707	 false),		/* pcrel_offset.  */
1708  HOWTO (R_ARM_THM_ALU_ABS_G1_NC,/* type.  */
1709	 0,			/* rightshift.  */
1710	 2,			/* size.  */
1711	 16,			/* bitsize.  */
1712	 false,			/* pc_relative.  */
1713	 0,			/* bitpos.  */
1714	 complain_overflow_bitfield,/* complain_on_overflow.  */
1715	 bfd_elf_generic_reloc,	/* special_function.  */
1716	 "R_ARM_THM_ALU_ABS_G1_NC",/* name.  */
1717	 false,			/* partial_inplace.  */
1718	 0x00000000,		/* src_mask.  */
1719	 0x00000000,		/* dst_mask.  */
1720	 false),		/* pcrel_offset.  */
1721  HOWTO (R_ARM_THM_ALU_ABS_G2_NC,/* type.  */
1722	 0,			/* rightshift.  */
1723	 2,			/* size.  */
1724	 16,			/* bitsize.  */
1725	 false,			/* pc_relative.  */
1726	 0,			/* bitpos.  */
1727	 complain_overflow_bitfield,/* complain_on_overflow.  */
1728	 bfd_elf_generic_reloc,	/* special_function.  */
1729	 "R_ARM_THM_ALU_ABS_G2_NC",/* name.  */
1730	 false,			/* partial_inplace.  */
1731	 0x00000000,		/* src_mask.  */
1732	 0x00000000,		/* dst_mask.  */
1733	 false),		/* pcrel_offset.  */
1734  HOWTO (R_ARM_THM_ALU_ABS_G3_NC,/* type.  */
1735	 0,			/* rightshift.  */
1736	 2,			/* size.  */
1737	 16,			/* bitsize.  */
1738	 false,			/* pc_relative.  */
1739	 0,			/* bitpos.  */
1740	 complain_overflow_bitfield,/* complain_on_overflow.  */
1741	 bfd_elf_generic_reloc,	/* special_function.  */
1742	 "R_ARM_THM_ALU_ABS_G3_NC",/* name.  */
1743	 false,			/* partial_inplace.  */
1744	 0x00000000,		/* src_mask.  */
1745	 0x00000000,		/* dst_mask.  */
1746	 false),		/* pcrel_offset.  */
1747  /* Relocations for Armv8.1-M Mainline.  */
1748  HOWTO (R_ARM_THM_BF16,	/* type.  */
1749	 0,			/* rightshift.  */
1750	 2,			/* size.  */
1751	 16,			/* bitsize.  */
1752	 true,			/* pc_relative.  */
1753	 0,			/* bitpos.  */
1754	 complain_overflow_dont,/* do not complain_on_overflow.  */
1755	 bfd_elf_generic_reloc,	/* special_function.  */
1756	 "R_ARM_THM_BF16",	/* name.  */
1757	 false,			/* partial_inplace.  */
1758	 0x001f0ffe,		/* src_mask.  */
1759	 0x001f0ffe,		/* dst_mask.  */
1760	 true),			/* pcrel_offset.  */
1761  HOWTO (R_ARM_THM_BF12,	/* type.  */
1762	 0,			/* rightshift.  */
1763	 2,			/* size.  */
1764	 12,			/* bitsize.  */
1765	 true,			/* pc_relative.  */
1766	 0,			/* bitpos.  */
1767	 complain_overflow_dont,/* do not complain_on_overflow.  */
1768	 bfd_elf_generic_reloc,	/* special_function.  */
1769	 "R_ARM_THM_BF12",	/* name.  */
1770	 false,			/* partial_inplace.  */
1771	 0x00010ffe,		/* src_mask.  */
1772	 0x00010ffe,		/* dst_mask.  */
1773	 true),			/* pcrel_offset.  */
1774  HOWTO (R_ARM_THM_BF18,	/* type.  */
1775	 0,			/* rightshift.  */
1776	 2,			/* size.  */
1777	 18,			/* bitsize.  */
1778	 true,			/* pc_relative.  */
1779	 0,			/* bitpos.  */
1780	 complain_overflow_dont,/* do not complain_on_overflow.  */
1781	 bfd_elf_generic_reloc,	/* special_function.  */
1782	 "R_ARM_THM_BF18",	/* name.  */
1783	 false,			/* partial_inplace.  */
1784	 0x007f0ffe,		/* src_mask.  */
1785	 0x007f0ffe,		/* dst_mask.  */
1786	 true),			/* pcrel_offset.  */
1787};
1788
1789/* Relocations 160 onwards:  */
1790static reloc_howto_type elf32_arm_howto_table_2[8] =
1791{
1792  HOWTO (R_ARM_IRELATIVE,	/* type */
1793	 0,			/* rightshift */
1794	 4,			/* size */
1795	 32,			/* bitsize */
1796	 false,			/* pc_relative */
1797	 0,			/* bitpos */
1798	 complain_overflow_bitfield,/* complain_on_overflow */
1799	 bfd_elf_generic_reloc, /* special_function */
1800	 "R_ARM_IRELATIVE",	/* name */
1801	 true,			/* partial_inplace */
1802	 0xffffffff,		/* src_mask */
1803	 0xffffffff,		/* dst_mask */
1804	 false),		/* pcrel_offset */
1805  HOWTO (R_ARM_GOTFUNCDESC,	/* type */
1806	 0,			/* rightshift */
1807	 4,			/* size */
1808	 32,			/* bitsize */
1809	 false,			/* pc_relative */
1810	 0,			/* bitpos */
1811	 complain_overflow_bitfield,/* complain_on_overflow */
1812	 bfd_elf_generic_reloc,	/* special_function */
1813	 "R_ARM_GOTFUNCDESC",	/* name */
1814	 false,			/* partial_inplace */
1815	 0,			/* src_mask */
1816	 0xffffffff,		/* dst_mask */
1817	 false),		/* pcrel_offset */
1818  HOWTO (R_ARM_GOTOFFFUNCDESC, /* type */
1819	 0,			/* rightshift */
1820	 4,			/* size */
1821	 32,			/* bitsize */
1822	 false,			/* pc_relative */
1823	 0,			/* bitpos */
1824	 complain_overflow_bitfield,/* complain_on_overflow */
1825	 bfd_elf_generic_reloc,	/* special_function */
1826	 "R_ARM_GOTOFFFUNCDESC",/* name */
1827	 false,			/* partial_inplace */
1828	 0,			/* src_mask */
1829	 0xffffffff,		/* dst_mask */
1830	 false),		/* pcrel_offset */
1831  HOWTO (R_ARM_FUNCDESC,	/* type */
1832	 0,			/* rightshift */
1833	 4,			/* size */
1834	 32,			/* bitsize */
1835	 false,			/* pc_relative */
1836	 0,			/* bitpos */
1837	 complain_overflow_bitfield,/* complain_on_overflow */
1838	 bfd_elf_generic_reloc,	/* special_function */
1839	 "R_ARM_FUNCDESC",	/* name */
1840	 false,			/* partial_inplace */
1841	 0,			/* src_mask */
1842	 0xffffffff,		/* dst_mask */
1843	 false),		/* pcrel_offset */
1844  HOWTO (R_ARM_FUNCDESC_VALUE,	/* type */
1845	 0,			/* rightshift */
1846	 4,			/* size */
1847	 64,			/* bitsize */
1848	 false,			/* pc_relative */
1849	 0,			/* bitpos */
1850	 complain_overflow_bitfield,/* complain_on_overflow */
1851	 bfd_elf_generic_reloc,	/* special_function */
1852	 "R_ARM_FUNCDESC_VALUE",/* name */
1853	 false,			/* partial_inplace */
1854	 0,			/* src_mask */
1855	 0xffffffff,		/* dst_mask */
1856	 false),		/* pcrel_offset */
1857  HOWTO (R_ARM_TLS_GD32_FDPIC,	/* type */
1858	 0,			/* rightshift */
1859	 4,			/* size */
1860	 32,			/* bitsize */
1861	 false,			/* pc_relative */
1862	 0,			/* bitpos */
1863	 complain_overflow_bitfield,/* complain_on_overflow */
1864	 bfd_elf_generic_reloc,	/* special_function */
1865	 "R_ARM_TLS_GD32_FDPIC",/* name */
1866	 false,			/* partial_inplace */
1867	 0,			/* src_mask */
1868	 0xffffffff,		/* dst_mask */
1869	 false),		/* pcrel_offset */
1870  HOWTO (R_ARM_TLS_LDM32_FDPIC,	/* type */
1871	 0,			/* rightshift */
1872	 4,			/* size */
1873	 32,			/* bitsize */
1874	 false,			/* pc_relative */
1875	 0,			/* bitpos */
1876	 complain_overflow_bitfield,/* complain_on_overflow */
1877	 bfd_elf_generic_reloc,	/* special_function */
1878	 "R_ARM_TLS_LDM32_FDPIC",/* name */
1879	 false,			/* partial_inplace */
1880	 0,			/* src_mask */
1881	 0xffffffff,		/* dst_mask */
1882	 false),		/* pcrel_offset */
1883  HOWTO (R_ARM_TLS_IE32_FDPIC,	/* type */
1884	 0,			/* rightshift */
1885	 4,			/* size */
1886	 32,			/* bitsize */
1887	 false,			/* pc_relative */
1888	 0,			/* bitpos */
1889	 complain_overflow_bitfield,/* complain_on_overflow */
1890	 bfd_elf_generic_reloc,	/* special_function */
1891	 "R_ARM_TLS_IE32_FDPIC",/* name */
1892	 false,			/* partial_inplace */
1893	 0,			/* src_mask */
1894	 0xffffffff,		/* dst_mask */
1895	 false),		/* pcrel_offset */
1896};
1897
1898/* 249-255 extended, currently unused, relocations:  */
1899static reloc_howto_type elf32_arm_howto_table_3[4] =
1900{
1901  HOWTO (R_ARM_RREL32,		/* type */
1902	 0,			/* rightshift */
1903	 0,			/* size */
1904	 0,			/* bitsize */
1905	 false,			/* pc_relative */
1906	 0,			/* bitpos */
1907	 complain_overflow_dont,/* complain_on_overflow */
1908	 bfd_elf_generic_reloc,	/* special_function */
1909	 "R_ARM_RREL32",	/* name */
1910	 false,			/* partial_inplace */
1911	 0,			/* src_mask */
1912	 0,			/* dst_mask */
1913	 false),		/* pcrel_offset */
1914
1915  HOWTO (R_ARM_RABS32,		/* type */
1916	 0,			/* rightshift */
1917	 0,			/* size */
1918	 0,			/* bitsize */
1919	 false,			/* pc_relative */
1920	 0,			/* bitpos */
1921	 complain_overflow_dont,/* complain_on_overflow */
1922	 bfd_elf_generic_reloc,	/* special_function */
1923	 "R_ARM_RABS32",	/* name */
1924	 false,			/* partial_inplace */
1925	 0,			/* src_mask */
1926	 0,			/* dst_mask */
1927	 false),		/* pcrel_offset */
1928
1929  HOWTO (R_ARM_RPC24,		/* type */
1930	 0,			/* rightshift */
1931	 0,			/* size */
1932	 0,			/* bitsize */
1933	 false,			/* pc_relative */
1934	 0,			/* bitpos */
1935	 complain_overflow_dont,/* complain_on_overflow */
1936	 bfd_elf_generic_reloc,	/* special_function */
1937	 "R_ARM_RPC24",		/* name */
1938	 false,			/* partial_inplace */
1939	 0,			/* src_mask */
1940	 0,			/* dst_mask */
1941	 false),		/* pcrel_offset */
1942
1943  HOWTO (R_ARM_RBASE,		/* type */
1944	 0,			/* rightshift */
1945	 0,			/* size */
1946	 0,			/* bitsize */
1947	 false,			/* pc_relative */
1948	 0,			/* bitpos */
1949	 complain_overflow_dont,/* complain_on_overflow */
1950	 bfd_elf_generic_reloc,	/* special_function */
1951	 "R_ARM_RBASE",		/* name */
1952	 false,			/* partial_inplace */
1953	 0,			/* src_mask */
1954	 0,			/* dst_mask */
1955	 false)			/* pcrel_offset */
1956};
1957
1958static reloc_howto_type *
1959elf32_arm_howto_from_type (unsigned int r_type)
1960{
1961  if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1962    return &elf32_arm_howto_table_1[r_type];
1963
1964  if (r_type >= R_ARM_IRELATIVE
1965      && r_type < R_ARM_IRELATIVE + ARRAY_SIZE (elf32_arm_howto_table_2))
1966    return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1967
1968  if (r_type >= R_ARM_RREL32
1969      && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1970    return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
1971
1972  return NULL;
1973}
1974
1975static bool
1976elf32_arm_info_to_howto (bfd * abfd, arelent * bfd_reloc,
1977			 Elf_Internal_Rela * elf_reloc)
1978{
1979  unsigned int r_type;
1980
1981  r_type = ELF32_R_TYPE (elf_reloc->r_info);
1982  if ((bfd_reloc->howto = elf32_arm_howto_from_type (r_type)) == NULL)
1983    {
1984      /* xgettext:c-format */
1985      _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
1986			  abfd, r_type);
1987      bfd_set_error (bfd_error_bad_value);
1988      return false;
1989    }
1990  return true;
1991}
1992
1993struct elf32_arm_reloc_map
1994  {
1995    bfd_reloc_code_real_type  bfd_reloc_val;
1996    unsigned char	      elf_reloc_val;
1997  };
1998
1999/* All entries in this list must also be present in elf32_arm_howto_table.  */
2000static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
2001  {
2002    {BFD_RELOC_NONE,		     R_ARM_NONE},
2003    {BFD_RELOC_ARM_PCREL_BRANCH,     R_ARM_PC24},
2004    {BFD_RELOC_ARM_PCREL_CALL,	     R_ARM_CALL},
2005    {BFD_RELOC_ARM_PCREL_JUMP,	     R_ARM_JUMP24},
2006    {BFD_RELOC_ARM_PCREL_BLX,	     R_ARM_XPC25},
2007    {BFD_RELOC_THUMB_PCREL_BLX,	     R_ARM_THM_XPC22},
2008    {BFD_RELOC_32,		     R_ARM_ABS32},
2009    {BFD_RELOC_32_PCREL,	     R_ARM_REL32},
2010    {BFD_RELOC_8,		     R_ARM_ABS8},
2011    {BFD_RELOC_16,		     R_ARM_ABS16},
2012    {BFD_RELOC_ARM_OFFSET_IMM,	     R_ARM_ABS12},
2013    {BFD_RELOC_ARM_THUMB_OFFSET,     R_ARM_THM_ABS5},
2014    {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
2015    {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
2016    {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
2017    {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
2018    {BFD_RELOC_THUMB_PCREL_BRANCH9,  R_ARM_THM_JUMP8},
2019    {BFD_RELOC_THUMB_PCREL_BRANCH7,  R_ARM_THM_JUMP6},
2020    {BFD_RELOC_ARM_GLOB_DAT,	     R_ARM_GLOB_DAT},
2021    {BFD_RELOC_ARM_JUMP_SLOT,	     R_ARM_JUMP_SLOT},
2022    {BFD_RELOC_ARM_RELATIVE,	     R_ARM_RELATIVE},
2023    {BFD_RELOC_ARM_GOTOFF,	     R_ARM_GOTOFF32},
2024    {BFD_RELOC_ARM_GOTPC,	     R_ARM_GOTPC},
2025    {BFD_RELOC_ARM_GOT_PREL,	     R_ARM_GOT_PREL},
2026    {BFD_RELOC_ARM_GOT32,	     R_ARM_GOT32},
2027    {BFD_RELOC_ARM_PLT32,	     R_ARM_PLT32},
2028    {BFD_RELOC_ARM_TARGET1,	     R_ARM_TARGET1},
2029    {BFD_RELOC_ARM_ROSEGREL32,	     R_ARM_ROSEGREL32},
2030    {BFD_RELOC_ARM_SBREL32,	     R_ARM_SBREL32},
2031    {BFD_RELOC_ARM_PREL31,	     R_ARM_PREL31},
2032    {BFD_RELOC_ARM_TARGET2,	     R_ARM_TARGET2},
2033    {BFD_RELOC_ARM_PLT32,	     R_ARM_PLT32},
2034    {BFD_RELOC_ARM_TLS_GOTDESC,	     R_ARM_TLS_GOTDESC},
2035    {BFD_RELOC_ARM_TLS_CALL,	     R_ARM_TLS_CALL},
2036    {BFD_RELOC_ARM_THM_TLS_CALL,     R_ARM_THM_TLS_CALL},
2037    {BFD_RELOC_ARM_TLS_DESCSEQ,	     R_ARM_TLS_DESCSEQ},
2038    {BFD_RELOC_ARM_THM_TLS_DESCSEQ,  R_ARM_THM_TLS_DESCSEQ},
2039    {BFD_RELOC_ARM_TLS_DESC,	     R_ARM_TLS_DESC},
2040    {BFD_RELOC_ARM_TLS_GD32,	     R_ARM_TLS_GD32},
2041    {BFD_RELOC_ARM_TLS_LDO32,	     R_ARM_TLS_LDO32},
2042    {BFD_RELOC_ARM_TLS_LDM32,	     R_ARM_TLS_LDM32},
2043    {BFD_RELOC_ARM_TLS_DTPMOD32,     R_ARM_TLS_DTPMOD32},
2044    {BFD_RELOC_ARM_TLS_DTPOFF32,     R_ARM_TLS_DTPOFF32},
2045    {BFD_RELOC_ARM_TLS_TPOFF32,	     R_ARM_TLS_TPOFF32},
2046    {BFD_RELOC_ARM_TLS_IE32,	     R_ARM_TLS_IE32},
2047    {BFD_RELOC_ARM_TLS_LE32,	     R_ARM_TLS_LE32},
2048    {BFD_RELOC_ARM_IRELATIVE,	     R_ARM_IRELATIVE},
2049    {BFD_RELOC_ARM_GOTFUNCDESC,      R_ARM_GOTFUNCDESC},
2050    {BFD_RELOC_ARM_GOTOFFFUNCDESC,   R_ARM_GOTOFFFUNCDESC},
2051    {BFD_RELOC_ARM_FUNCDESC,         R_ARM_FUNCDESC},
2052    {BFD_RELOC_ARM_FUNCDESC_VALUE,   R_ARM_FUNCDESC_VALUE},
2053    {BFD_RELOC_ARM_TLS_GD32_FDPIC,   R_ARM_TLS_GD32_FDPIC},
2054    {BFD_RELOC_ARM_TLS_LDM32_FDPIC,  R_ARM_TLS_LDM32_FDPIC},
2055    {BFD_RELOC_ARM_TLS_IE32_FDPIC,   R_ARM_TLS_IE32_FDPIC},
2056    {BFD_RELOC_VTABLE_INHERIT,	     R_ARM_GNU_VTINHERIT},
2057    {BFD_RELOC_VTABLE_ENTRY,	     R_ARM_GNU_VTENTRY},
2058    {BFD_RELOC_ARM_MOVW,	     R_ARM_MOVW_ABS_NC},
2059    {BFD_RELOC_ARM_MOVT,	     R_ARM_MOVT_ABS},
2060    {BFD_RELOC_ARM_MOVW_PCREL,	     R_ARM_MOVW_PREL_NC},
2061    {BFD_RELOC_ARM_MOVT_PCREL,	     R_ARM_MOVT_PREL},
2062    {BFD_RELOC_ARM_THUMB_MOVW,	     R_ARM_THM_MOVW_ABS_NC},
2063    {BFD_RELOC_ARM_THUMB_MOVT,	     R_ARM_THM_MOVT_ABS},
2064    {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
2065    {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
2066    {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
2067    {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
2068    {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
2069    {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
2070    {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
2071    {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
2072    {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
2073    {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
2074    {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
2075    {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
2076    {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
2077    {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
2078    {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
2079    {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
2080    {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
2081    {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
2082    {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
2083    {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
2084    {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
2085    {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
2086    {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
2087    {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
2088    {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
2089    {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
2090    {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
2091    {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
2092    {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
2093    {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
2094    {BFD_RELOC_ARM_V4BX,	     R_ARM_V4BX},
2095    {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC, R_ARM_THM_ALU_ABS_G3_NC},
2096    {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC, R_ARM_THM_ALU_ABS_G2_NC},
2097    {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC, R_ARM_THM_ALU_ABS_G1_NC},
2098    {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC, R_ARM_THM_ALU_ABS_G0_NC},
2099    {BFD_RELOC_ARM_THUMB_BF17, R_ARM_THM_BF16},
2100    {BFD_RELOC_ARM_THUMB_BF13, R_ARM_THM_BF12},
2101    {BFD_RELOC_ARM_THUMB_BF19, R_ARM_THM_BF18}
2102  };
2103
2104static reloc_howto_type *
2105elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2106			     bfd_reloc_code_real_type code)
2107{
2108  unsigned int i;
2109
2110  for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
2111    if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
2112      return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
2113
2114  return NULL;
2115}
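
/* Illustrative sketch, not part of the build: generic BFD code reaches the
   lookup routine above through bfd_reloc_type_lookup, so mapping a generic
   BFD code to the ARM howto looks roughly like this.  "abfd" is assumed to
   be an ELF32 ARM bfd supplied by the caller.  */
#if 0
static const char *
example_name_of_bfd_reloc (bfd *abfd)
{
  /* Per elf32_arm_reloc_map, BFD_RELOC_32 resolves to the R_ARM_ABS32
     entry of elf32_arm_howto_table_1.  */
  reloc_howto_type *howto = bfd_reloc_type_lookup (abfd, BFD_RELOC_32);

  return howto != NULL ? howto->name : NULL;
}
#endif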
2116
2117static reloc_howto_type *
2118elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2119			     const char *r_name)
2120{
2121  unsigned int i;
2122
2123  for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
2124    if (elf32_arm_howto_table_1[i].name != NULL
2125	&& strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
2126      return &elf32_arm_howto_table_1[i];
2127
2128  for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
2129    if (elf32_arm_howto_table_2[i].name != NULL
2130	&& strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
2131      return &elf32_arm_howto_table_2[i];
2132
2133  for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
2134    if (elf32_arm_howto_table_3[i].name != NULL
2135	&& strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
2136      return &elf32_arm_howto_table_3[i];
2137
2138  return NULL;
2139}
2140
2141/* Support for core dump NOTE sections.  */
2142
2143static bool
2144elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
2145{
2146  int offset;
2147  size_t size;
2148
2149  switch (note->descsz)
2150    {
2151      default:
2152	return false;
2153
2154      case 148:		/* Linux/ARM 32-bit.  */
2155	/* pr_cursig */
2156	elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
2157
2158	/* pr_pid */
2159	elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
2160
2161	/* pr_reg */
2162	offset = 72;
2163	size = 72;
2164
2165	break;
2166    }
2167
2168  /* Make a ".reg/999" section.  */
2169  return _bfd_elfcore_make_pseudosection (abfd, ".reg",
2170					  size, note->descpos + offset);
2171}
2172
2173static bool
2174elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
2175{
2176  switch (note->descsz)
2177    {
2178      default:
2179	return false;
2180
2181      case 124:		/* Linux/ARM elf_prpsinfo.  */
2182	elf_tdata (abfd)->core->pid
2183	 = bfd_get_32 (abfd, note->descdata + 12);
2184	elf_tdata (abfd)->core->program
2185	 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
2186	elf_tdata (abfd)->core->command
2187	 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
2188    }
2189
2190  /* Note that at least one implementation tacks a spurious
2191     space onto the end of the args, so strip it off if it
2192     exists.  */
2193  {
2194    char *command = elf_tdata (abfd)->core->command;
2195    int n = strlen (command);
2196
2197    if (0 < n && command[n - 1] == ' ')
2198      command[n - 1] = '\0';
2199  }
2200
2201  return true;
2202}
2203
2204static char *
2205elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
2206				int note_type, ...)
2207{
2208  switch (note_type)
2209    {
2210    default:
2211      return NULL;
2212
2213    case NT_PRPSINFO:
2214      {
2215	char data[124] ATTRIBUTE_NONSTRING;
2216	va_list ap;
2217
2218	va_start (ap, note_type);
2219	memset (data, 0, sizeof (data));
2220	strncpy (data + 28, va_arg (ap, const char *), 16);
2221#if GCC_VERSION == 8000 || GCC_VERSION == 8001
2222	DIAGNOSTIC_PUSH;
2223	/* GCC 8.0 and 8.1 warn about 80 equals destination size with
2224	   -Wstringop-truncation:
2225	   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85643
2226	 */
2227	DIAGNOSTIC_IGNORE_STRINGOP_TRUNCATION;
2228#endif
2229	strncpy (data + 44, va_arg (ap, const char *), 80);
2230#if GCC_VERSION == 8000 || GCC_VERSION == 8001
2231	DIAGNOSTIC_POP;
2232#endif
2233	va_end (ap);
2234
2235	return elfcore_write_note (abfd, buf, bufsiz,
2236				   "CORE", note_type, data, sizeof (data));
2237      }
2238
2239    case NT_PRSTATUS:
2240      {
2241	char data[148];
2242	va_list ap;
2243	long pid;
2244	int cursig;
2245	const void *greg;
2246
2247	va_start (ap, note_type);
2248	memset (data, 0, sizeof (data));
2249	pid = va_arg (ap, long);
2250	bfd_put_32 (abfd, pid, data + 24);
2251	cursig = va_arg (ap, int);
2252	bfd_put_16 (abfd, cursig, data + 12);
2253	greg = va_arg (ap, const void *);
2254	memcpy (data + 72, greg, 72);
2255	va_end (ap);
2256
2257	return elfcore_write_note (abfd, buf, bufsiz,
2258				   "CORE", note_type, data, sizeof (data));
2259      }
2260    }
2261}
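
/* For reference, the Linux/ARM 32-bit note layouts assumed by the routines
   above: NT_PRSTATUS is 148 bytes with pr_cursig at offset 12, pr_pid at
   offset 24 and 72 bytes of general registers at offset 72; NT_PRPSINFO is
   124 bytes with pr_pid at offset 12, a 16-byte program name at offset 28
   and an 80-byte command line at offset 44.  */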
2262
2263#define TARGET_LITTLE_SYM		arm_elf32_le_vec
2264#define TARGET_LITTLE_NAME		"elf32-littlearm"
2265#define TARGET_BIG_SYM			arm_elf32_be_vec
2266#define TARGET_BIG_NAME			"elf32-bigarm"
2267
2268#define elf_backend_grok_prstatus	elf32_arm_nabi_grok_prstatus
2269#define elf_backend_grok_psinfo		elf32_arm_nabi_grok_psinfo
2270#define elf_backend_write_core_note	elf32_arm_nabi_write_core_note
2271
2272typedef unsigned long int insn32;
2273typedef unsigned short int insn16;
2274
2275/* In lieu of proper flags, assume all EABIv4 or later objects are
2276   interworkable.  */
2277#define INTERWORK_FLAG(abfd)  \
2278  (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
2279  || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
2280  || ((abfd)->flags & BFD_LINKER_CREATED))
2281
2282/* The linker script knows the section names for placement.
2283   The entry_names are used to do simple name mangling on the stubs.
2284   Given a function name and its type, the stub can be found.  The
2285   name can be changed.  The only requirement is that the %s be present.  */
2286#define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
2287#define THUMB2ARM_GLUE_ENTRY_NAME   "__%s_from_thumb"
2288
2289#define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
2290#define ARM2THUMB_GLUE_ENTRY_NAME   "__%s_from_arm"
2291
2292#define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
2293#define VFP11_ERRATUM_VENEER_ENTRY_NAME   "__vfp11_veneer_%x"
2294
2295#define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
2296#define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME   "__stm32l4xx_veneer_%x"
2297
2298#define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
2299#define ARM_BX_GLUE_ENTRY_NAME   "__bx_r%d"
2300
2301#define STUB_ENTRY_NAME   "__%s_veneer"
2302
2303#define CMSE_PREFIX "__acle_se_"
2304
2305#define CMSE_STUB_NAME ".gnu.sgstubs"
2306
2307/* The name of the dynamic interpreter.  This is put in the .interp
2308   section.  */
2309#define ELF_DYNAMIC_INTERPRETER     "/usr/lib/ld.so.1"
2310
2311/* FDPIC default stack size.  */
2312#define DEFAULT_STACK_SIZE 0x8000
2313
2314static const unsigned long tls_trampoline [] =
2315{
2316  0xe08e0000,		/* add r0, lr, r0 */
2317  0xe5901004,		/* ldr r1, [r0,#4] */
2318  0xe12fff11,		/* bx  r1 */
2319};
2320
2321static const unsigned long dl_tlsdesc_lazy_trampoline [] =
2322{
2323  0xe52d2004, /*	push    {r2}			*/
2324  0xe59f200c, /*      ldr     r2, [pc, #3f - . - 8]	*/
2325  0xe59f100c, /*      ldr     r1, [pc, #4f - . - 8]	*/
2326  0xe79f2002, /* 1:   ldr     r2, [pc, r2]		*/
2327  0xe081100f, /* 2:   add     r1, pc			*/
2328  0xe12fff12, /*      bx      r2			*/
2329  0x00000014, /* 3:   .word  _GLOBAL_OFFSET_TABLE_ - 1b - 8
2330				+ dl_tlsdesc_lazy_resolver(GOT)   */
2331  0x00000018, /* 4:   .word  _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
2332};
2333
2334/* NOTE: [Thumb nop sequence]
2335   When adding code that transitions from Thumb to Arm the instruction that
2336   should be used for the alignment padding should be 0xe7fd (b .-2) instead of
2337   a nop for performance reasons.  */
2338
2339/* ARM FDPIC PLT entry.  */
2340/* The last 5 words contain PLT lazy fragment code and data.  */
2341static const bfd_vma elf32_arm_fdpic_plt_entry [] =
2342  {
2343    0xe59fc008,    /* ldr     r12, .L1 */
2344    0xe08cc009,    /* add     r12, r12, r9 */
2345    0xe59c9004,    /* ldr     r9, [r12, #4] */
2346    0xe59cf000,    /* ldr     pc, [r12] */
2347    0x00000000,    /* L1.     .word   foo(GOTOFFFUNCDESC) */
2348    0x00000000,    /* L2.     .word   foo(funcdesc_value_reloc_offset) */
2349    0xe51fc00c,    /* ldr     r12, [pc, #-12] */
2350    0xe92d1000,    /* push    {r12} */
2351    0xe599c004,    /* ldr     r12, [r9, #4] */
2352    0xe599f000,    /* ldr     pc, [r9] */
2353  };
2354
2355/* Thumb FDPIC PLT entry.  */
2356/* The last 5 words contain PLT lazy fragment code and data.  */
2357static const bfd_vma elf32_arm_fdpic_thumb_plt_entry [] =
2358  {
2359    0xc00cf8df,    /* ldr.w   r12, .L1 */
2360    0x0c09eb0c,    /* add.w   r12, r12, r9 */
2361    0x9004f8dc,    /* ldr.w   r9, [r12, #4] */
2362    0xf000f8dc,    /* ldr.w   pc, [r12] */
2363    0x00000000,    /* .L1     .word   foo(GOTOFFFUNCDESC) */
2364    0x00000000,    /* .L2     .word   foo(funcdesc_value_reloc_offset) */
2365    0xc008f85f,    /* ldr.w   r12, .L2 */
2366    0xcd04f84d,    /* push    {r12} */
2367    0xc004f8d9,    /* ldr.w   r12, [r9, #4] */
2368    0xf000f8d9,    /* ldr.w   pc, [r9] */
2369  };
2370
2371#ifdef FOUR_WORD_PLT
2372
2373/* The first entry in a procedure linkage table looks like
2374   this.  It is set up so that any shared library function that is
2375   called before the relocation has been set up calls the dynamic
2376   linker first.  */
2377static const bfd_vma elf32_arm_plt0_entry [] =
2378{
2379  0xe52de004,		/* str   lr, [sp, #-4]! */
2380  0xe59fe010,		/* ldr   lr, [pc, #16]  */
2381  0xe08fe00e,		/* add   lr, pc, lr     */
2382  0xe5bef008,		/* ldr   pc, [lr, #8]!  */
2383};
2384
2385/* Subsequent entries in a procedure linkage table look like
2386   this.  */
2387static const bfd_vma elf32_arm_plt_entry [] =
2388{
2389  0xe28fc600,		/* add   ip, pc, #NN	*/
2390  0xe28cca00,		/* add	 ip, ip, #NN	*/
2391  0xe5bcf000,		/* ldr	 pc, [ip, #NN]! */
2392  0x00000000,		/* unused		*/
2393};
2394
2395#else /* not FOUR_WORD_PLT */
2396
2397/* The first entry in a procedure linkage table looks like
2398   this.  It is set up so that any shared library function that is
2399   called before the relocation has been set up calls the dynamic
2400   linker first.  */
2401static const bfd_vma elf32_arm_plt0_entry [] =
2402{
2403  0xe52de004,		/* str	 lr, [sp, #-4]! */
2404  0xe59fe004,		/* ldr	 lr, [pc, #4]	*/
2405  0xe08fe00e,		/* add	 lr, pc, lr	*/
2406  0xe5bef008,		/* ldr	 pc, [lr, #8]!	*/
2407  0x00000000,		/* &GOT[0] - .		*/
2408};
2409
2410/* By default subsequent entries in a procedure linkage table look like
2411   this.  Offsets that don't fit into 28 bits will cause a link error.  */
2412static const bfd_vma elf32_arm_plt_entry_short [] =
2413{
2414  0xe28fc600,		/* add   ip, pc, #0xNN00000 */
2415  0xe28cca00,		/* add	 ip, ip, #0xNN000   */
2416  0xe5bcf000,		/* ldr	 pc, [ip, #0xNNN]!  */
2417};
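
/* Illustrative sketch, not part of the build: how a GOT displacement is
   split across the three immediates of the short entry above.  The field
   boundaries are inferred from the 0xNN00000 / 0xNN000 / 0xNNN comments;
   the actual encoding is performed by the PLT population code later in
   this file.  */
#if 0
static void
example_split_short_plt_offset (bfd_vma disp)
{
  bfd_vma hi  = disp & 0x0ff00000;   /* add ip, pc, #0xNN00000 */
  bfd_vma mid = disp & 0x000ff000;   /* add ip, ip, #0xNN000   */
  bfd_vma lo  = disp & 0x00000fff;   /* ldr pc, [ip, #0xNNN]!  */

  /* Anything above bit 27 cannot be represented and results in a link
     error, hence the "long" variant below.  */
  (void) hi; (void) mid; (void) lo;
}
#endif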
2418
2419/* When explicitly asked, we'll use this "long" entry format
2420   which can cope with arbitrary displacements.  */
2421static const bfd_vma elf32_arm_plt_entry_long [] =
2422{
2423  0xe28fc200,		/* add	 ip, pc, #0xN0000000 */
2424  0xe28cc600,		/* add	 ip, ip, #0xNN00000  */
2425  0xe28cca00,		/* add	 ip, ip, #0xNN000    */
2426  0xe5bcf000,		/* ldr	 pc, [ip, #0xNNN]!   */
2427};
2428
2429static bool elf32_arm_use_long_plt_entry = false;
2430
2431#endif /* not FOUR_WORD_PLT */
2432
2433/* The first entry in a procedure linkage table looks like this.
2434   It is set up so that any shared library function that is called before the
2435   relocation has been set up calls the dynamic linker first.  */
2436static const bfd_vma elf32_thumb2_plt0_entry [] =
2437{
2438  /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2439     an instruction may be encoded as one or two array elements.  */
2440  0xf8dfb500,		/* push	   {lr}		 */
2441  0x44fee008,		/* ldr.w   lr, [pc, #8]	 */
2442			/* add	   lr, pc	 */
2443  0xff08f85e,		/* ldr.w   pc, [lr, #8]! */
2444  0x00000000,		/* &GOT[0] - .		 */
2445};
2446
2447/* Subsequent entries in a procedure linkage table for a Thumb-only
2448   target look like this.  */
2449static const bfd_vma elf32_thumb2_plt_entry [] =
2450{
2451  /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2452     an instruction may be encoded as one or two array elements.  */
2453  0x0c00f240,		/* movw	   ip, #0xNNNN	  */
2454  0x0c00f2c0,		/* movt	   ip, #0xNNNN	  */
2455  0xf8dc44fc,		/* add	   ip, pc	  */
2456  0xe7fcf000		/* ldr.w   pc, [ip]	  */
2457			/* b      .-4		  */
2458};
2459
2460/* The format of the first entry in the procedure linkage table
2461   for a VxWorks executable.  */
2462static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
2463{
2464  0xe52dc008,		/* str	  ip,[sp,#-8]!			*/
2465  0xe59fc000,		/* ldr	  ip,[pc]			*/
2466  0xe59cf008,		/* ldr	  pc,[ip,#8]			*/
2467  0x00000000,		/* .long  _GLOBAL_OFFSET_TABLE_		*/
2468};
2469
2470/* The format of subsequent entries in a VxWorks executable.  */
2471static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
2472{
2473  0xe59fc000,	      /* ldr	ip,[pc]			*/
2474  0xe59cf000,	      /* ldr	pc,[ip]			*/
2475  0x00000000,	      /* .long	@got				*/
2476  0xe59fc000,	      /* ldr	ip,[pc]			*/
2477  0xea000000,	      /* b	_PLT				*/
2478  0x00000000,	      /* .long	@pltindex*sizeof(Elf32_Rela)	*/
2479};
2480
2481/* The format of entries in a VxWorks shared library.  */
2482static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
2483{
2484  0xe59fc000,	      /* ldr	ip,[pc]			*/
2485  0xe79cf009,	      /* ldr	pc,[ip,r9]			*/
2486  0x00000000,	      /* .long	@got				*/
2487  0xe59fc000,	      /* ldr	ip,[pc]			*/
2488  0xe599f008,	      /* ldr	pc,[r9,#8]			*/
2489  0x00000000,	      /* .long	@pltindex*sizeof(Elf32_Rela)	*/
2490};
2491
2492/* An initial stub used if the PLT entry is referenced from Thumb code.  */
2493#define PLT_THUMB_STUB_SIZE 4
2494static const bfd_vma elf32_arm_plt_thumb_stub [] =
2495{
2496  0x4778,		/* bx pc */
2497  0xe7fd		/* b .-2 */
2498};
2499
2500/* The first entry in a procedure linkage table looks like
2501   this.  It is set up so that any shared library function that is
2502   called before the relocation has been set up calls the dynamic
2503   linker first.  */
2504static const bfd_vma elf32_arm_nacl_plt0_entry [] =
2505{
2506  /* First bundle: */
2507  0xe300c000,		/* movw	ip, #:lower16:&GOT[2]-.+8	*/
2508  0xe340c000,		/* movt	ip, #:upper16:&GOT[2]-.+8	*/
2509  0xe08cc00f,		/* add	ip, ip, pc			*/
2510  0xe52dc008,		/* str	ip, [sp, #-8]!			*/
2511  /* Second bundle: */
2512  0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
2513  0xe59cc000,		/* ldr	ip, [ip]			*/
2514  0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
2515  0xe12fff1c,		/* bx	ip				*/
2516  /* Third bundle: */
2517  0xe320f000,		/* nop					*/
2518  0xe320f000,		/* nop					*/
2519  0xe320f000,		/* nop					*/
2520  /* .Lplt_tail: */
2521  0xe50dc004,		/* str	ip, [sp, #-4]			*/
2522  /* Fourth bundle: */
2523  0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
2524  0xe59cc000,		/* ldr	ip, [ip]			*/
2525  0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
2526  0xe12fff1c,		/* bx	ip				*/
2527};
2528#define ARM_NACL_PLT_TAIL_OFFSET	(11 * 4)
2529
2530/* Subsequent entries in a procedure linkage table look like this.  */
2531static const bfd_vma elf32_arm_nacl_plt_entry [] =
2532{
2533  0xe300c000,		/* movw	ip, #:lower16:&GOT[n]-.+8	*/
2534  0xe340c000,		/* movt	ip, #:upper16:&GOT[n]-.+8	*/
2535  0xe08cc00f,		/* add	ip, ip, pc			*/
2536  0xea000000,		/* b	.Lplt_tail			*/
2537};
2538
2539/* PR 28924:
2540   There was a bug due to too high values of THM_MAX_FWD_BRANCH_OFFSET and
2541   THM2_MAX_FWD_BRANCH_OFFSET.  The first macro concerns the case when Thumb-2
2542   is not available, and second macro when Thumb-2 is available.  Among other
2543   things, they affect the range of branches represented as BLX instructions
2544   in Encoding T2 defined in Section A8.8.25 of the ARM Architecture
2545   Reference Manual ARMv7-A and ARMv7-R edition issue C.d.  Such branches are
2546   specified there to have a maximum forward offset that is a multiple of 4.
2547   Previously, the respective values defined here were multiples of 2 but not
2548   4 and they are included in comments for reference.  */
2549#define ARM_MAX_FWD_BRANCH_OFFSET  ((((1 << 23) - 1) << 2) + 8)
2550#define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2551#define THM_MAX_FWD_BRANCH_OFFSET   ((1 << 22) - 4 + 4)
2552/* #def THM_MAX_FWD_BRANCH_OFFSET   ((1 << 22) - 2 + 4) */
2553#define THM_MAX_BWD_BRANCH_OFFSET  (-(1 << 22) + 4)
2554#define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 4) + 4)
2555/* #def THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4) */
2556#define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
2557#define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) - 2) + 4)
2558#define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)
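
/* Illustrative sketch, not part of the build: the intended use of the
   limits above when deciding whether a Thumb-2 branch can reach its
   destination directly or needs a stub.  "location" and "destination"
   are assumed to be the addresses of the branch instruction and of its
   target; the + 4 terms in the macros appear to account for the Thumb
   PC offset already.  */
#if 0
static bool
example_thumb2_branch_in_range (bfd_vma location, bfd_vma destination)
{
  bfd_signed_vma offset
    = (bfd_signed_vma) destination - (bfd_signed_vma) location;

  return (offset <= THM2_MAX_FWD_BRANCH_OFFSET
	  && offset >= THM2_MAX_BWD_BRANCH_OFFSET);
}
#endif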
2559
2560enum stub_insn_type
2561{
2562  THUMB16_TYPE = 1,
2563  THUMB32_TYPE,
2564  ARM_TYPE,
2565  DATA_TYPE
2566};
2567
2568#define THUMB16_INSN(X)		{(X), THUMB16_TYPE, R_ARM_NONE, 0}
2569/* A bit of a hack.  A Thumb conditional branch, in which the proper condition
2570   is inserted in arm_build_one_stub().  */
2571#define THUMB16_BCOND_INSN(X)	{(X), THUMB16_TYPE, R_ARM_NONE, 1}
2572#define THUMB32_INSN(X)		{(X), THUMB32_TYPE, R_ARM_NONE, 0}
2573#define THUMB32_MOVT(X)		{(X), THUMB32_TYPE, R_ARM_THM_MOVT_ABS, 0}
2574#define THUMB32_MOVW(X)		{(X), THUMB32_TYPE, R_ARM_THM_MOVW_ABS_NC, 0}
2575#define THUMB32_B_INSN(X, Z)	{(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2576#define ARM_INSN(X)		{(X), ARM_TYPE, R_ARM_NONE, 0}
2577#define ARM_REL_INSN(X, Z)	{(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2578#define DATA_WORD(X,Y,Z)	{(X), DATA_TYPE, (Y), (Z)}
2579
2580typedef struct
2581{
2582  bfd_vma	       data;
2583  enum stub_insn_type  type;
2584  unsigned int	       r_type;
2585  int		       reloc_addend;
2586}  insn_sequence;
2587
2588/* See note [Thumb nop sequence] when adding a veneer.  */
2589
2590/* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2591   to reach the stub if necessary.  */
2592static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2593{
2594  ARM_INSN (0xe51ff004),	    /* ldr   pc, [pc, #-4] */
2595  DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd   R_ARM_ABS32(X) */
2596};
2597
2598/* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2599   available.  */
2600static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2601{
2602  ARM_INSN (0xe59fc000),	    /* ldr   ip, [pc, #0] */
2603  ARM_INSN (0xe12fff1c),	    /* bx    ip */
2604  DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd   R_ARM_ABS32(X) */
2605};
2606
2607/* Thumb -> Thumb long branch stub. Used on M-profile architectures.  */
2608static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2609{
2610  THUMB16_INSN (0xb401),	     /* push {r0} */
2611  THUMB16_INSN (0x4802),	     /* ldr  r0, [pc, #8] */
2612  THUMB16_INSN (0x4684),	     /* mov  ip, r0 */
2613  THUMB16_INSN (0xbc01),	     /* pop  {r0} */
2614  THUMB16_INSN (0x4760),	     /* bx   ip */
2615  THUMB16_INSN (0xbf00),	     /* nop */
2616  DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd  R_ARM_ABS32(X) */
2617};
2618
2619/* Thumb -> Thumb long branch stub in thumb2 encoding.  Used on armv7.  */
2620static const insn_sequence elf32_arm_stub_long_branch_thumb2_only[] =
2621{
2622  THUMB32_INSN (0xf85ff000),	     /* ldr.w  pc, [pc, #-0] */
2623  DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd  R_ARM_ABS32(X) */
2624};
2625
2626/* Thumb -> Thumb long branch stub. Used for PureCode sections on Thumb2
2627   M-profile architectures.  */
2628static const insn_sequence elf32_arm_stub_long_branch_thumb2_only_pure[] =
2629{
2630  THUMB32_MOVW (0xf2400c00),	     /* mov.w ip, R_ARM_MOVW_ABS_NC */
2631  THUMB32_MOVT (0xf2c00c00),	     /* movt  ip, R_ARM_MOVT_ABS << 16 */
2632  THUMB16_INSN (0x4760),	     /* bx   ip */
2633};
2634
2635/* V4T Thumb -> Thumb long branch stub. Using the stack is not
2636   allowed.  */
2637static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2638{
2639  THUMB16_INSN (0x4778),	     /* bx   pc */
2640  THUMB16_INSN (0xe7fd),	     /* b   .-2 */
2641  ARM_INSN (0xe59fc000),	     /* ldr  ip, [pc, #0] */
2642  ARM_INSN (0xe12fff1c),	     /* bx   ip */
2643  DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd  R_ARM_ABS32(X) */
2644};
2645
2646/* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2647   available.  */
2648static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2649{
2650  THUMB16_INSN (0x4778),	     /* bx   pc */
2651  THUMB16_INSN (0xe7fd),	     /* b   .-2 */
2652  ARM_INSN (0xe51ff004),	     /* ldr   pc, [pc, #-4] */
2653  DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd   R_ARM_ABS32(X) */
2654};
2655
2656/* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2657   one, when the destination is close enough.  */
2658static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2659{
2660  THUMB16_INSN (0x4778),	     /* bx   pc */
2661  THUMB16_INSN (0xe7fd),	     /* b   .-2 */
2662  ARM_REL_INSN (0xea000000, -8),     /* b    (X-8) */
2663};
2664
2665/* ARM/Thumb -> ARM long branch stub, PIC.  On V5T and above, use
2666   blx to reach the stub if necessary.  */
2667static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2668{
2669  ARM_INSN (0xe59fc000),	     /* ldr   ip, [pc] */
2670  ARM_INSN (0xe08ff00c),	     /* add   pc, pc, ip */
2671  DATA_WORD (0, R_ARM_REL32, -4),    /* dcd   R_ARM_REL32(X-4) */
2672};
2673
2674/* ARM/Thumb -> Thumb long branch stub, PIC.  On V5T and above, use
2675   blx to reach the stub if necessary.  We cannot add into pc;
2676   it is not guaranteed to mode switch (different in ARMv6 and
2677   ARMv7).  */
2678static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2679{
2680  ARM_INSN (0xe59fc004),	     /* ldr   ip, [pc, #4] */
2681  ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
2682  ARM_INSN (0xe12fff1c),	     /* bx    ip */
2683  DATA_WORD (0, R_ARM_REL32, 0),     /* dcd   R_ARM_REL32(X) */
2684};
2685
2686/* V4T ARM -> Thumb long branch stub, PIC.  */
2687static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2688{
2689  ARM_INSN (0xe59fc004),	     /* ldr   ip, [pc, #4] */
2690  ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
2691  ARM_INSN (0xe12fff1c),	     /* bx    ip */
2692  DATA_WORD (0, R_ARM_REL32, 0),     /* dcd   R_ARM_REL32(X) */
2693};
2694
2695/* V4T Thumb -> ARM long branch stub, PIC.  */
2696static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2697{
2698  THUMB16_INSN (0x4778),	     /* bx   pc */
2699  THUMB16_INSN (0xe7fd),	     /* b   .-2 */
2700  ARM_INSN (0xe59fc000),	     /* ldr  ip, [pc, #0] */
2701  ARM_INSN (0xe08cf00f),	     /* add  pc, ip, pc */
2702  DATA_WORD (0, R_ARM_REL32, -4),     /* dcd  R_ARM_REL32(X-4) */
2703};
2704
2705/* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2706   architectures.  */
2707static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2708{
2709  THUMB16_INSN (0xb401),	     /* push {r0} */
2710  THUMB16_INSN (0x4802),	     /* ldr  r0, [pc, #8] */
2711  THUMB16_INSN (0x46fc),	     /* mov  ip, pc */
2712  THUMB16_INSN (0x4484),	     /* add  ip, r0 */
2713  THUMB16_INSN (0xbc01),	     /* pop  {r0} */
2714  THUMB16_INSN (0x4760),	     /* bx   ip */
2715  DATA_WORD (0, R_ARM_REL32, 4),     /* dcd  R_ARM_REL32(X+4) */
2716};
2717
2718/* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2719   allowed.  */
2720static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2721{
2722  THUMB16_INSN (0x4778),	     /* bx   pc */
2723  THUMB16_INSN (0xe7fd),	     /* b   .-2 */
2724  ARM_INSN (0xe59fc004),	     /* ldr  ip, [pc, #4] */
2725  ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
2726  ARM_INSN (0xe12fff1c),	     /* bx   ip */
2727  DATA_WORD (0, R_ARM_REL32, 0),     /* dcd  R_ARM_REL32(X) */
2728};
2729
2730/* Thumb2/ARM -> TLS trampoline.  Lowest common denominator, which is a
2731   long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
2732static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
2733{
2734  ARM_INSN (0xe59f1000),	     /* ldr   r1, [pc] */
2735  ARM_INSN (0xe08ff001),	     /* add   pc, pc, r1 */
2736  DATA_WORD (0, R_ARM_REL32, -4),    /* dcd   R_ARM_REL32(X-4) */
2737};
2738
2739/* V4T Thumb -> TLS trampoline.  Lowest common denominator, which is a
2740   long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
2741static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
2742{
2743  THUMB16_INSN (0x4778),	     /* bx   pc */
2744  THUMB16_INSN (0xe7fd),	     /* b   .-2 */
2745  ARM_INSN (0xe59f1000),	     /* ldr  r1, [pc, #0] */
2746  ARM_INSN (0xe081f00f),	     /* add  pc, r1, pc */
2747  DATA_WORD (0, R_ARM_REL32, -4),    /* dcd  R_ARM_REL32(X-4) */
2748};
2749
2750/* NaCl ARM -> ARM long branch stub.  */
2751static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] =
2752{
2753  ARM_INSN (0xe59fc00c),		/* ldr	ip, [pc, #12] */
2754  ARM_INSN (0xe3ccc13f),		/* bic	ip, ip, #0xc000000f */
2755  ARM_INSN (0xe12fff1c),		/* bx	ip */
2756  ARM_INSN (0xe320f000),		/* nop */
2757  ARM_INSN (0xe125be70),		/* bkpt	0x5be0 */
2758  DATA_WORD (0, R_ARM_ABS32, 0),	/* dcd	R_ARM_ABS32(X) */
2759  DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
2760  DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
2761};
2762
2763/* NaCl ARM -> ARM long branch stub, PIC.  */
2764static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
2765{
2766  ARM_INSN (0xe59fc00c),		/* ldr	ip, [pc, #12] */
2767  ARM_INSN (0xe08cc00f),		/* add	ip, ip, pc */
2768  ARM_INSN (0xe3ccc13f),		/* bic	ip, ip, #0xc000000f */
2769  ARM_INSN (0xe12fff1c),		/* bx	ip */
2770  ARM_INSN (0xe125be70),		/* bkpt	0x5be0 */
2771  DATA_WORD (0, R_ARM_REL32, 8),	/* dcd	R_ARM_REL32(X+8) */
2772  DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
2773  DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
2774};
2775
2776/* Stub used for transition to secure state (aka SG veneer).  */
2777static const insn_sequence elf32_arm_stub_cmse_branch_thumb_only[] =
2778{
2779  THUMB32_INSN (0xe97fe97f),		/* sg.  */
2780  THUMB32_B_INSN (0xf000b800, -4),	/* b.w original_branch_dest.  */
2781};
2782
2783
2784/* Cortex-A8 erratum-workaround stubs.  */
2785
2786/* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2787   can't use a conditional branch to reach this stub).  */
2788
2789static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2790{
2791  THUMB16_BCOND_INSN (0xd001),	       /* b<cond>.n true.  */
2792  THUMB32_B_INSN (0xf000b800, -4),     /* b.w insn_after_original_branch.  */
2793  THUMB32_B_INSN (0xf000b800, -4)      /* true: b.w original_branch_dest.  */
2794};
2795
2796/* Stub used for b.w and bl.w instructions.  */
2797
2798static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2799{
2800  THUMB32_B_INSN (0xf000b800, -4)	/* b.w original_branch_dest.  */
2801};
2802
2803static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2804{
2805  THUMB32_B_INSN (0xf000b800, -4)	/* b.w original_branch_dest.  */
2806};
2807
2808/* Stub used for Thumb-2 blx.w instructions.  We modified the original blx.w
2809   instruction (which switches to ARM mode) to point to this stub.  Jump to the
2810   real destination using an ARM-mode branch.  */
2811
2812static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2813{
2814  ARM_REL_INSN (0xea000000, -8)	/* b original_branch_dest.  */
2815};
2816
2817/* For each section group there can be a specially created linker section
2818   to hold the stubs for that group.  The name of the stub section is based
2819   upon the name of another section within that group with the suffix below
2820   applied.
2821
2822   PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
2823   create what appeared to be a linker stub section when it actually
2824   contained user code/data.  For example, consider this fragment:
2825
2826     const char * stubborn_problems[] = { "np" };
2827
2828   If this is compiled with "-fPIC -fdata-sections" then gcc produces a
2829   section called:
2830
2831     .data.rel.local.stubborn_problems
2832
2833   This then causes problems in elf32_arm_build_stubs() as it triggers:
2834
2835      // Ignore non-stub sections.
2836      if (!strstr (stub_sec->name, STUB_SUFFIX))
2837	continue;
2838
2839   And so the section would be ignored instead of being processed.  Hence
2840   the change in definition of STUB_SUFFIX to a name that cannot be a valid
2841   C identifier.  */
2842#define STUB_SUFFIX ".__stub"
2843
2844/* One entry per long/short branch stub defined above.  */
2845#define DEF_STUBS \
2846  DEF_STUB (long_branch_any_any)	\
2847  DEF_STUB (long_branch_v4t_arm_thumb) \
2848  DEF_STUB (long_branch_thumb_only) \
2849  DEF_STUB (long_branch_v4t_thumb_thumb)	\
2850  DEF_STUB (long_branch_v4t_thumb_arm) \
2851  DEF_STUB (short_branch_v4t_thumb_arm) \
2852  DEF_STUB (long_branch_any_arm_pic) \
2853  DEF_STUB (long_branch_any_thumb_pic) \
2854  DEF_STUB (long_branch_v4t_thumb_thumb_pic) \
2855  DEF_STUB (long_branch_v4t_arm_thumb_pic) \
2856  DEF_STUB (long_branch_v4t_thumb_arm_pic) \
2857  DEF_STUB (long_branch_thumb_only_pic) \
2858  DEF_STUB (long_branch_any_tls_pic) \
2859  DEF_STUB (long_branch_v4t_thumb_tls_pic) \
2860  DEF_STUB (long_branch_arm_nacl) \
2861  DEF_STUB (long_branch_arm_nacl_pic) \
2862  DEF_STUB (cmse_branch_thumb_only) \
2863  DEF_STUB (a8_veneer_b_cond) \
2864  DEF_STUB (a8_veneer_b) \
2865  DEF_STUB (a8_veneer_bl) \
2866  DEF_STUB (a8_veneer_blx) \
2867  DEF_STUB (long_branch_thumb2_only) \
2868  DEF_STUB (long_branch_thumb2_only_pure)
2869
2870#define DEF_STUB(x) arm_stub_##x,
2871enum elf32_arm_stub_type
2872{
2873  arm_stub_none,
2874  DEF_STUBS
2875  max_stub_type
2876};
2877#undef DEF_STUB
2878
2879/* Note the first a8_veneer type.  */
2880const unsigned arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond;
2881
2882typedef struct
2883{
2884  const insn_sequence* template_sequence;
2885  int template_size;
2886} stub_def;
2887
2888#define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2889static const stub_def stub_definitions[] =
2890{
2891  {NULL, 0},
2892  DEF_STUBS
2893};
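
/* Illustrative sketch, not compiled: the first few expansions of the
   DEF_STUBS X-macro above, showing how the enum values and the template
   table are kept in sync by construction.

     enum elf32_arm_stub_type
     {
       arm_stub_none,
       arm_stub_long_branch_any_any,
       arm_stub_long_branch_v4t_arm_thumb,
       ...
       max_stub_type
     };

     static const stub_def stub_definitions[] =
     {
       {NULL, 0},
       {elf32_arm_stub_long_branch_any_any,
	ARRAY_SIZE (elf32_arm_stub_long_branch_any_any)},
       {elf32_arm_stub_long_branch_v4t_arm_thumb,
	ARRAY_SIZE (elf32_arm_stub_long_branch_v4t_arm_thumb)},
       ...
     };  */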
2894
2895struct elf32_arm_stub_hash_entry
2896{
2897  /* Base hash table entry structure.  */
2898  struct bfd_hash_entry root;
2899
2900  /* The stub section.  */
2901  asection *stub_sec;
2902
2903  /* Offset within stub_sec of the beginning of this stub.  */
2904  bfd_vma stub_offset;
2905
2906  /* Given the symbol's value and its section we can determine its final
2907     value when building the stubs (so the stub knows where to jump).  */
2908  bfd_vma target_value;
2909  asection *target_section;
2910
2911  /* Same as above but for the source of the branch to the stub.  Used for
2912     Cortex-A8 erratum workaround to patch it to branch to the stub.  As
2913     such, source section does not need to be recorded since Cortex-A8 erratum
2914     workaround stubs are only generated when both source and target are in the
2915     same section.  */
2916  bfd_vma source_value;
2917
2918  /* The instruction which caused this stub to be generated (only valid for
2919     Cortex-A8 erratum workaround stubs at present).  */
2920  unsigned long orig_insn;
2921
2922  /* The stub type.  */
2923  enum elf32_arm_stub_type stub_type;
2924  /* Its encoding size in bytes.  */
2925  int stub_size;
2926  /* Its template.  */
2927  const insn_sequence *stub_template;
2928  /* The size of the template (number of entries).  */
2929  int stub_template_size;
2930
2931  /* The symbol table entry, if any, that this was derived from.  */
2932  struct elf32_arm_link_hash_entry *h;
2933
2934  /* Type of branch.  */
2935  enum arm_st_branch_type branch_type;
2936
2937  /* Where this stub is being called from, or, in the case of combined
2938     stub sections, the first input section in the group.  */
2939  asection *id_sec;
2940
2941  /* The name for the local symbol at the start of this stub.  The
2942     stub name in the hash table has to be unique; this does not, so
2943     it can be friendlier.  */
2944  char *output_name;
2945};
2946
2947/* Used to build a map of a section.  This is required for mixed-endian
2948   code/data.  */
2949
2950typedef struct elf32_elf_section_map
2951{
2952  bfd_vma vma;
2953  char type;
2954}
2955elf32_arm_section_map;
2956
2957/* Information about a VFP11 erratum veneer, or a branch to such a veneer.  */
2958
2959typedef enum
2960{
2961  VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2962  VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2963  VFP11_ERRATUM_ARM_VENEER,
2964  VFP11_ERRATUM_THUMB_VENEER
2965}
2966elf32_vfp11_erratum_type;
2967
2968typedef struct elf32_vfp11_erratum_list
2969{
2970  struct elf32_vfp11_erratum_list *next;
2971  bfd_vma vma;
2972  union
2973  {
2974    struct
2975    {
2976      struct elf32_vfp11_erratum_list *veneer;
2977      unsigned int vfp_insn;
2978    } b;
2979    struct
2980    {
2981      struct elf32_vfp11_erratum_list *branch;
2982      unsigned int id;
2983    } v;
2984  } u;
2985  elf32_vfp11_erratum_type type;
2986}
2987elf32_vfp11_erratum_list;
2988
2989/* Information about a STM32L4XX erratum veneer, or a branch to such a
2990   veneer.  */
2991typedef enum
2992{
2993  STM32L4XX_ERRATUM_BRANCH_TO_VENEER,
2994  STM32L4XX_ERRATUM_VENEER
2995}
2996elf32_stm32l4xx_erratum_type;
2997
2998typedef struct elf32_stm32l4xx_erratum_list
2999{
3000  struct elf32_stm32l4xx_erratum_list *next;
3001  bfd_vma vma;
3002  union
3003  {
3004    struct
3005    {
3006      struct elf32_stm32l4xx_erratum_list *veneer;
3007      unsigned int insn;
3008    } b;
3009    struct
3010    {
3011      struct elf32_stm32l4xx_erratum_list *branch;
3012      unsigned int id;
3013    } v;
3014  } u;
3015  elf32_stm32l4xx_erratum_type type;
3016}
3017elf32_stm32l4xx_erratum_list;
3018
3019typedef enum
3020{
3021  DELETE_EXIDX_ENTRY,
3022  INSERT_EXIDX_CANTUNWIND_AT_END
3023}
3024arm_unwind_edit_type;
3025
3026/* A (sorted) list of edits to apply to an unwind table.  */
3027typedef struct arm_unwind_table_edit
3028{
3029  arm_unwind_edit_type type;
3030  /* Note: we sometimes want to insert an unwind entry corresponding to a
3031     section different from the one we're currently writing out, so record the
3032     (text) section this edit relates to here.  */
3033  asection *linked_section;
3034  unsigned int index;
3035  struct arm_unwind_table_edit *next;
3036}
3037arm_unwind_table_edit;
3038
3039typedef struct _arm_elf_section_data
3040{
3041  /* Information about mapping symbols.  */
3042  struct bfd_elf_section_data elf;
3043  unsigned int mapcount;
3044  unsigned int mapsize;
3045  elf32_arm_section_map *map;
3046  /* Information about CPU errata.  */
3047  unsigned int erratumcount;
3048  elf32_vfp11_erratum_list *erratumlist;
3049  unsigned int stm32l4xx_erratumcount;
3050  elf32_stm32l4xx_erratum_list *stm32l4xx_erratumlist;
3051  unsigned int additional_reloc_count;
3052  /* Information about unwind tables.  */
3053  union
3054  {
3055    /* Unwind info attached to a text section.  */
3056    struct
3057    {
3058      asection *arm_exidx_sec;
3059    } text;
3060
3061    /* Unwind info attached to an .ARM.exidx section.  */
3062    struct
3063    {
3064      arm_unwind_table_edit *unwind_edit_list;
3065      arm_unwind_table_edit *unwind_edit_tail;
3066    } exidx;
3067  } u;
3068}
3069_arm_elf_section_data;
3070
3071#define elf32_arm_section_data(sec) \
3072  ((_arm_elf_section_data *) elf_section_data (sec))
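
/* Illustrative sketch, not part of the build: looking up the ARM-specific
   data attached to a section and consulting its mapping symbols.  "sec" is
   an assumed asection whose backend section data has been set up.  */
#if 0
static unsigned int
example_count_arm_mapping_symbols (asection *sec)
{
  _arm_elf_section_data *sdata = elf32_arm_section_data (sec);

  /* map[] holds one (vma, type) pair per mapping symbol; mapcount is
     how many are in use.  */
  return sdata != NULL ? sdata->mapcount : 0;
}
#endif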
3073
3074/* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
3075   These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
3076   so may be created multiple times: we use an array of these entries whilst
3077   relaxing which we can refresh easily, then create stubs for each potentially
3078   erratum-triggering instruction once we've settled on a solution.  */
3079
3080struct a8_erratum_fix
3081{
3082  bfd *input_bfd;
3083  asection *section;
3084  bfd_vma offset;
3085  bfd_vma target_offset;
3086  unsigned long orig_insn;
3087  char *stub_name;
3088  enum elf32_arm_stub_type stub_type;
3089  enum arm_st_branch_type branch_type;
3090};
3091
3092/* A table of relocs applied to branches which might trigger Cortex-A8
3093   erratum.  */
3094
3095struct a8_erratum_reloc
3096{
3097  bfd_vma from;
3098  bfd_vma destination;
3099  struct elf32_arm_link_hash_entry *hash;
3100  const char *sym_name;
3101  unsigned int r_type;
3102  enum arm_st_branch_type branch_type;
3103  bool non_a8_stub;
3104};
3105
3106/* The size of the thread control block.  */
3107#define TCB_SIZE	8
3108
3109/* ARM-specific information about a PLT entry, over and above the usual
3110   gotplt_union.  */
3111struct arm_plt_info
3112{
3113  /* We reference count Thumb references to a PLT entry separately,
3114     so that we can emit the Thumb trampoline only if needed.  */
3115  bfd_signed_vma thumb_refcount;
3116
3117  /* Some references from Thumb code may be eliminated by BL->BLX
3118     conversion, so record them separately.  */
3119  bfd_signed_vma maybe_thumb_refcount;
3120
3121  /* How many of the recorded PLT accesses were from non-call relocations.
3122     This information is useful when deciding whether anything takes the
3123     address of an STT_GNU_IFUNC PLT.  A value of 0 means that all
3124     non-call references to the function should resolve directly to the
3125     real runtime target.  */
3126  unsigned int noncall_refcount;
3127
3128  /* Since PLT entries have variable size if the Thumb prologue is
3129     used, we need to record the index into .got.plt instead of
3130     recomputing it from the PLT offset.  */
3131  bfd_signed_vma got_offset;
3132};
3133
3134/* Information about an .iplt entry for a local STT_GNU_IFUNC symbol.  */
3135struct arm_local_iplt_info
3136{
3137  /* The information that is usually found in the generic ELF part of
3138     the hash table entry.  */
3139  union gotplt_union root;
3140
3141  /* The information that is usually found in the ARM-specific part of
3142     the hash table entry.  */
3143  struct arm_plt_info arm;
3144
3145  /* A list of all potential dynamic relocations against this symbol.  */
3146  struct elf_dyn_relocs *dyn_relocs;
3147};
3148
3149/* Structure to handle FDPIC support for local functions.  */
3150struct fdpic_local
3151{
3152  unsigned int funcdesc_cnt;
3153  unsigned int gotofffuncdesc_cnt;
3154  int funcdesc_offset;
3155};
3156
3157struct elf_arm_obj_tdata
3158{
3159  struct elf_obj_tdata root;
3160
3161  /* Zero to warn when linking objects with incompatible enum sizes.  */
3162  int no_enum_size_warning;
3163
3164  /* Zero to warn when linking objects with incompatible wchar_t sizes.  */
3165  int no_wchar_size_warning;
3166
3167  /* The number of entries in each of the arrays in this structure.
3168     Used to avoid buffer overruns.  */
3169  bfd_size_type num_entries;
3170
3171  /* tls_type for each local got entry.  */
3172  char *local_got_tls_type;
3173
3174  /* GOTPLT entries for TLS descriptors.  */
3175  bfd_vma *local_tlsdesc_gotent;
3176
3177  /* Information for local symbols that need entries in .iplt.  */
3178  struct arm_local_iplt_info **local_iplt;
3179
3180  /* Maintains FDPIC counters and funcdesc info.  */
3181  struct fdpic_local *local_fdpic_cnts;
3182};
3183
3184#define elf_arm_tdata(bfd) \
3185  ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
3186
3187#define elf32_arm_num_entries(bfd) \
3188  (elf_arm_tdata (bfd)->num_entries)
3189
3190#define elf32_arm_local_got_tls_type(bfd) \
3191  (elf_arm_tdata (bfd)->local_got_tls_type)
3192
3193#define elf32_arm_local_tlsdesc_gotent(bfd) \
3194  (elf_arm_tdata (bfd)->local_tlsdesc_gotent)
3195
3196#define elf32_arm_local_iplt(bfd) \
3197  (elf_arm_tdata (bfd)->local_iplt)
3198
3199#define elf32_arm_local_fdpic_cnts(bfd) \
3200  (elf_arm_tdata (bfd)->local_fdpic_cnts)
3201
3202#define is_arm_elf(bfd) \
3203  (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
3204   && elf_tdata (bfd) != NULL \
3205   && elf_object_id (bfd) == ARM_ELF_DATA)
3206
3207static bool
3208elf32_arm_mkobject (bfd *abfd)
3209{
3210  return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
3211				  ARM_ELF_DATA);
3212}
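
/* Illustrative sketch, not part of the build: once elf32_arm_mkobject has
   attached the backend tdata, the per-bfd accessors above can be used like
   this.  "ibfd" is an assumed input bfd checked with is_arm_elf first.  */
#if 0
static bfd_size_type
example_local_array_bound (bfd *ibfd)
{
  if (!is_arm_elf (ibfd))
    return 0;

  /* num_entries bounds the local_got_tls_type, local_tlsdesc_gotent,
     local_iplt and local_fdpic_cnts arrays, per the comment in
     elf_arm_obj_tdata.  */
  return elf32_arm_num_entries (ibfd);
}
#endif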
3213
3214#define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
3215
3216/* Structure to handle FDPIC support for extern functions.  */
3217struct fdpic_global {
3218  unsigned int gotofffuncdesc_cnt;
3219  unsigned int gotfuncdesc_cnt;
3220  unsigned int funcdesc_cnt;
3221  int funcdesc_offset;
3222  int gotfuncdesc_offset;
3223};
3224
3225/* Arm ELF linker hash entry.  */
3226struct elf32_arm_link_hash_entry
3227{
3228  struct elf_link_hash_entry root;
3229
3230  /* ARM-specific PLT information.  */
3231  struct arm_plt_info plt;
3232
3233#define GOT_UNKNOWN	0
3234#define GOT_NORMAL	1
3235#define GOT_TLS_GD	2
3236#define GOT_TLS_IE	4
3237#define GOT_TLS_GDESC	8
3238#define GOT_TLS_GD_ANY_P(type)	((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
3239  unsigned int tls_type : 8;
3240
3241  /* True if the symbol's PLT entry is in .iplt rather than .plt.  */
3242  unsigned int is_iplt : 1;
3243
3244  unsigned int unused : 23;
3245
3246  /* Offset of the GOTPLT entry reserved for the TLS descriptor,
3247     starting at the end of the jump table.  */
3248  bfd_vma tlsdesc_got;
3249
3250  /* The symbol marking the real symbol location for exported thumb
3251     symbols with Arm stubs.  */
3252  struct elf_link_hash_entry *export_glue;
3253
3254  /* A pointer to the most recently used stub hash entry against this
3255     symbol.  */
3256  struct elf32_arm_stub_hash_entry *stub_cache;
3257
3258  /* Counter for FDPIC relocations against this symbol.  */
3259  struct fdpic_global fdpic_cnts;
3260};
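
/* Illustrative sketch, not part of the build: tls_type above is a small
   bit-set built from the GOT_* flags, so a symbol that is accessed through
   both the GD and the descriptor forms can carry both bits, and
   GOT_TLS_GD_ANY_P accepts either.  */
#if 0
static bool
example_needs_gd_style_got (unsigned int tls_type)
{
  /* E.g. tls_type == (GOT_TLS_GD | GOT_TLS_GDESC).  */
  return GOT_TLS_GD_ANY_P (tls_type);
}
#endif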
3261
3262/* Traverse an arm ELF linker hash table.  */
3263#define elf32_arm_link_hash_traverse(table, func, info)			\
3264  (elf_link_hash_traverse						\
3265   (&(table)->root,							\
3266    (bool (*) (struct elf_link_hash_entry *, void *)) (func),		\
3267    (info)))
3268
3269/* Get the ARM elf linker hash table from a link_info structure.  */
3270#define elf32_arm_hash_table(p) \
3271  ((is_elf_hash_table ((p)->hash)					\
3272    && elf_hash_table_id (elf_hash_table (p)) == ARM_ELF_DATA)		\
3273   ? (struct elf32_arm_link_hash_table *) (p)->hash : NULL)
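
/* Illustrative sketch, not part of the build: typical use of the accessor
   above inside a backend routine.  It yields NULL when the link is not
   using an ARM ELF hash table, so callers are expected to bail out.  */
#if 0
static bool
example_have_arm_hash_table (struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  return htab != NULL;
}
#endif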
3274
3275#define arm_stub_hash_lookup(table, string, create, copy) \
3276  ((struct elf32_arm_stub_hash_entry *) \
3277   bfd_hash_lookup ((table), (string), (create), (copy)))
3278
3279/* Array to keep track of which stub sections have been created, and
3280   information on stub grouping.  */
3281struct map_stub
3282{
3283  /* This is the section to which stubs in the group will be
3284     attached.  */
3285  asection *link_sec;
3286  /* The stub section.  */
3287  asection *stub_sec;
3288};
3289
3290#define elf32_arm_compute_jump_table_size(htab) \
3291  ((htab)->next_tls_desc_index * 4)
3292
3293/* ARM ELF linker hash table.  */
3294struct elf32_arm_link_hash_table
3295{
3296  /* The main hash table.  */
3297  struct elf_link_hash_table root;
3298
3299  /* The size in bytes of the section containing the Thumb-to-ARM glue.  */
3300  bfd_size_type thumb_glue_size;
3301
3302  /* The size in bytes of the section containing the ARM-to-Thumb glue.  */
3303  bfd_size_type arm_glue_size;
3304
3305  /* The size in bytes of the section containing the ARMv4 BX veneers.  */
3306  bfd_size_type bx_glue_size;
3307
3308  /* Offsets of ARMv4 BX veneers.  Bit 1 is set if a veneer is present, and
3309     bit 0 is set once the veneer has been populated.  */
3310  bfd_vma bx_glue_offset[15];
3311
3312  /* The size in bytes of the section containing glue for VFP11 erratum
3313     veneers.  */
3314  bfd_size_type vfp11_erratum_glue_size;
3315
3316  /* The size in bytes of the section containing glue for STM32L4XX erratum
3317     veneers.  */
3318  bfd_size_type stm32l4xx_erratum_glue_size;
3319
3320  /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum.  This
3321     holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
3322     elf32_arm_write_section().  */
3323  struct a8_erratum_fix *a8_erratum_fixes;
3324  unsigned int num_a8_erratum_fixes;
3325
3326  /* An arbitrary input BFD chosen to hold the glue sections.  */
3327  bfd * bfd_of_glue_owner;
3328
3329  /* Nonzero to output a BE8 image.  */
3330  int byteswap_code;
3331
3332  /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
3333     Nonzero if R_ARM_TARGET1 means R_ARM_REL32.  */
3334  int target1_is_rel;
3335
3336  /* The relocation to use for R_ARM_TARGET2 relocations.  */
3337  int target2_reloc;
3338
3339  /* 0 = Ignore R_ARM_V4BX.
3340     1 = Convert BX to MOV PC.
3341     2 = Generate v4 interworking stubs.  */
3342  int fix_v4bx;
3343
3344  /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum.  */
3345  int fix_cortex_a8;
3346
3347  /* Whether we should fix the ARM1176 BLX immediate issue.  */
3348  int fix_arm1176;
3349
3350  /* Nonzero if the ARM/Thumb BLX instructions are available for use.  */
3351  int use_blx;
3352
3353  /* What sort of code sequences we should look for which may trigger the
3354     VFP11 denorm erratum.  */
3355  bfd_arm_vfp11_fix vfp11_fix;
3356
3357  /* Global counter for the number of fixes we have emitted.  */
3358  int num_vfp11_fixes;
3359
3360  /* What sort of code sequences we should look for which may trigger the
3361     STM32L4XX erratum.  */
3362  bfd_arm_stm32l4xx_fix stm32l4xx_fix;
3363
3364  /* Global counter for the number of fixes we have emitted.  */
3365  int num_stm32l4xx_fixes;
3366
3367  /* Nonzero to force PIC branch veneers.  */
3368  int pic_veneer;
3369
3370  /* The number of bytes in the initial entry in the PLT.  */
3371  bfd_size_type plt_header_size;
3372
3373  /* The number of bytes in the subsequent PLT entries.  */
3374  bfd_size_type plt_entry_size;
3375
3376  /* True if the target uses REL relocations.  */
3377  bool use_rel;
3378
3379  /* Nonzero if import library must be a secure gateway import library
3380     as per ARMv8-M Security Extensions.  */
3381  int cmse_implib;
3382
3383  /* The import library whose symbols' addresses must remain stable in
3384     the generated import library.  */
3385  bfd *in_implib_bfd;
3386
3387  /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt.  */
3388  bfd_vma next_tls_desc_index;
3389
3390  /* How many R_ARM_TLS_DESC relocations were generated so far.  */
3391  bfd_vma num_tls_desc;
3392
3393  /* The (unloaded but important) VxWorks .rela.plt.unloaded section.  */
3394  asection *srelplt2;
3395
3396  /* Offset in .plt section of tls_arm_trampoline.  */
3397  bfd_vma tls_trampoline;
3398
3399  /* Data for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations.  */
3400  union
3401  {
3402    bfd_signed_vma refcount;
3403    bfd_vma offset;
3404  } tls_ldm_got;
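  /* As is usual for BFD GOT bookkeeping, the union above holds a reference
     count while relocations are being scanned and is replaced by the offset
     of the single LDM GOT entry once GOT space has been allocated.  */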
3405
3406  /* For convenience in allocate_dynrelocs.  */
3407  bfd * obfd;
3408
3409  /* The amount of space used by the reserved portion of the sgotplt
3410     section, plus whatever space is used by the jump slots.  */
3411  bfd_vma sgotplt_jump_table_size;
3412
3413  /* The stub hash table.  */
3414  struct bfd_hash_table stub_hash_table;
3415
3416  /* Linker stub bfd.  */
3417  bfd *stub_bfd;
3418
3419  /* Linker call-backs.  */
3420  asection * (*add_stub_section) (const char *, asection *, asection *,
3421				  unsigned int);
3422  void (*layout_sections_again) (void);
3423
3424  /* Array to keep track of which stub sections have been created, and
3425     information on stub grouping.  */
3426  struct map_stub *stub_group;
3427
3428  /* Input stub section holding secure gateway veneers.  */
3429  asection *cmse_stub_sec;
3430
3431  /* Offset in cmse_stub_sec where new SG veneers (not in input import library)
3432     start to be allocated.  */
3433  bfd_vma new_cmse_stub_offset;
3434
3435  /* Number of elements in stub_group.  */
3436  unsigned int top_id;
3437
3438  /* Assorted information used by elf32_arm_size_stubs.  */
3439  unsigned int bfd_count;
3440  unsigned int top_index;
3441  asection **input_list;
3442
3443  /* True if the target system uses FDPIC.  */
3444  int fdpic_p;
3445
3446  /* Fixup section.  Used for FDPIC.  */
3447  asection *srofixup;
3448};
3449
3450/* Add an FDPIC read-only fixup.  */
3451static void
3452arm_elf_add_rofixup (bfd *output_bfd, asection *srofixup, bfd_vma offset)
3453{
3454  bfd_vma fixup_offset;
3455
3456  fixup_offset = srofixup->reloc_count++ * 4;
3457  BFD_ASSERT (fixup_offset < srofixup->size);
3458  bfd_put_32 (output_bfd, offset, srofixup->contents + fixup_offset);
3459}
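/* Note that the function above reuses the .rofixup section's reloc_count
   field as a write cursor: each call appends one 4-byte entry holding the
   address of a word that the FDPIC loader must relocate at load time.  */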
3460
3461static inline int
3462ctz (unsigned int mask)
3463{
3464#if GCC_VERSION >= 3004
3465  return __builtin_ctz (mask);
3466#else
3467  unsigned int i;
3468
3469  for (i = 0; i < 8 * sizeof (mask); i++)
3470    {
3471      if (mask & 0x1)
3472	break;
3473      mask = (mask >> 1);
3474    }
3475  return i;
3476#endif
3477}
3478
3479#if !defined (__NetBSD__) || (__NetBSD_Version__ < 600000000)
3480static inline int
3481elf32_arm_popcount (unsigned int mask)
3482{
3483#if GCC_VERSION >= 3004
3484  return __builtin_popcount (mask);
3485#else
3486  unsigned int i;
3487  int sum = 0;
3488
3489  for (i = 0; i < 8 * sizeof (mask); i++)
3490    {
3491      if (mask & 0x1)
3492	sum++;
3493      mask = (mask >> 1);
3494    }
3495  return sum;
3496#endif
3497}
3498#endif
3499
3500static void elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
3501				    asection *sreloc, Elf_Internal_Rela *rel);
3502
3503static void
3504arm_elf_fill_funcdesc (bfd *output_bfd,
3505		       struct bfd_link_info *info,
3506		       int *funcdesc_offset,
3507		       int dynindx,
3508		       int offset,
3509		       bfd_vma addr,
3510		       bfd_vma dynreloc_value,
3511		       bfd_vma seg)
3512{
3513  if ((*funcdesc_offset & 1) == 0)
3514    {
3515      struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
3516      asection *sgot = globals->root.sgot;
3517
3518      if (bfd_link_pic (info))
3519	{
3520	  asection *srelgot = globals->root.srelgot;
3521	  Elf_Internal_Rela outrel;
3522
3523	  outrel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
3524	  outrel.r_offset = sgot->output_section->vma + sgot->output_offset + offset;
3525	  outrel.r_addend = 0;
3526
3527	  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
3528	  bfd_put_32 (output_bfd, addr, sgot->contents + offset);
3529	  bfd_put_32 (output_bfd, seg, sgot->contents + offset + 4);
3530	}
3531      else
3532	{
3533	  struct elf_link_hash_entry *hgot = globals->root.hgot;
3534	  bfd_vma got_value = hgot->root.u.def.value
3535	    + hgot->root.u.def.section->output_section->vma
3536	    + hgot->root.u.def.section->output_offset;
3537
3538	  arm_elf_add_rofixup (output_bfd, globals->srofixup,
3539			       sgot->output_section->vma + sgot->output_offset
3540			       + offset);
3541	  arm_elf_add_rofixup (output_bfd, globals->srofixup,
3542			       sgot->output_section->vma + sgot->output_offset
3543			       + offset + 4);
3544	  bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + offset);
3545	  bfd_put_32 (output_bfd, got_value, sgot->contents + offset + 4);
3546	}
3547      *funcdesc_offset |= 1;
3548    }
3549}
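/* A function descriptor filled in this way occupies two consecutive GOT
   words: the entry point followed by a GOT/segment value (in the dynamic
   case both are finalised by the R_ARM_FUNCDESC_VALUE relocation emitted
   above).  The low bit of *funcdesc_offset merely records that the
   descriptor has been written; GOT offsets are multiples of four, so that
   bit is otherwise unused.  */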
3550
3551/* Create an entry in an ARM ELF linker hash table.  */
3552
3553static struct bfd_hash_entry *
3554elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
3555			     struct bfd_hash_table * table,
3556			     const char * string)
3557{
3558  struct elf32_arm_link_hash_entry * ret =
3559    (struct elf32_arm_link_hash_entry *) entry;
3560
3561  /* Allocate the structure if it has not already been allocated by a
3562     subclass.  */
3563  if (ret == NULL)
3564    ret = (struct elf32_arm_link_hash_entry *)
3565	bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
3566  if (ret == NULL)
3567    return (struct bfd_hash_entry *) ret;
3568
3569  /* Call the allocation method of the superclass.  */
3570  ret = ((struct elf32_arm_link_hash_entry *)
3571	 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
3572				     table, string));
3573  if (ret != NULL)
3574    {
3575      ret->tls_type = GOT_UNKNOWN;
3576      ret->tlsdesc_got = (bfd_vma) -1;
3577      ret->plt.thumb_refcount = 0;
3578      ret->plt.maybe_thumb_refcount = 0;
3579      ret->plt.noncall_refcount = 0;
3580      ret->plt.got_offset = -1;
3581      ret->is_iplt = false;
3582      ret->export_glue = NULL;
3583
3584      ret->stub_cache = NULL;
3585
3586      ret->fdpic_cnts.gotofffuncdesc_cnt = 0;
3587      ret->fdpic_cnts.gotfuncdesc_cnt = 0;
3588      ret->fdpic_cnts.funcdesc_cnt = 0;
3589      ret->fdpic_cnts.funcdesc_offset = -1;
3590      ret->fdpic_cnts.gotfuncdesc_offset = -1;
3591    }
3592
3593  return (struct bfd_hash_entry *) ret;
3594}
3595
3596/* Ensure that we have allocated bookkeeping structures for ABFD's local
3597   symbols.  */
3598
3599static bool
3600elf32_arm_allocate_local_sym_info (bfd *abfd)
3601{
3602  if (elf_local_got_refcounts (abfd) == NULL)
3603    {
3604      bfd_size_type num_syms;
3605
3606      elf32_arm_num_entries (abfd) = 0;
3607
3608      /* Whilst it might be tempting to allocate a single block of memory and
3609	 then divide it up amongst the arrays in the elf_arm_obj_tdata
3610	 structure, this interferes with the work of memory checkers looking
3611	 for buffer overruns.  So allocate each array individually.  */
3612
3613      num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
3614
3615      elf_local_got_refcounts (abfd) = bfd_zalloc
3616	(abfd, num_syms * sizeof (* elf_local_got_refcounts (abfd)));
3617
3618      if (elf_local_got_refcounts (abfd) == NULL)
3619	return false;
3620
3621      elf32_arm_local_tlsdesc_gotent (abfd) = bfd_zalloc
3622	(abfd, num_syms * sizeof (* elf32_arm_local_tlsdesc_gotent (abfd)));
3623
3624      if (elf32_arm_local_tlsdesc_gotent (abfd) == NULL)
3625	return false;
3626
3627      elf32_arm_local_iplt (abfd) = bfd_zalloc
3628	(abfd, num_syms * sizeof (* elf32_arm_local_iplt (abfd)));
3629
3630      if (elf32_arm_local_iplt (abfd) == NULL)
3631	return false;
3632
3633      elf32_arm_local_fdpic_cnts (abfd) = bfd_zalloc
3634	(abfd, num_syms * sizeof (* elf32_arm_local_fdpic_cnts (abfd)));
3635
3636      if (elf32_arm_local_fdpic_cnts (abfd) == NULL)
3637	return false;
3638
3639      elf32_arm_local_got_tls_type (abfd) = bfd_zalloc
3640	(abfd, num_syms * sizeof (* elf32_arm_local_got_tls_type (abfd)));
3641
3642      if (elf32_arm_local_got_tls_type (abfd) == NULL)
3643	return false;
3644
3645      elf32_arm_num_entries (abfd) = num_syms;
3646
3647#if GCC_VERSION >= 3000
3648      BFD_ASSERT (__alignof__ (*elf32_arm_local_tlsdesc_gotent (abfd))
3649		  <= __alignof__ (*elf_local_got_refcounts (abfd)));
3650      BFD_ASSERT (__alignof__ (*elf32_arm_local_iplt (abfd))
3651		  <= __alignof__ (*elf32_arm_local_tlsdesc_gotent (abfd)));
3652      BFD_ASSERT (__alignof__ (*elf32_arm_local_fdpic_cnts (abfd))
3653		  <= __alignof__ (*elf32_arm_local_iplt (abfd)));
3654      BFD_ASSERT (__alignof__ (*elf32_arm_local_got_tls_type (abfd))
3655		  <= __alignof__ (*elf32_arm_local_fdpic_cnts (abfd)));
3656#endif
3657    }
3658  return true;
3659}
3660
3661/* Return the .iplt information for local symbol R_SYMNDX, which belongs
3662   to input bfd ABFD.  Create the information if it doesn't already exist.
3663   Return null if an allocation fails.  */
3664
3665static struct arm_local_iplt_info *
3666elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
3667{
3668  struct arm_local_iplt_info **ptr;
3669
3670  if (!elf32_arm_allocate_local_sym_info (abfd))
3671    return NULL;
3672
3673  BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
3674  BFD_ASSERT (r_symndx < elf32_arm_num_entries (abfd));
3675  ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
3676  if (*ptr == NULL)
3677    *ptr = bfd_zalloc (abfd, sizeof (**ptr));
3678  return *ptr;
3679}
3680
3681/* Try to obtain PLT information for the symbol with index R_SYMNDX
3682   in ABFD's symbol table.  If the symbol is global, H points to its
3683   hash table entry, otherwise H is null.
3684
3685   Return true if the symbol does have PLT information.  When returning
3686   true, point *ROOT_PLT at the target-independent reference count/offset
3687   union and *ARM_PLT at the ARM-specific information.  */
3688
3689static bool
3690elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_table *globals,
3691			struct elf32_arm_link_hash_entry *h,
3692			unsigned long r_symndx, union gotplt_union **root_plt,
3693			struct arm_plt_info **arm_plt)
3694{
3695  struct arm_local_iplt_info *local_iplt;
3696
3697  if (globals->root.splt == NULL && globals->root.iplt == NULL)
3698    return false;
3699
3700  if (h != NULL)
3701    {
3702      *root_plt = &h->root.plt;
3703      *arm_plt = &h->plt;
3704      return true;
3705    }
3706
3707  if (elf32_arm_local_iplt (abfd) == NULL)
3708    return false;
3709
3710  if (r_symndx >= elf32_arm_num_entries (abfd))
3711    return false;
3712
3713  local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3714  if (local_iplt == NULL)
3715    return false;
3716
3717  *root_plt = &local_iplt->root;
3718  *arm_plt = &local_iplt->arm;
3719  return true;
3720}
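/* As with other BFD PLT bookkeeping, the gotplt_union returned above holds
   a reference count while relocations are being scanned and the symbol's
   PLT offset once PLT space has been allocated.  */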
3721
3722static bool using_thumb_only (struct elf32_arm_link_hash_table *globals);
3723
3724/* Return true if the PLT described by ARM_PLT requires a Thumb stub
3725   before it.  */
3726
3727static bool
3728elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3729				  struct arm_plt_info *arm_plt)
3730{
3731  struct elf32_arm_link_hash_table *htab;
3732
3733  htab = elf32_arm_hash_table (info);
3734
3735  return (!using_thumb_only (htab) && (arm_plt->thumb_refcount != 0
3736	  || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0)));
3737}
3738
3739/* Return a pointer to the head of the dynamic reloc list that should
3740   be used for local symbol ISYM, which is symbol number R_SYMNDX in
3741   ABFD's symbol table.  Return null if an error occurs.  */
3742
3743static struct elf_dyn_relocs **
3744elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
3745				   Elf_Internal_Sym *isym)
3746{
3747  if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
3748    {
3749      struct arm_local_iplt_info *local_iplt;
3750
3751      local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
3752      if (local_iplt == NULL)
3753	return NULL;
3754      return &local_iplt->dyn_relocs;
3755    }
3756  else
3757    {
3758      /* Track dynamic relocs needed for local syms too.
3759	 We really need local syms available to do this
3760	 easily.  Oh well.  */
3761      asection *s;
3762      void *vpp;
3763
3764      s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3765      if (s == NULL)
3766	return NULL;
3767
3768      vpp = &elf_section_data (s)->local_dynrel;
3769      return (struct elf_dyn_relocs **) vpp;
3770    }
3771}
3772
3773/* Initialize an entry in the stub hash table.  */
3774
3775static struct bfd_hash_entry *
3776stub_hash_newfunc (struct bfd_hash_entry *entry,
3777		   struct bfd_hash_table *table,
3778		   const char *string)
3779{
3780  /* Allocate the structure if it has not already been allocated by a
3781     subclass.  */
3782  if (entry == NULL)
3783    {
3784      entry = (struct bfd_hash_entry *)
3785	  bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
3786      if (entry == NULL)
3787	return entry;
3788    }
3789
3790  /* Call the allocation method of the superclass.  */
3791  entry = bfd_hash_newfunc (entry, table, string);
3792  if (entry != NULL)
3793    {
3794      struct elf32_arm_stub_hash_entry *eh;
3795
3796      /* Initialize the local fields.  */
3797      eh = (struct elf32_arm_stub_hash_entry *) entry;
3798      eh->stub_sec = NULL;
3799      eh->stub_offset = (bfd_vma) -1;
3800      eh->source_value = 0;
3801      eh->target_value = 0;
3802      eh->target_section = NULL;
3803      eh->orig_insn = 0;
3804      eh->stub_type = arm_stub_none;
3805      eh->stub_size = 0;
3806      eh->stub_template = NULL;
3807      eh->stub_template_size = -1;
3808      eh->h = NULL;
3809      eh->id_sec = NULL;
3810      eh->output_name = NULL;
3811    }
3812
3813  return entry;
3814}
3815
3816/* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3817   shortcuts to them in our hash table.  */
3818
3819static bool
3820create_got_section (bfd *dynobj, struct bfd_link_info *info)
3821{
3822  struct elf32_arm_link_hash_table *htab;
3823
3824  htab = elf32_arm_hash_table (info);
3825  if (htab == NULL)
3826    return false;
3827
3828  if (! _bfd_elf_create_got_section (dynobj, info))
3829    return false;
3830
3831  /* Also create .rofixup.  */
3832  if (htab->fdpic_p)
3833    {
3834      htab->srofixup = bfd_make_section_with_flags (dynobj, ".rofixup",
3835						    (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS
3836						     | SEC_IN_MEMORY | SEC_LINKER_CREATED | SEC_READONLY));
3837      if (htab->srofixup == NULL
3838	  || !bfd_set_section_alignment (htab->srofixup, 2))
3839	return false;
3840    }
3841
3842  return true;
3843}
3844
3845/* Create the .iplt, .rel(a).iplt and .igot.plt sections.  */
3846
3847static bool
3848create_ifunc_sections (struct bfd_link_info *info)
3849{
3850  struct elf32_arm_link_hash_table *htab;
3851  const struct elf_backend_data *bed;
3852  bfd *dynobj;
3853  asection *s;
3854  flagword flags;
3855
3856  htab = elf32_arm_hash_table (info);
3857  dynobj = htab->root.dynobj;
3858  bed = get_elf_backend_data (dynobj);
3859  flags = bed->dynamic_sec_flags;
3860
3861  if (htab->root.iplt == NULL)
3862    {
3863      s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
3864					      flags | SEC_READONLY | SEC_CODE);
3865      if (s == NULL
3866	  || !bfd_set_section_alignment (s, bed->plt_alignment))
3867	return false;
3868      htab->root.iplt = s;
3869    }
3870
3871  if (htab->root.irelplt == NULL)
3872    {
3873      s = bfd_make_section_anyway_with_flags (dynobj,
3874					      RELOC_SECTION (htab, ".iplt"),
3875					      flags | SEC_READONLY);
3876      if (s == NULL
3877	  || !bfd_set_section_alignment (s, bed->s->log_file_align))
3878	return false;
3879      htab->root.irelplt = s;
3880    }
3881
3882  if (htab->root.igotplt == NULL)
3883    {
3884      s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
3885      if (s == NULL
3886	  || !bfd_set_section_alignment (s, bed->s->log_file_align))
3887	return false;
3888      htab->root.igotplt = s;
3889    }
3890  return true;
3891}
3892
3893/* Determine if we're dealing with a Thumb only architecture.  */
3894
3895static bool
3896using_thumb_only (struct elf32_arm_link_hash_table *globals)
3897{
3898  int arch;
3899  int profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3900					  Tag_CPU_arch_profile);
3901
3902  if (profile)
3903    return profile == 'M';
3904
3905  arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3906
3907  /* Force return logic to be reviewed for each new architecture.  */
3908  BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
3909
3910  if (arch == TAG_CPU_ARCH_V6_M
3911      || arch == TAG_CPU_ARCH_V6S_M
3912      || arch == TAG_CPU_ARCH_V7E_M
3913      || arch == TAG_CPU_ARCH_V8M_BASE
3914      || arch == TAG_CPU_ARCH_V8M_MAIN
3915      || arch == TAG_CPU_ARCH_V8_1M_MAIN)
3916    return true;
3917
3918  return false;
3919}
3920
3921/* Determine if we're dealing with a Thumb-2 object.  */
3922
3923static bool
3924using_thumb2 (struct elf32_arm_link_hash_table *globals)
3925{
3926  int arch;
3927  int thumb_isa = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3928					    Tag_THUMB_ISA_use);
3929
3930  /* No use of thumb permitted, or a legacy thumb-1/2 definition.  */
3931  if (thumb_isa < 3)
3932    return thumb_isa == 2;
3933
3934  /* Variant of thumb is described by the architecture tag.  */
3935  arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3936
3937  /* Force return logic to be reviewed for each new architecture.  */
3938  BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
3939
3940  return (arch == TAG_CPU_ARCH_V6T2
3941	  || arch == TAG_CPU_ARCH_V7
3942	  || arch == TAG_CPU_ARCH_V7E_M
3943	  || arch == TAG_CPU_ARCH_V8
3944	  || arch == TAG_CPU_ARCH_V8R
3945	  || arch == TAG_CPU_ARCH_V8M_MAIN
3946	  || arch == TAG_CPU_ARCH_V8_1M_MAIN);
3947}
3948
3949/* Determine whether Thumb-2 BL instruction is available.  */
3950
3951static bool
3952using_thumb2_bl (struct elf32_arm_link_hash_table *globals)
3953{
3954  int arch =
3955    bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3956
3957  /* Force return logic to be reviewed for each new architecture.  */
3958  BFD_ASSERT (arch <= TAG_CPU_ARCH_V9);
3959
3960  /* Architectures introduced after ARMv6T2 (e.g. ARMv6-M) also have Thumb-2 BL.  */
3961  return (arch == TAG_CPU_ARCH_V6T2
3962	  || arch >= TAG_CPU_ARCH_V7);
3963}
3964
3965/* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
3966   .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
3967   hash table.  */
3968
3969static bool
3970elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
3971{
3972  struct elf32_arm_link_hash_table *htab;
3973
3974  htab = elf32_arm_hash_table (info);
3975  if (htab == NULL)
3976    return false;
3977
3978  if (!htab->root.sgot && !create_got_section (dynobj, info))
3979    return false;
3980
3981  if (!_bfd_elf_create_dynamic_sections (dynobj, info))
3982    return false;
3983
3984  if (htab->root.target_os == is_vxworks)
3985    {
3986      if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
3987	return false;
3988
3989      if (bfd_link_pic (info))
3990	{
3991	  htab->plt_header_size = 0;
3992	  htab->plt_entry_size
3993	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
3994	}
3995      else
3996	{
3997	  htab->plt_header_size
3998	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
3999	  htab->plt_entry_size
4000	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
4001	}
4002
4003      if (elf_elfheader (dynobj))
4004	elf_elfheader (dynobj)->e_ident[EI_CLASS] = ELFCLASS32;
4005    }
4006  else
4007    {
4008      /* PR ld/16017
4009	 Test for thumb only architectures.  Note - we cannot just call
4010	 using_thumb_only() as the attributes in the output bfd have not been
4011	 initialised at this point, so instead we use the input bfd.  */
4012      bfd * saved_obfd = htab->obfd;
4013
4014      htab->obfd = dynobj;
4015      if (using_thumb_only (htab))
4016	{
4017	  htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
4018	  htab->plt_entry_size  = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
4019	}
4020      htab->obfd = saved_obfd;
4021    }
4022
4023  if (htab->fdpic_p) {
4024    htab->plt_header_size = 0;
4025    if (info->flags & DF_BIND_NOW)
4026      htab->plt_entry_size = 4 * (ARRAY_SIZE (elf32_arm_fdpic_plt_entry) - 5);
4027    else
4028      htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_fdpic_plt_entry);
4029  }
4030
4031  if (!htab->root.splt
4032      || !htab->root.srelplt
4033      || !htab->root.sdynbss
4034      || (!bfd_link_pic (info) && !htab->root.srelbss))
4035    abort ();
4036
4037  return true;
4038}
4039
4040/* Copy the extra info we tack onto an elf_link_hash_entry.  */
4041
4042static void
4043elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
4044				struct elf_link_hash_entry *dir,
4045				struct elf_link_hash_entry *ind)
4046{
4047  struct elf32_arm_link_hash_entry *edir, *eind;
4048
4049  edir = (struct elf32_arm_link_hash_entry *) dir;
4050  eind = (struct elf32_arm_link_hash_entry *) ind;
4051
4052  if (ind->root.type == bfd_link_hash_indirect)
4053    {
4054      /* Copy over PLT info.  */
4055      edir->plt.thumb_refcount += eind->plt.thumb_refcount;
4056      eind->plt.thumb_refcount = 0;
4057      edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
4058      eind->plt.maybe_thumb_refcount = 0;
4059      edir->plt.noncall_refcount += eind->plt.noncall_refcount;
4060      eind->plt.noncall_refcount = 0;
4061
4062      /* Copy FDPIC counters.  */
4063      edir->fdpic_cnts.gotofffuncdesc_cnt += eind->fdpic_cnts.gotofffuncdesc_cnt;
4064      edir->fdpic_cnts.gotfuncdesc_cnt += eind->fdpic_cnts.gotfuncdesc_cnt;
4065      edir->fdpic_cnts.funcdesc_cnt += eind->fdpic_cnts.funcdesc_cnt;
4066
4067      /* We should only allocate a function to .iplt once the final
4068	 symbol information is known.  */
4069      BFD_ASSERT (!eind->is_iplt);
4070
4071      if (dir->got.refcount <= 0)
4072	{
4073	  edir->tls_type = eind->tls_type;
4074	  eind->tls_type = GOT_UNKNOWN;
4075	}
4076    }
4077
4078  _bfd_elf_link_hash_copy_indirect (info, dir, ind);
4079}
4080
4081/* Destroy an ARM elf linker hash table.  */
4082
4083static void
4084elf32_arm_link_hash_table_free (bfd *obfd)
4085{
4086  struct elf32_arm_link_hash_table *ret
4087    = (struct elf32_arm_link_hash_table *) obfd->link.hash;
4088
4089  bfd_hash_table_free (&ret->stub_hash_table);
4090  _bfd_elf_link_hash_table_free (obfd);
4091}
4092
4093/* Create an ARM elf linker hash table.  */
4094
4095static struct bfd_link_hash_table *
4096elf32_arm_link_hash_table_create (bfd *abfd)
4097{
4098  struct elf32_arm_link_hash_table *ret;
4099  size_t amt = sizeof (struct elf32_arm_link_hash_table);
4100
4101  ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
4102  if (ret == NULL)
4103    return NULL;
4104
4105  if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
4106				      elf32_arm_link_hash_newfunc,
4107				      sizeof (struct elf32_arm_link_hash_entry),
4108				      ARM_ELF_DATA))
4109    {
4110      free (ret);
4111      return NULL;
4112    }
4113
4114  ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
4115  ret->stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE;
4116#ifdef FOUR_WORD_PLT
4117  ret->plt_header_size = 16;
4118  ret->plt_entry_size = 16;
4119#else
4120  ret->plt_header_size = 20;
4121  ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12;
4122#endif
4123  ret->use_rel = true;
4124  ret->obfd = abfd;
4125  ret->fdpic_p = 0;
4126
4127  if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
4128			    sizeof (struct elf32_arm_stub_hash_entry)))
4129    {
4130      _bfd_elf_link_hash_table_free (abfd);
4131      return NULL;
4132    }
4133  ret->root.root.hash_table_free = elf32_arm_link_hash_table_free;
4134
4135  return &ret->root.root;
4136}
4137
4138/* Determine what kind of NOPs are available.  */
4139
4140static bool
4141arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
4142{
4143  const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
4144					     Tag_CPU_arch);
4145
4146  /* Force return logic to be reviewed for each new architecture.  */
4147  BFD_ASSERT (arch <= TAG_CPU_ARCH_V9);
4148
4149  return (arch == TAG_CPU_ARCH_V6T2
4150	  || arch == TAG_CPU_ARCH_V6K
4151	  || arch == TAG_CPU_ARCH_V7
4152	  || arch == TAG_CPU_ARCH_V8
4153	  || arch == TAG_CPU_ARCH_V8R
4154	  || arch == TAG_CPU_ARCH_V9);
4155}
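/* On the architectures listed above the ARM-state architectural NOP
   (encoding 0xe320f000) is available; older cores conventionally use
   "mov r0, r0" (0xe1a00000) as a NOP substitute instead.  */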
4156
4157static bool
4158arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
4159{
4160  switch (stub_type)
4161    {
4162    case arm_stub_long_branch_thumb_only:
4163    case arm_stub_long_branch_thumb2_only:
4164    case arm_stub_long_branch_thumb2_only_pure:
4165    case arm_stub_long_branch_v4t_thumb_arm:
4166    case arm_stub_short_branch_v4t_thumb_arm:
4167    case arm_stub_long_branch_v4t_thumb_arm_pic:
4168    case arm_stub_long_branch_v4t_thumb_tls_pic:
4169    case arm_stub_long_branch_thumb_only_pic:
4170    case arm_stub_cmse_branch_thumb_only:
4171      return true;
4172    case arm_stub_none:
4173      BFD_FAIL ();
4174      return false;
4175      break;
4176    default:
4177      return false;
4178    }
4179}
4180
4181/* Determine the type of stub needed, if any, for a call.  */
4182
4183static enum elf32_arm_stub_type
4184arm_type_of_stub (struct bfd_link_info *info,
4185		  asection *input_sec,
4186		  const Elf_Internal_Rela *rel,
4187		  unsigned char st_type,
4188		  enum arm_st_branch_type *actual_branch_type,
4189		  struct elf32_arm_link_hash_entry *hash,
4190		  bfd_vma destination,
4191		  asection *sym_sec,
4192		  bfd *input_bfd,
4193		  const char *name)
4194{
4195  bfd_vma location;
4196  bfd_signed_vma branch_offset;
4197  unsigned int r_type;
4198  struct elf32_arm_link_hash_table * globals;
4199  bool thumb2, thumb2_bl, thumb_only;
4200  enum elf32_arm_stub_type stub_type = arm_stub_none;
4201  int use_plt = 0;
4202  enum arm_st_branch_type branch_type = *actual_branch_type;
4203  union gotplt_union *root_plt;
4204  struct arm_plt_info *arm_plt;
4205  int arch;
4206  int thumb2_movw;
4207
4208  if (branch_type == ST_BRANCH_LONG)
4209    return stub_type;
4210
4211  globals = elf32_arm_hash_table (info);
4212  if (globals == NULL)
4213    return stub_type;
4214
4215  thumb_only = using_thumb_only (globals);
4216  thumb2 = using_thumb2 (globals);
4217  thumb2_bl = using_thumb2_bl (globals);
4218
4219  arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
4220
4221  /* True for architectures that implement the thumb2 movw instruction.  */
4222  thumb2_movw = thumb2 || (arch == TAG_CPU_ARCH_V8M_BASE);
4223
4224  /* Determine where the call point is.  */
4225  location = (input_sec->output_offset
4226	      + input_sec->output_section->vma
4227	      + rel->r_offset);
4228
4229  r_type = ELF32_R_TYPE (rel->r_info);
4230
4231  /* ST_BRANCH_TO_ARM is meaningless for Thumb-only targets when we
4232     are considering a function call relocation.  */
4233  if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
4234		     || r_type == R_ARM_THM_JUMP19)
4235      && branch_type == ST_BRANCH_TO_ARM)
4236    branch_type = ST_BRANCH_TO_THUMB;
4237
4238  /* For TLS call relocs, it is the caller's responsibility to provide
4239     the address of the appropriate trampoline.  */
4240  if (r_type != R_ARM_TLS_CALL
4241      && r_type != R_ARM_THM_TLS_CALL
4242      && elf32_arm_get_plt_info (input_bfd, globals, hash,
4243				 ELF32_R_SYM (rel->r_info), &root_plt,
4244				 &arm_plt)
4245      && root_plt->offset != (bfd_vma) -1)
4246    {
4247      asection *splt;
4248
4249      if (hash == NULL || hash->is_iplt)
4250	splt = globals->root.iplt;
4251      else
4252	splt = globals->root.splt;
4253      if (splt != NULL)
4254	{
4255	  use_plt = 1;
4256
4257	  /* Note when dealing with PLT entries: the main PLT stub is in
4258	     ARM mode, so if the branch is in Thumb mode, another
4259	     Thumb->ARM stub will be inserted later just before the ARM
4260	     PLT stub. If a long branch stub is needed, we'll add a
4261	     Thumb->Arm one and branch directly to the ARM PLT entry.
4262	     Here, we have to check if a pre-PLT Thumb->ARM stub
4263	     is needed and if it will be close enough.  */
4264
4265	  destination = (splt->output_section->vma
4266			 + splt->output_offset
4267			 + root_plt->offset);
4268	  st_type = STT_FUNC;
4269
4270	  /* Thumb branch/call to PLT: it can become a branch to ARM
4271	     or to Thumb. We must perform the same checks and
4272	     corrections as in elf32_arm_final_link_relocate.  */
4273	  if ((r_type == R_ARM_THM_CALL)
4274	      || (r_type == R_ARM_THM_JUMP24))
4275	    {
4276	      if (globals->use_blx
4277		  && r_type == R_ARM_THM_CALL
4278		  && !thumb_only)
4279		{
4280		  /* If the Thumb BLX instruction is available, convert
4281		     the BL to a BLX instruction to call the ARM-mode
4282		     PLT entry.  */
4283		  branch_type = ST_BRANCH_TO_ARM;
4284		}
4285	      else
4286		{
4287		  if (!thumb_only)
4288		    /* Target the Thumb stub before the ARM PLT entry.  */
4289		    destination -= PLT_THUMB_STUB_SIZE;
4290		  branch_type = ST_BRANCH_TO_THUMB;
4291		}
4292	    }
4293	  else
4294	    {
4295	      branch_type = ST_BRANCH_TO_ARM;
4296	    }
4297	}
4298    }
4299  /* Calls to STT_GNU_IFUNC symbols should go through a PLT.  */
4300  BFD_ASSERT (st_type != STT_GNU_IFUNC);
4301
4302  branch_offset = (bfd_signed_vma)(destination - location);
4303
4304  if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
4305      || r_type == R_ARM_THM_TLS_CALL || r_type == R_ARM_THM_JUMP19)
4306    {
4307      /* Handle cases where:
4308	 - this call goes too far (different Thumb/Thumb2 max
4309	   distance)
4310	 - it's a Thumb->Arm call and blx is not available, or it's a
4311	   Thumb->Arm branch (not bl). A stub is needed in this case,
4312	   but only if this call is not through a PLT entry. Indeed,
4313	   PLT stubs handle mode switching already.  */
4314      if ((!thumb2_bl
4315	    && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
4316		|| (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
4317	  || (thumb2_bl
4318	      && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
4319		  || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
4320	  || (thumb2
4321	      && (branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET
4322		  || (branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET))
4323	      && (r_type == R_ARM_THM_JUMP19))
4324	  || (branch_type == ST_BRANCH_TO_ARM
4325	      && (((r_type == R_ARM_THM_CALL
4326		    || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
4327		  || (r_type == R_ARM_THM_JUMP24)
4328		  || (r_type == R_ARM_THM_JUMP19))
4329	      && !use_plt))
4330	{
4331	  /* If we need to insert a Thumb-Thumb long branch stub to a
4332	     PLT, use one that branches directly to the ARM PLT
4333	     stub. If we pretended we'd use the pre-PLT Thumb->ARM
4334	     stub, undo this now.  */
4335	  if ((branch_type == ST_BRANCH_TO_THUMB) && use_plt && !thumb_only)
4336	    {
4337	      branch_type = ST_BRANCH_TO_ARM;
4338	      branch_offset += PLT_THUMB_STUB_SIZE;
4339	    }
4340
4341	  if (branch_type == ST_BRANCH_TO_THUMB)
4342	    {
4343	      /* Thumb to thumb.  */
4344	      if (!thumb_only)
4345		{
4346		  if (input_sec->flags & SEC_ELF_PURECODE)
4347		    _bfd_error_handler
4348		      (_("%pB(%pA): warning: long branch veneers used in"
4349			 " section with SHF_ARM_PURECODE section"
4350			 " attribute are only supported for M-profile"
4351			 " targets that implement the movw instruction"),
4352		       input_bfd, input_sec);
4353
4354		  stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4355		    /* PIC stubs.  */
4356		    ? ((globals->use_blx
4357			&& (r_type == R_ARM_THM_CALL))
4358		       /* V5T and above. Stub starts with ARM code, so
4359			  we must be able to switch mode before
4360			  reaching it, which is only possible for 'bl'
4361			  (ie R_ARM_THM_CALL relocation).  */
4362		       ? arm_stub_long_branch_any_thumb_pic
4363		       /* On V4T, use Thumb code only.  */
4364		       : arm_stub_long_branch_v4t_thumb_thumb_pic)
4365
4366		    /* non-PIC stubs.  */
4367		    : ((globals->use_blx
4368			&& (r_type == R_ARM_THM_CALL))
4369		       /* V5T and above.  */
4370		       ? arm_stub_long_branch_any_any
4371		       /* V4T.  */
4372		       : arm_stub_long_branch_v4t_thumb_thumb);
4373		}
4374	      else
4375		{
4376		  if (thumb2_movw && (input_sec->flags & SEC_ELF_PURECODE))
4377		      stub_type = arm_stub_long_branch_thumb2_only_pure;
4378		  else
4379		    {
4380		      if (input_sec->flags & SEC_ELF_PURECODE)
4381			_bfd_error_handler
4382			  (_("%pB(%pA): warning: long branch veneers used in"
4383			     " section with SHF_ARM_PURECODE section"
4384			     " attribute are only supported for M-profile"
4385			     " targets that implement the movw instruction"),
4386			   input_bfd, input_sec);
4387
4388		      stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4389			/* PIC stub.  */
4390			? arm_stub_long_branch_thumb_only_pic
4391			/* non-PIC stub.  */
4392			: (thumb2 ? arm_stub_long_branch_thumb2_only
4393				  : arm_stub_long_branch_thumb_only);
4394		    }
4395		}
4396	    }
4397	  else
4398	    {
4399	      if (input_sec->flags & SEC_ELF_PURECODE)
4400		_bfd_error_handler
4401		  (_("%pB(%pA): warning: long branch veneers used in"
4402		     " section with SHF_ARM_PURECODE section"
4403		     " attribute are only supported for M-profile"
4404		     " targets that implement the movw instruction"),
4405		   input_bfd, input_sec);
4406
4407	      /* Thumb to arm.  */
4408	      if (sym_sec != NULL
4409		  && sym_sec->owner != NULL
4410		  && !INTERWORK_FLAG (sym_sec->owner))
4411		{
4412		  _bfd_error_handler
4413		    (_("%pB(%s): warning: interworking not enabled;"
4414		       " first occurrence: %pB: %s call to %s"),
4415		     sym_sec->owner, name, input_bfd, "Thumb", "ARM");
4416		}
4417
4418	      stub_type =
4419		(bfd_link_pic (info) | globals->pic_veneer)
4420		/* PIC stubs.  */
4421		? (r_type == R_ARM_THM_TLS_CALL
4422		   /* TLS PIC stubs.  */
4423		   ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
4424		      : arm_stub_long_branch_v4t_thumb_tls_pic)
4425		   : ((globals->use_blx && r_type == R_ARM_THM_CALL)
4426		      /* V5T PIC and above.  */
4427		      ? arm_stub_long_branch_any_arm_pic
4428		      /* V4T PIC stub.  */
4429		      : arm_stub_long_branch_v4t_thumb_arm_pic))
4430
4431		/* non-PIC stubs.  */
4432		: ((globals->use_blx && r_type == R_ARM_THM_CALL)
4433		   /* V5T and above.  */
4434		   ? arm_stub_long_branch_any_any
4435		   /* V4T.  */
4436		   : arm_stub_long_branch_v4t_thumb_arm);
4437
4438	      /* Handle v4t short branches.  */
4439	      if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
4440		  && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
4441		  && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
4442		stub_type = arm_stub_short_branch_v4t_thumb_arm;
4443	    }
4444	}
4445    }
4446  else if (r_type == R_ARM_CALL
4447	   || r_type == R_ARM_JUMP24
4448	   || r_type == R_ARM_PLT32
4449	   || r_type == R_ARM_TLS_CALL)
4450    {
4451      if (input_sec->flags & SEC_ELF_PURECODE)
4452	_bfd_error_handler
4453	  (_("%pB(%pA): warning: long branch veneers used in"
4454	     " section with SHF_ARM_PURECODE section"
4455	     " attribute are only supported for M-profile"
4456	     " targets that implement the movw instruction"),
4457	   input_bfd, input_sec);
4458      if (branch_type == ST_BRANCH_TO_THUMB)
4459	{
4460	  /* Arm to thumb.  */
4461
4462	  if (sym_sec != NULL
4463	      && sym_sec->owner != NULL
4464	      && !INTERWORK_FLAG (sym_sec->owner))
4465	    {
4466	      _bfd_error_handler
4467		(_("%pB(%s): warning: interworking not enabled;"
4468		   " first occurrence: %pB: %s call to %s"),
4469		 sym_sec->owner, name, input_bfd, "ARM", "Thumb");
4470	    }
4471
4472	  /* We have an extra 2-bytes reach because of
4473	     the mode change (bit 24 (H) of BLX encoding).  */
4474	  if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
4475	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
4476	      || (r_type == R_ARM_CALL && !globals->use_blx)
4477	      || (r_type == R_ARM_JUMP24)
4478	      || (r_type == R_ARM_PLT32))
4479	    {
4480	      stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4481		/* PIC stubs.  */
4482		? ((globals->use_blx)
4483		   /* V5T and above.  */
4484		   ? arm_stub_long_branch_any_thumb_pic
4485		   /* V4T stub.  */
4486		   : arm_stub_long_branch_v4t_arm_thumb_pic)
4487
4488		/* non-PIC stubs.  */
4489		: ((globals->use_blx)
4490		   /* V5T and above.  */
4491		   ? arm_stub_long_branch_any_any
4492		   /* V4T.  */
4493		   : arm_stub_long_branch_v4t_arm_thumb);
4494	    }
4495	}
4496      else
4497	{
4498	  /* Arm to arm.  */
4499	  if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
4500	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
4501	    {
4502	      stub_type =
4503		(bfd_link_pic (info) | globals->pic_veneer)
4504		/* PIC stubs.  */
4505		? (r_type == R_ARM_TLS_CALL
4506		   /* TLS PIC Stub.  */
4507		   ? arm_stub_long_branch_any_tls_pic
4508		   : (globals->root.target_os == is_nacl
4509		      ? arm_stub_long_branch_arm_nacl_pic
4510		      : arm_stub_long_branch_any_arm_pic))
4511		/* non-PIC stubs.  */
4512		: (globals->root.target_os == is_nacl
4513		   ? arm_stub_long_branch_arm_nacl
4514		   : arm_stub_long_branch_any_any);
4515	    }
4516	}
4517    }
4518
4519  /* If a stub is needed, record the actual destination type.  */
4520  if (stub_type != arm_stub_none)
4521    *actual_branch_type = branch_type;
4522
4523  return stub_type;
4524}
4525
4526/* Build a name for an entry in the stub hash table.  */
4527
4528static char *
4529elf32_arm_stub_name (const asection *input_section,
4530		     const asection *sym_sec,
4531		     const struct elf32_arm_link_hash_entry *hash,
4532		     const Elf_Internal_Rela *rel,
4533		     enum elf32_arm_stub_type stub_type)
4534{
4535  char *stub_name;
4536  bfd_size_type len;
4537
4538  if (hash)
4539    {
4540      len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
4541      stub_name = (char *) bfd_malloc (len);
4542      if (stub_name != NULL)
4543	sprintf (stub_name, "%08x_%s+%x_%d",
4544		 input_section->id & 0xffffffff,
4545		 hash->root.root.root.string,
4546		 (int) rel->r_addend & 0xffffffff,
4547		 (int) stub_type);
4548    }
4549  else
4550    {
4551      len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
4552      stub_name = (char *) bfd_malloc (len);
4553      if (stub_name != NULL)
4554	sprintf (stub_name, "%08x_%x:%x+%x_%d",
4555		 input_section->id & 0xffffffff,
4556		 sym_sec->id & 0xffffffff,
4557		 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
4558		 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
4559		 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
4560		 (int) rel->r_addend & 0xffffffff,
4561		 (int) stub_type);
4562    }
4563
4564  return stub_name;
4565}
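/* For example, a stub of type 3 reaching the global symbol "printf" from the
   section with id 0x2a and addend 0 would be named "0000002a_printf+0_3";
   the local-symbol form encodes the section id, symbol index and addend
   instead of a name.  (The values here are illustrative only.)  */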
4566
4567/* Look up an entry in the stub hash.  Stub entries are cached because
4568   creating the stub name takes a bit of time.  */
4569
4570static struct elf32_arm_stub_hash_entry *
4571elf32_arm_get_stub_entry (const asection *input_section,
4572			  const asection *sym_sec,
4573			  struct elf_link_hash_entry *hash,
4574			  const Elf_Internal_Rela *rel,
4575			  struct elf32_arm_link_hash_table *htab,
4576			  enum elf32_arm_stub_type stub_type)
4577{
4578  struct elf32_arm_stub_hash_entry *stub_entry;
4579  struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
4580  const asection *id_sec;
4581
4582  if ((input_section->flags & SEC_CODE) == 0)
4583    return NULL;
4584
4585  /* If the input section is the CMSE stubs one and it needs a long
4586     branch stub to reach its final destination, give up with an
4587     error message: this is not supported.  See PR ld/24709.  */
4588  if (!strncmp (input_section->name, CMSE_STUB_NAME, strlen (CMSE_STUB_NAME)))
4589    {
4590      bfd *output_bfd = htab->obfd;
4591      asection *out_sec = bfd_get_section_by_name (output_bfd, CMSE_STUB_NAME);
4592
4593      _bfd_error_handler (_("ERROR: CMSE stub (%s section) too far "
4594			    "(%#" PRIx64 ") from destination (%#" PRIx64 ")"),
4595			  CMSE_STUB_NAME,
4596			  (uint64_t)out_sec->output_section->vma
4597			    + out_sec->output_offset,
4598			  (uint64_t)sym_sec->output_section->vma
4599			    + sym_sec->output_offset
4600			    + h->root.root.u.def.value);
4601      /* Exit, rather than leave incompletely processed
4602	 relocations.  */
4603      xexit (1);
4604    }
4605
4606  /* If this input section is part of a group of sections sharing one
4607     stub section, then use the id of the first section in the group.
4608     Stub names need to include a section id, as there may well be
4609     more than one stub used to reach say, printf, and we need to
4610     distinguish between them.  */
4611  BFD_ASSERT (input_section->id <= htab->top_id);
4612  id_sec = htab->stub_group[input_section->id].link_sec;
4613
4614  if (h != NULL && h->stub_cache != NULL
4615      && h->stub_cache->h == h
4616      && h->stub_cache->id_sec == id_sec
4617      && h->stub_cache->stub_type == stub_type)
4618    {
4619      stub_entry = h->stub_cache;
4620    }
4621  else
4622    {
4623      char *stub_name;
4624
4625      stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
4626      if (stub_name == NULL)
4627	return NULL;
4628
4629      stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
4630					stub_name, false, false);
4631      if (h != NULL)
4632	h->stub_cache = stub_entry;
4633
4634      free (stub_name);
4635    }
4636
4637  return stub_entry;
4638}
4639
4640/* Whether veneers of type STUB_TYPE need to be placed in a dedicated output
4641   section.  */
4642
4643static bool
4644arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type)
4645{
4646  if (stub_type >= max_stub_type)
4647    abort ();  /* Should be unreachable.  */
4648
4649  switch (stub_type)
4650    {
4651    case arm_stub_cmse_branch_thumb_only:
4652      return true;
4653
4654    default:
4655      return false;
4656    }
4657
4658  abort ();  /* Should be unreachable.  */
4659}
4660
4661/* Required alignment (as a power of 2) for the dedicated section holding
4662   veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed
4663   with input sections.  */
4664
4665static int
4666arm_dedicated_stub_output_section_required_alignment
4667  (enum elf32_arm_stub_type stub_type)
4668{
4669  if (stub_type >= max_stub_type)
4670    abort ();  /* Should be unreachable.  */
4671
4672  switch (stub_type)
4673    {
4674    /* Vectors of Secure Gateway veneers must be aligned on a 32-byte
4675       boundary.  */
4676    case arm_stub_cmse_branch_thumb_only:
4677      return 5;
4678
4679    default:
4680      BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4681      return 0;
4682    }
4683
4684  abort ();  /* Should be unreachable.  */
4685}
4686
4687/* Name of the dedicated output section to put veneers of type STUB_TYPE, or
4688   NULL if veneers of this type are interspersed with input sections.  */
4689
4690static const char *
4691arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type)
4692{
4693  if (stub_type >= max_stub_type)
4694    abort ();  /* Should be unreachable.  */
4695
4696  switch (stub_type)
4697    {
4698    case arm_stub_cmse_branch_thumb_only:
4699      return CMSE_STUB_NAME;
4700
4701    default:
4702      BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4703      return NULL;
4704    }
4705
4706  abort ();  /* Should be unreachable.  */
4707}
4708
4709/* If veneers of type STUB_TYPE should go in a dedicated output section,
4710   returns the address of the hash table field in HTAB holding a pointer to the
4711   corresponding input section.  Otherwise, returns NULL.  */
4712
4713static asection **
4714arm_dedicated_stub_input_section_ptr (struct elf32_arm_link_hash_table *htab,
4715				      enum elf32_arm_stub_type stub_type)
4716{
4717  if (stub_type >= max_stub_type)
4718    abort ();  /* Should be unreachable.  */
4719
4720  switch (stub_type)
4721    {
4722    case arm_stub_cmse_branch_thumb_only:
4723      return &htab->cmse_stub_sec;
4724
4725    default:
4726      BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4727      return NULL;
4728    }
4729
4730  abort ();  /* Should be unreachable.  */
4731}
4732
4733/* Find or create a stub section to contain a stub of type STUB_TYPE.  SECTION
4734   is the section that branches into the veneer; it can be NULL if the stub
4735   should go in a dedicated output section.  Returns a pointer to the stub
4736   section, and the section to which the stub section will be attached (in
4737   *LINK_SEC_P).  LINK_SEC_P may be NULL.  */
4738
4739static asection *
4740elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
4741				   struct elf32_arm_link_hash_table *htab,
4742				   enum elf32_arm_stub_type stub_type)
4743{
4744  asection *link_sec, *out_sec, **stub_sec_p;
4745  const char *stub_sec_prefix;
4746  bool dedicated_output_section =
4747    arm_dedicated_stub_output_section_required (stub_type);
4748  int align;
4749
4750  if (dedicated_output_section)
4751    {
4752      bfd *output_bfd = htab->obfd;
4753      const char *out_sec_name =
4754	arm_dedicated_stub_output_section_name (stub_type);
4755      link_sec = NULL;
4756      stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
4757      stub_sec_prefix = out_sec_name;
4758      align = arm_dedicated_stub_output_section_required_alignment (stub_type);
4759      out_sec = bfd_get_section_by_name (output_bfd, out_sec_name);
4760      if (out_sec == NULL)
4761	{
4762	  _bfd_error_handler (_("no address assigned to the veneers output "
4763				"section %s"), out_sec_name);
4764	  return NULL;
4765	}
4766    }
4767  else
4768    {
4769      BFD_ASSERT (section->id <= htab->top_id);
4770      link_sec = htab->stub_group[section->id].link_sec;
4771      BFD_ASSERT (link_sec != NULL);
4772      stub_sec_p = &htab->stub_group[section->id].stub_sec;
4773      if (*stub_sec_p == NULL)
4774	stub_sec_p = &htab->stub_group[link_sec->id].stub_sec;
4775      stub_sec_prefix = link_sec->name;
4776      out_sec = link_sec->output_section;
4777      align = htab->root.target_os == is_nacl ? 4 : 3;
4778    }
4779
4780  if (*stub_sec_p == NULL)
4781    {
4782      size_t namelen;
4783      bfd_size_type len;
4784      char *s_name;
4785
4786      namelen = strlen (stub_sec_prefix);
4787      len = namelen + sizeof (STUB_SUFFIX);
4788      s_name = (char *) bfd_alloc (htab->stub_bfd, len);
4789      if (s_name == NULL)
4790	return NULL;
4791
4792      memcpy (s_name, stub_sec_prefix, namelen);
4793      memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
4794      *stub_sec_p = (*htab->add_stub_section) (s_name, out_sec, link_sec,
4795					       align);
4796      if (*stub_sec_p == NULL)
4797	return NULL;
4798
4799      out_sec->flags |= SEC_ALLOC | SEC_LOAD | SEC_READONLY | SEC_CODE
4800			| SEC_HAS_CONTENTS | SEC_RELOC | SEC_IN_MEMORY
4801			| SEC_KEEP;
4802    }
4803
4804  if (!dedicated_output_section)
4805    htab->stub_group[section->id].stub_sec = *stub_sec_p;
4806
4807  if (link_sec_p)
4808    *link_sec_p = link_sec;
4809
4810  return *stub_sec_p;
4811}
4812
4813/* Add a new stub entry to the stub hash.  Not all fields of the new
4814   stub entry are initialised.  */
4815
4816static struct elf32_arm_stub_hash_entry *
4817elf32_arm_add_stub (const char *stub_name, asection *section,
4818		    struct elf32_arm_link_hash_table *htab,
4819		    enum elf32_arm_stub_type stub_type)
4820{
4821  asection *link_sec;
4822  asection *stub_sec;
4823  struct elf32_arm_stub_hash_entry *stub_entry;
4824
4825  stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab,
4826						stub_type);
4827  if (stub_sec == NULL)
4828    return NULL;
4829
4830  /* Enter this entry into the linker stub hash table.  */
4831  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4832				     true, false);
4833  if (stub_entry == NULL)
4834    {
4835      if (section == NULL)
4836	section = stub_sec;
4837      _bfd_error_handler (_("%pB: cannot create stub entry %s"),
4838			  section->owner, stub_name);
4839      return NULL;
4840    }
4841
4842  stub_entry->stub_sec = stub_sec;
4843  stub_entry->stub_offset = (bfd_vma) -1;
4844  stub_entry->id_sec = link_sec;
4845
4846  return stub_entry;
4847}
4848
4849/* Store an Arm insn into an output section not processed by
4850   elf32_arm_write_section.  */
4851
4852static void
4853put_arm_insn (struct elf32_arm_link_hash_table * htab,
4854	      bfd * output_bfd, bfd_vma val, void * ptr)
4855{
4856  if (htab->byteswap_code != bfd_little_endian (output_bfd))
4857    bfd_putl32 (val, ptr);
4858  else
4859    bfd_putb32 (val, ptr);
4860}
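/* In other words, instructions are stored in the output's byte order unless
   a BE8 image is being produced (byteswap_code set for a big-endian output),
   in which case code is kept little-endian as BE8 requires.  */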
4861
4862/* Store a 16-bit Thumb insn into an output section not processed by
4863   elf32_arm_write_section.  */
4864
4865static void
4866put_thumb_insn (struct elf32_arm_link_hash_table * htab,
4867		bfd * output_bfd, bfd_vma val, void * ptr)
4868{
4869  if (htab->byteswap_code != bfd_little_endian (output_bfd))
4870    bfd_putl16 (val, ptr);
4871  else
4872    bfd_putb16 (val, ptr);
4873}
4874
4875/* Store a Thumb2 insn into an output section not processed by
4876   elf32_arm_write_section.  */
4877
4878static void
4879put_thumb2_insn (struct elf32_arm_link_hash_table * htab,
4880		 bfd * output_bfd, bfd_vma val, bfd_byte * ptr)
4881{
4882  /* Thumb-2 instructions are streamed as two 16-bit halfwords.  */
4883  if (htab->byteswap_code != bfd_little_endian (output_bfd))
4884    {
4885      bfd_putl16 ((val >> 16) & 0xffff, ptr);
4886      bfd_putl16 ((val & 0xffff), ptr + 2);
4887    }
4888  else
4889    {
4890      bfd_putb16 ((val >> 16) & 0xffff, ptr);
4891      bfd_putb16 ((val & 0xffff), ptr + 2);
4892    }
4893}
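/* For instance, a 32-bit Thumb-2 value 0x12345678 is written as the halfword
   0x1234 followed by the halfword 0x5678, each in the selected byte order.  */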
4894
4895/* If it's possible to change R_TYPE to a more efficient access
4896   model, return the new reloc type.  */
4897
4898static unsigned
4899elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
4900			  struct elf_link_hash_entry *h)
4901{
4902  int is_local = (h == NULL);
4903
4904  if (bfd_link_dll (info)
4905      || (h && h->root.type == bfd_link_hash_undefweak))
4906    return r_type;
4907
4908  /* We do not support relaxations for Old TLS models.  */
4909  switch (r_type)
4910    {
4911    case R_ARM_TLS_GOTDESC:
4912    case R_ARM_TLS_CALL:
4913    case R_ARM_THM_TLS_CALL:
4914    case R_ARM_TLS_DESCSEQ:
4915    case R_ARM_THM_TLS_DESCSEQ:
4916      return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
4917    }
4918
4919  return r_type;
4920}
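/* For example, when not producing a shared library, an R_ARM_TLS_GOTDESC
   reloc against a local symbol is relaxed to R_ARM_TLS_LE32, while the same
   reloc against a global symbol becomes R_ARM_TLS_IE32; every other
   relocation type is returned unchanged.  */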
4921
4922static bfd_reloc_status_type elf32_arm_final_link_relocate
4923  (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
4924   Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
4925   const char *, unsigned char, enum arm_st_branch_type,
4926   struct elf_link_hash_entry *, bool *, char **);
4927
4928static unsigned int
4929arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
4930{
4931  switch (stub_type)
4932    {
4933    case arm_stub_a8_veneer_b_cond:
4934    case arm_stub_a8_veneer_b:
4935    case arm_stub_a8_veneer_bl:
4936      return 2;
4937
4938    case arm_stub_long_branch_any_any:
4939    case arm_stub_long_branch_v4t_arm_thumb:
4940    case arm_stub_long_branch_thumb_only:
4941    case arm_stub_long_branch_thumb2_only:
4942    case arm_stub_long_branch_thumb2_only_pure:
4943    case arm_stub_long_branch_v4t_thumb_thumb:
4944    case arm_stub_long_branch_v4t_thumb_arm:
4945    case arm_stub_short_branch_v4t_thumb_arm:
4946    case arm_stub_long_branch_any_arm_pic:
4947    case arm_stub_long_branch_any_thumb_pic:
4948    case arm_stub_long_branch_v4t_thumb_thumb_pic:
4949    case arm_stub_long_branch_v4t_arm_thumb_pic:
4950    case arm_stub_long_branch_v4t_thumb_arm_pic:
4951    case arm_stub_long_branch_thumb_only_pic:
4952    case arm_stub_long_branch_any_tls_pic:
4953    case arm_stub_long_branch_v4t_thumb_tls_pic:
4954    case arm_stub_cmse_branch_thumb_only:
4955    case arm_stub_a8_veneer_blx:
4956      return 4;
4957
4958    case arm_stub_long_branch_arm_nacl:
4959    case arm_stub_long_branch_arm_nacl_pic:
4960      return 16;
4961
4962    default:
4963      abort ();  /* Should be unreachable.  */
4964    }
4965}
4966
4967/* Returns whether stubs of type STUB_TYPE take over the symbol they are
4968   veneering (TRUE) or have their own symbol (FALSE).  */
4969
4970static bool
4971arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type)
4972{
4973  if (stub_type >= max_stub_type)
4974    abort ();  /* Should be unreachable.  */
4975
4976  switch (stub_type)
4977    {
4978    case arm_stub_cmse_branch_thumb_only:
4979      return true;
4980
4981    default:
4982      return false;
4983    }
4984
4985  abort ();  /* Should be unreachable.  */
4986}
4987
/* Returns the padding needed for the dedicated section used by stubs of
   type STUB_TYPE.  */
4990
4991static int
4992arm_dedicated_stub_section_padding (enum elf32_arm_stub_type stub_type)
4993{
4994  if (stub_type >= max_stub_type)
4995    abort ();  /* Should be unreachable.  */
4996
4997  switch (stub_type)
4998    {
4999    case arm_stub_cmse_branch_thumb_only:
5000      return 32;
5001
5002    default:
5003      return 0;
5004    }
5005
5006  abort ();  /* Should be unreachable.  */
5007}
5008
5009/* If veneers of type STUB_TYPE should go in a dedicated output section,
5010   returns the address of the hash table field in HTAB holding the offset at
   which new veneers should be laid out in the stub section.  */
5012
5013static bfd_vma*
5014arm_new_stubs_start_offset_ptr (struct elf32_arm_link_hash_table *htab,
5015				enum elf32_arm_stub_type stub_type)
5016{
5017  switch (stub_type)
5018    {
5019    case arm_stub_cmse_branch_thumb_only:
5020      return &htab->new_cmse_stub_offset;
5021
5022    default:
5023      BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
5024      return NULL;
5025    }
5026}
5027
5028static bool
5029arm_build_one_stub (struct bfd_hash_entry *gen_entry,
5030		    void * in_arg)
5031{
5032#define MAXRELOCS 3
5033  bool removed_sg_veneer;
5034  struct elf32_arm_stub_hash_entry *stub_entry;
5035  struct elf32_arm_link_hash_table *globals;
5036  struct bfd_link_info *info;
5037  asection *stub_sec;
5038  bfd *stub_bfd;
5039  bfd_byte *loc;
5040  bfd_vma sym_value;
5041  int template_size;
5042  int size;
5043  const insn_sequence *template_sequence;
5044  int i;
  int stub_reloc_idx[MAXRELOCS] = {-1, -1, -1};
  int stub_reloc_offset[MAXRELOCS] = {0, 0, 0};
5047  int nrelocs = 0;
5048  int just_allocated = 0;
5049
5050  /* Massage our args to the form they really have.  */
5051  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
5052  info = (struct bfd_link_info *) in_arg;
5053
  /* Fail if the target section could not be assigned to an output
     section.  The user should fix their linker script.  */
5056  if (stub_entry->target_section->output_section == NULL
5057      && info->non_contiguous_regions)
5058    info->callbacks->einfo (_("%F%P: Could not assign '%pA' to an output section. "
5059			      "Retry without --enable-non-contiguous-regions.\n"),
5060			    stub_entry->target_section);
5061
5062  globals = elf32_arm_hash_table (info);
5063  if (globals == NULL)
5064    return false;
5065
5066  stub_sec = stub_entry->stub_sec;
5067
5068  if ((globals->fix_cortex_a8 < 0)
5069      != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
5070    /* We have to do less-strictly-aligned fixes last.  */
5071    return true;
5072
5073  /* Assign a slot at the end of section if none assigned yet.  */
5074  if (stub_entry->stub_offset == (bfd_vma) -1)
5075    {
5076      stub_entry->stub_offset = stub_sec->size;
5077      just_allocated = 1;
5078    }
5079  loc = stub_sec->contents + stub_entry->stub_offset;
5080
5081  stub_bfd = stub_sec->owner;
5082
5083  /* This is the address of the stub destination.  */
5084  sym_value = (stub_entry->target_value
5085	       + stub_entry->target_section->output_offset
5086	       + stub_entry->target_section->output_section->vma);
5087
5088  template_sequence = stub_entry->stub_template;
5089  template_size = stub_entry->stub_template_size;
5090
5091  size = 0;
5092  for (i = 0; i < template_size; i++)
5093    {
5094      switch (template_sequence[i].type)
5095	{
5096	case THUMB16_TYPE:
5097	  {
5098	    bfd_vma data = (bfd_vma) template_sequence[i].data;
5099	    if (template_sequence[i].reloc_addend != 0)
5100	      {
5101		/* We've borrowed the reloc_addend field to mean we should
5102		   insert a condition code into this (Thumb-1 branch)
5103		   instruction.  See THUMB16_BCOND_INSN.  */
5104		BFD_ASSERT ((data & 0xff00) == 0xd000);
5105		data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
5106	      }
5107	    bfd_put_16 (stub_bfd, data, loc + size);
5108	    size += 2;
5109	  }
5110	  break;
5111
5112	case THUMB32_TYPE:
5113	  bfd_put_16 (stub_bfd,
5114		      (template_sequence[i].data >> 16) & 0xffff,
5115		      loc + size);
5116	  bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
5117		      loc + size + 2);
5118	  if (template_sequence[i].r_type != R_ARM_NONE)
5119	    {
5120	      stub_reloc_idx[nrelocs] = i;
5121	      stub_reloc_offset[nrelocs++] = size;
5122	    }
5123	  size += 4;
5124	  break;
5125
5126	case ARM_TYPE:
5127	  bfd_put_32 (stub_bfd, template_sequence[i].data,
5128		      loc + size);
5129	  /* Handle cases where the target is encoded within the
5130	     instruction.  */
5131	  if (template_sequence[i].r_type == R_ARM_JUMP24)
5132	    {
5133	      stub_reloc_idx[nrelocs] = i;
5134	      stub_reloc_offset[nrelocs++] = size;
5135	    }
5136	  size += 4;
5137	  break;
5138
5139	case DATA_TYPE:
5140	  bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
5141	  stub_reloc_idx[nrelocs] = i;
5142	  stub_reloc_offset[nrelocs++] = size;
5143	  size += 4;
5144	  break;
5145
5146	default:
5147	  BFD_FAIL ();
5148	  return false;
5149	}
5150    }
5151
5152  if (just_allocated)
5153    stub_sec->size += size;
5154
5155  /* Stub size has already been computed in arm_size_one_stub. Check
5156     consistency.  */
5157  BFD_ASSERT (size == stub_entry->stub_size);
5158
5159  /* Destination is Thumb. Force bit 0 to 1 to reflect this.  */
5160  if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
5161    sym_value |= 1;
5162
  /* Assume non-empty slots have at least one and at most MAXRELOCS entries
5164     to relocate in each stub.  */
5165  removed_sg_veneer =
5166    (size == 0 && stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
5167  BFD_ASSERT (removed_sg_veneer || (nrelocs != 0 && nrelocs <= MAXRELOCS));
5168
5169  for (i = 0; i < nrelocs; i++)
5170    {
5171      Elf_Internal_Rela rel;
5172      bool unresolved_reloc;
5173      char *error_message;
5174      bfd_vma points_to =
5175	sym_value + template_sequence[stub_reloc_idx[i]].reloc_addend;
5176
5177      rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
5178      rel.r_info = ELF32_R_INFO (0,
5179				 template_sequence[stub_reloc_idx[i]].r_type);
5180      rel.r_addend = 0;
5181
5182      if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
5183	/* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
5184	   template should refer back to the instruction after the original
5185	   branch.  We use target_section as Cortex-A8 erratum workaround stubs
5186	   are only generated when both source and target are in the same
5187	   section.  */
5188	points_to = stub_entry->target_section->output_section->vma
5189		    + stub_entry->target_section->output_offset
5190		    + stub_entry->source_value;
5191
5192      elf32_arm_final_link_relocate (elf32_arm_howto_from_type
5193	  (template_sequence[stub_reloc_idx[i]].r_type),
5194	   stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
5195	   points_to, info, stub_entry->target_section, "", STT_FUNC,
5196	   stub_entry->branch_type,
5197	   (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
5198	   &error_message);
5199    }
5200
5201  return true;
5202#undef MAXRELOCS
5203}
5204
/* Calculate the template, template size and instruction size for a stub.
   Return value is the size of the stub's instructions in bytes.  */
5207
5208static unsigned int
5209find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
5210			     const insn_sequence **stub_template,
5211			     int *stub_template_size)
5212{
5213  const insn_sequence *template_sequence = NULL;
5214  int template_size = 0, i;
5215  unsigned int size;
5216
5217  template_sequence = stub_definitions[stub_type].template_sequence;
5218  if (stub_template)
5219    *stub_template = template_sequence;
5220
5221  template_size = stub_definitions[stub_type].template_size;
5222  if (stub_template_size)
5223    *stub_template_size = template_size;
5224
5225  size = 0;
5226  for (i = 0; i < template_size; i++)
5227    {
5228      switch (template_sequence[i].type)
5229	{
5230	case THUMB16_TYPE:
5231	  size += 2;
5232	  break;
5233
5234	case ARM_TYPE:
5235	case THUMB32_TYPE:
5236	case DATA_TYPE:
5237	  size += 4;
5238	  break;
5239
5240	default:
5241	  BFD_FAIL ();
5242	  return 0;
5243	}
5244    }
5245
5246  return size;
5247}
5248
5249/* As above, but don't actually build the stub.  Just bump offset so
5250   we know stub section sizes.  */
5251
5252static bool
5253arm_size_one_stub (struct bfd_hash_entry *gen_entry,
5254		   void *in_arg ATTRIBUTE_UNUSED)
5255{
5256  struct elf32_arm_stub_hash_entry *stub_entry;
5257  const insn_sequence *template_sequence;
5258  int template_size, size;
5259
5260  /* Massage our args to the form they really have.  */
5261  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
5262
5263  BFD_ASSERT ((stub_entry->stub_type > arm_stub_none)
5264	      && stub_entry->stub_type < ARRAY_SIZE (stub_definitions));
5265
5266  size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
5267				      &template_size);
5268
5269  /* Initialized to -1.  Null size indicates an empty slot full of zeros.  */
5270  if (stub_entry->stub_template_size)
5271    {
5272      stub_entry->stub_size = size;
5273      stub_entry->stub_template = template_sequence;
5274      stub_entry->stub_template_size = template_size;
5275    }
5276
5277  /* Already accounted for.  */
5278  if (stub_entry->stub_offset != (bfd_vma) -1)
5279    return true;
5280
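  /* Round the stub size up to a multiple of 8 bytes so that consecutive
     stubs in the section stay 8-byte aligned.  */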
5281  size = (size + 7) & ~7;
5282  stub_entry->stub_sec->size += size;
5283
5284  return true;
5285}
5286
5287/* External entry points for sizing and building linker stubs.  */
5288
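/* A rough sketch of the expected use, as driven by the linker proper: the
   ARM ELF emulation calls elf32_arm_setup_section_lists once, then
   elf32_arm_next_input_section for each input section, and then
   elf32_arm_size_stubs (which may iterate and call the layout callback
   several times) before the stubs are finally built.  This is only an
   informal description of the caller, not something enforced here.  */
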
5289/* Set up various things so that we can make a list of input sections
5290   for each output section included in the link.  Returns -1 on error,
5291   0 when no stubs will be needed, and 1 on success.  */
5292
5293int
5294elf32_arm_setup_section_lists (bfd *output_bfd,
5295			       struct bfd_link_info *info)
5296{
5297  bfd *input_bfd;
5298  unsigned int bfd_count;
5299  unsigned int top_id, top_index;
5300  asection *section;
5301  asection **input_list, **list;
5302  size_t amt;
5303  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5304
5305  if (htab == NULL)
5306    return 0;
5307
5308  /* Count the number of input BFDs and find the top input section id.  */
5309  for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
5310       input_bfd != NULL;
5311       input_bfd = input_bfd->link.next)
5312    {
5313      bfd_count += 1;
5314      for (section = input_bfd->sections;
5315	   section != NULL;
5316	   section = section->next)
5317	{
5318	  if (top_id < section->id)
5319	    top_id = section->id;
5320	}
5321    }
5322  htab->bfd_count = bfd_count;
5323
5324  amt = sizeof (struct map_stub) * (top_id + 1);
5325  htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
5326  if (htab->stub_group == NULL)
5327    return -1;
5328  htab->top_id = top_id;
5329
5330  /* We can't use output_bfd->section_count here to find the top output
5331     section index as some sections may have been removed, and
5332     _bfd_strip_section_from_output doesn't renumber the indices.  */
5333  for (section = output_bfd->sections, top_index = 0;
5334       section != NULL;
5335       section = section->next)
5336    {
5337      if (top_index < section->index)
5338	top_index = section->index;
5339    }
5340
5341  htab->top_index = top_index;
5342  amt = sizeof (asection *) * (top_index + 1);
5343  input_list = (asection **) bfd_malloc (amt);
5344  htab->input_list = input_list;
5345  if (input_list == NULL)
5346    return -1;
5347
5348  /* For sections we aren't interested in, mark their entries with a
5349     value we can check later.  */
5350  list = input_list + top_index;
5351  do
5352    *list = bfd_abs_section_ptr;
5353  while (list-- != input_list);
5354
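  /* Now reset the entries for output sections that contain code to NULL;
     elf32_arm_next_input_section will thread the input sections onto
     these lists.  */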
5355  for (section = output_bfd->sections;
5356       section != NULL;
5357       section = section->next)
5358    {
5359      if ((section->flags & SEC_CODE) != 0)
5360	input_list[section->index] = NULL;
5361    }
5362
5363  return 1;
5364}
5365
5366/* The linker repeatedly calls this function for each input section,
5367   in the order that input sections are linked into output sections.
5368   Build lists of input sections to determine groupings between which
5369   we may insert linker stubs.  */
5370
5371void
5372elf32_arm_next_input_section (struct bfd_link_info *info,
5373			      asection *isec)
5374{
5375  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5376
5377  if (htab == NULL)
5378    return;
5379
5380  if (isec->output_section->index <= htab->top_index)
5381    {
5382      asection **list = htab->input_list + isec->output_section->index;
5383
5384      if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
5385	{
5386	  /* Steal the link_sec pointer for our list.  */
5387#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
5388	  /* This happens to make the list in reverse order,
5389	     which we reverse later.  */
5390	  PREV_SEC (isec) = *list;
5391	  *list = isec;
5392	}
5393    }
5394}
5395
5396/* See whether we can group stub sections together.  Grouping stub
5397   sections may result in fewer stubs.  More importantly, we need to
5398   put all .init* and .fini* stubs at the end of the .init or
5399   .fini output sections respectively, because glibc splits the
5400   _init and _fini functions into multiple parts.  Putting a stub in
5401   the middle of a function is not a good idea.  */
5402
5403static void
5404group_sections (struct elf32_arm_link_hash_table *htab,
5405		bfd_size_type stub_group_size,
5406		bool stubs_always_after_branch)
5407{
5408  asection **list = htab->input_list;
5409
5410  do
5411    {
5412      asection *tail = *list;
5413      asection *head;
5414
5415      if (tail == bfd_abs_section_ptr)
5416	continue;
5417
5418      /* Reverse the list: we must avoid placing stubs at the
5419	 beginning of the section because the beginning of the text
5420	 section may be required for an interrupt vector in bare metal
5421	 code.  */
5422#define NEXT_SEC PREV_SEC
5423      head = NULL;
5424      while (tail != NULL)
5425	{
5426	  /* Pop from tail.  */
5427	  asection *item = tail;
5428	  tail = PREV_SEC (item);
5429
5430	  /* Push on head.  */
5431	  NEXT_SEC (item) = head;
5432	  head = item;
5433	}
5434
5435      while (head != NULL)
5436	{
5437	  asection *curr;
5438	  asection *next;
5439	  bfd_vma stub_group_start = head->output_offset;
5440	  bfd_vma end_of_next;
5441
5442	  curr = head;
5443	  while (NEXT_SEC (curr) != NULL)
5444	    {
5445	      next = NEXT_SEC (curr);
5446	      end_of_next = next->output_offset + next->size;
5447	      if (end_of_next - stub_group_start >= stub_group_size)
5448		/* End of NEXT is too far from start, so stop.  */
5449		break;
5450	      /* Add NEXT to the group.  */
5451	      curr = next;
5452	    }
5453
5454	  /* OK, the size from the start to the start of CURR is less
5455	     than stub_group_size and thus can be handled by one stub
5456	     section.  (Or the head section is itself larger than
5457	     stub_group_size, in which case we may be toast.)
5458	     We should really be keeping track of the total size of
5459	     stubs added here, as stubs contribute to the final output
5460	     section size.  */
5461	  do
5462	    {
5463	      next = NEXT_SEC (head);
5464	      /* Set up this stub group.  */
5465	      htab->stub_group[head->id].link_sec = curr;
5466	    }
5467	  while (head != curr && (head = next) != NULL);
5468
5469	  /* But wait, there's more!  Input sections up to stub_group_size
5470	     bytes after the stub section can be handled by it too.  */
5471	  if (!stubs_always_after_branch)
5472	    {
5473	      stub_group_start = curr->output_offset + curr->size;
5474
5475	      while (next != NULL)
5476		{
5477		  end_of_next = next->output_offset + next->size;
5478		  if (end_of_next - stub_group_start >= stub_group_size)
5479		    /* End of NEXT is too far from stubs, so stop.  */
5480		    break;
5481		  /* Add NEXT to the stub group.  */
5482		  head = next;
5483		  next = NEXT_SEC (head);
5484		  htab->stub_group[head->id].link_sec = curr;
5485		}
5486	    }
5487	  head = next;
5488	}
5489    }
5490  while (list++ != htab->input_list + htab->top_index);
5491
5492  free (htab->input_list);
5493#undef PREV_SEC
5494#undef NEXT_SEC
5495}
5496
5497/* Comparison function for sorting/searching relocations relating to Cortex-A8
5498   erratum fix.  */
5499
5500static int
5501a8_reloc_compare (const void *a, const void *b)
5502{
5503  const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
5504  const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
5505
5506  if (ra->from < rb->from)
5507    return -1;
5508  else if (ra->from > rb->from)
5509    return 1;
5510  else
5511    return 0;
5512}
5513
5514static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
5515						    const char *, char **);
5516
5517/* Helper function to scan code for sequences which might trigger the Cortex-A8
5518   branch/TLB erratum.  Fill in the table described by A8_FIXES_P,
5519   NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P.  Returns true if an error occurs, false
5520   otherwise.  */
5521
5522static bool
5523cortex_a8_erratum_scan (bfd *input_bfd,
5524			struct bfd_link_info *info,
5525			struct a8_erratum_fix **a8_fixes_p,
5526			unsigned int *num_a8_fixes_p,
5527			unsigned int *a8_fix_table_size_p,
5528			struct a8_erratum_reloc *a8_relocs,
5529			unsigned int num_a8_relocs,
5530			unsigned prev_num_a8_fixes,
5531			bool *stub_changed_p)
5532{
5533  asection *section;
5534  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5535  struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
5536  unsigned int num_a8_fixes = *num_a8_fixes_p;
5537  unsigned int a8_fix_table_size = *a8_fix_table_size_p;
5538
5539  if (htab == NULL)
5540    return false;
5541
5542  for (section = input_bfd->sections;
5543       section != NULL;
5544       section = section->next)
5545    {
5546      bfd_byte *contents = NULL;
5547      struct _arm_elf_section_data *sec_data;
5548      unsigned int span;
5549      bfd_vma base_vma;
5550
5551      if (elf_section_type (section) != SHT_PROGBITS
5552	  || (elf_section_flags (section) & SHF_EXECINSTR) == 0
5553	  || (section->flags & SEC_EXCLUDE) != 0
5554	  || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
5555	  || (section->output_section == bfd_abs_section_ptr))
5556	continue;
5557
5558      base_vma = section->output_section->vma + section->output_offset;
5559
5560      if (elf_section_data (section)->this_hdr.contents != NULL)
5561	contents = elf_section_data (section)->this_hdr.contents;
5562      else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
5563	return true;
5564
5565      sec_data = elf32_arm_section_data (section);
5566
5567      for (span = 0; span < sec_data->mapcount; span++)
5568	{
5569	  unsigned int span_start = sec_data->map[span].vma;
5570	  unsigned int span_end = (span == sec_data->mapcount - 1)
5571	    ? section->size : sec_data->map[span + 1].vma;
5572	  unsigned int i;
5573	  char span_type = sec_data->map[span].type;
5574	  bool last_was_32bit = false, last_was_branch = false;
5575
5576	  if (span_type != 't')
5577	    continue;
5578
5579	  /* Span is entirely within a single 4KB region: skip scanning.  */
5580	  if (((base_vma + span_start) & ~0xfff)
5581	      == ((base_vma + span_end) & ~0xfff))
5582	    continue;
5583
5584	  /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
5585
5586	       * The opcode is BLX.W, BL.W, B.W, Bcc.W
5587	       * The branch target is in the same 4KB region as the
5588		 first half of the branch.
5589	       * The instruction before the branch is a 32-bit
5590		 length non-branch instruction.  */
5591	  for (i = span_start; i < span_end;)
5592	    {
5593	      unsigned int insn = bfd_getl16 (&contents[i]);
5594	      bool insn_32bit = false, is_blx = false, is_b = false;
5595	      bool is_bl = false, is_bcc = false, is_32bit_branch;
5596
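	      /* The first halfword of a 32-bit Thumb-2 instruction has
		 its top five bits set to 0b11101, 0b11110 or 0b11111;
		 anything else is a 16-bit instruction.  */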
5597	      if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
5598		insn_32bit = true;
5599
5600	      if (insn_32bit)
5601		{
5602		  /* Load the rest of the insn (in manual-friendly order).  */
5603		  insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
5604
5605		  /* Encoding T4: B<c>.W.  */
5606		  is_b = (insn & 0xf800d000) == 0xf0009000;
5607		  /* Encoding T1: BL<c>.W.  */
5608		  is_bl = (insn & 0xf800d000) == 0xf000d000;
5609		  /* Encoding T2: BLX<c>.W.  */
5610		  is_blx = (insn & 0xf800d000) == 0xf000c000;
5611		  /* Encoding T3: B<c>.W (not permitted in IT block).  */
5612		  is_bcc = (insn & 0xf800d000) == 0xf0008000
5613			   && (insn & 0x07f00000) != 0x03800000;
5614		}
5615
5616	      is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
5617
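	      /* Look for branches whose first halfword occupies the last
		 two bytes of a 4KB region, i.e. the 32-bit branch
		 straddles a 4KB boundary.  */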
5618	      if (((base_vma + i) & 0xfff) == 0xffe
5619		  && insn_32bit
5620		  && is_32bit_branch
5621		  && last_was_32bit
5622		  && ! last_was_branch)
5623		{
5624		  bfd_signed_vma offset = 0;
5625		  bool force_target_arm = false;
5626		  bool force_target_thumb = false;
5627		  bfd_vma target;
5628		  enum elf32_arm_stub_type stub_type = arm_stub_none;
5629		  struct a8_erratum_reloc key, *found;
5630		  bool use_plt = false;
5631
5632		  key.from = base_vma + i;
5633		  found = (struct a8_erratum_reloc *)
5634		      bsearch (&key, a8_relocs, num_a8_relocs,
5635			       sizeof (struct a8_erratum_reloc),
5636			       &a8_reloc_compare);
5637
5638		  if (found)
5639		    {
5640		      char *error_message = NULL;
5641		      struct elf_link_hash_entry *entry;
5642
5643		      /* We don't care about the error returned from this
5644			 function, only if there is glue or not.  */
5645		      entry = find_thumb_glue (info, found->sym_name,
5646					       &error_message);
5647
5648		      if (entry)
5649			found->non_a8_stub = true;
5650
5651		      /* Keep a simpler condition, for the sake of clarity.  */
5652		      if (htab->root.splt != NULL && found->hash != NULL
5653			  && found->hash->root.plt.offset != (bfd_vma) -1)
5654			use_plt = true;
5655
5656		      if (found->r_type == R_ARM_THM_CALL)
5657			{
5658			  if (found->branch_type == ST_BRANCH_TO_ARM
5659			      || use_plt)
5660			    force_target_arm = true;
5661			  else
5662			    force_target_thumb = true;
5663			}
5664		    }
5665
5666		  /* Check if we have an offending branch instruction.  */
5667
5668		  if (found && found->non_a8_stub)
5669		    /* We've already made a stub for this instruction, e.g.
5670		       it's a long branch or a Thumb->ARM stub.  Assume that
5671		       stub will suffice to work around the A8 erratum (see
5672		       setting of always_after_branch above).  */
5673		    ;
5674		  else if (is_bcc)
5675		    {
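		      /* Reassemble the Bcc.W (encoding T3) offset from
			 S:J2:J1:imm6:imm11:'0' and sign-extend it from
			 bit 20.  */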
5676		      offset = (insn & 0x7ff) << 1;
5677		      offset |= (insn & 0x3f0000) >> 4;
5678		      offset |= (insn & 0x2000) ? 0x40000 : 0;
5679		      offset |= (insn & 0x800) ? 0x80000 : 0;
5680		      offset |= (insn & 0x4000000) ? 0x100000 : 0;
5681		      if (offset & 0x100000)
5682			offset |= ~ ((bfd_signed_vma) 0xfffff);
5683		      stub_type = arm_stub_a8_veneer_b_cond;
5684		    }
5685		  else if (is_b || is_bl || is_blx)
5686		    {
5687		      int s = (insn & 0x4000000) != 0;
5688		      int j1 = (insn & 0x2000) != 0;
5689		      int j2 = (insn & 0x800) != 0;
5690		      int i1 = !(j1 ^ s);
5691		      int i2 = !(j2 ^ s);
5692
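		      /* Reassemble the offset from S:I1:I2:imm10:imm11:'0'
			 (encodings T4/T1/T2 of B/BL/BLX) and sign-extend
			 it from bit 24.  */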
5693		      offset = (insn & 0x7ff) << 1;
5694		      offset |= (insn & 0x3ff0000) >> 4;
5695		      offset |= i2 << 22;
5696		      offset |= i1 << 23;
5697		      offset |= s << 24;
5698		      if (offset & 0x1000000)
5699			offset |= ~ ((bfd_signed_vma) 0xffffff);
5700
5701		      if (is_blx)
5702			offset &= ~ ((bfd_signed_vma) 3);
5703
5704		      stub_type = is_blx ? arm_stub_a8_veneer_blx :
5705			is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
5706		    }
5707
5708		  if (stub_type != arm_stub_none)
5709		    {
5710		      bfd_vma pc_for_insn = base_vma + i + 4;
5711
5712		      /* The original instruction is a BL, but the target is
5713			 an ARM instruction.  If we were not making a stub,
5714			 the BL would have been converted to a BLX.  Use the
5715			 BLX stub instead in that case.  */
5716		      if (htab->use_blx && force_target_arm
5717			  && stub_type == arm_stub_a8_veneer_bl)
5718			{
5719			  stub_type = arm_stub_a8_veneer_blx;
5720			  is_blx = true;
5721			  is_bl = false;
5722			}
5723		      /* Conversely, if the original instruction was
5724			 BLX but the target is Thumb mode, use the BL
5725			 stub.  */
5726		      else if (force_target_thumb
5727			       && stub_type == arm_stub_a8_veneer_blx)
5728			{
5729			  stub_type = arm_stub_a8_veneer_bl;
5730			  is_blx = false;
5731			  is_bl = true;
5732			}
5733
5734		      if (is_blx)
5735			pc_for_insn &= ~ ((bfd_vma) 3);
5736
5737		      /* If we found a relocation, use the proper destination,
5738			 not the offset in the (unrelocated) instruction.
5739			 Note this is always done if we switched the stub type
5740			 above.  */
5741		      if (found)
5742			offset =
5743			  (bfd_signed_vma) (found->destination - pc_for_insn);
5744
5745		      /* If the stub will use a Thumb-mode branch to a
5746			 PLT target, redirect it to the preceding Thumb
5747			 entry point.  */
5748		      if (stub_type != arm_stub_a8_veneer_blx && use_plt)
5749			offset -= PLT_THUMB_STUB_SIZE;
5750
5751		      target = pc_for_insn + offset;
5752
5753		      /* The BLX stub is ARM-mode code.  Adjust the offset to
5754			 take the different PC value (+8 instead of +4) into
5755			 account.  */
5756		      if (stub_type == arm_stub_a8_veneer_blx)
5757			offset += 4;
5758
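		      /* The erratum only matters when the branch and its
			 target are in the same 4KB region, so only create
			 a fix in that case.  */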
5759		      if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
5760			{
5761			  char *stub_name = NULL;
5762
5763			  if (num_a8_fixes == a8_fix_table_size)
5764			    {
5765			      a8_fix_table_size *= 2;
5766			      a8_fixes = (struct a8_erratum_fix *)
5767				  bfd_realloc (a8_fixes,
5768					       sizeof (struct a8_erratum_fix)
5769					       * a8_fix_table_size);
5770			    }
5771
5772			  if (num_a8_fixes < prev_num_a8_fixes)
5773			    {
5774			      /* If we're doing a subsequent scan,
5775				 check if we've found the same fix as
5776				 before, and try and reuse the stub
5777				 name.  */
5778			      stub_name = a8_fixes[num_a8_fixes].stub_name;
5779			      if ((a8_fixes[num_a8_fixes].section != section)
5780				  || (a8_fixes[num_a8_fixes].offset != i))
5781				{
5782				  free (stub_name);
5783				  stub_name = NULL;
5784				  *stub_changed_p = true;
5785				}
5786			    }
5787
5788			  if (!stub_name)
5789			    {
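			      /* Room for two 32-bit hex numbers, the
				 separating colon and the terminating NUL,
				 matching the sprintf format below.  */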
5790			      stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
5791			      if (stub_name != NULL)
5792				sprintf (stub_name, "%x:%x", section->id, i);
5793			    }
5794
5795			  a8_fixes[num_a8_fixes].input_bfd = input_bfd;
5796			  a8_fixes[num_a8_fixes].section = section;
5797			  a8_fixes[num_a8_fixes].offset = i;
5798			  a8_fixes[num_a8_fixes].target_offset =
5799			    target - base_vma;
5800			  a8_fixes[num_a8_fixes].orig_insn = insn;
5801			  a8_fixes[num_a8_fixes].stub_name = stub_name;
5802			  a8_fixes[num_a8_fixes].stub_type = stub_type;
5803			  a8_fixes[num_a8_fixes].branch_type =
5804			    is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;
5805
5806			  num_a8_fixes++;
5807			}
5808		    }
5809		}
5810
5811	      i += insn_32bit ? 4 : 2;
5812	      last_was_32bit = insn_32bit;
5813	      last_was_branch = is_32bit_branch;
5814	    }
5815	}
5816
5817      if (elf_section_data (section)->this_hdr.contents == NULL)
5818	free (contents);
5819    }
5820
5821  *a8_fixes_p = a8_fixes;
5822  *num_a8_fixes_p = num_a8_fixes;
5823  *a8_fix_table_size_p = a8_fix_table_size;
5824
5825  return false;
5826}
5827
5828/* Create or update a stub entry depending on whether the stub can already be
5829   found in HTAB.  The stub is identified by:
5830   - its type STUB_TYPE
5831   - its source branch (note that several can share the same stub) whose
5832     section and relocation (if any) are given by SECTION and IRELA
5833     respectively
5834   - its target symbol whose input section, hash, name, value and branch type
5835     are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
5836     respectively
5837
5838   If found, the value of the stub's target symbol is updated from SYM_VALUE
5839   and *NEW_STUB is set to FALSE.  Otherwise, *NEW_STUB is set to
5840   TRUE and the stub entry is initialized.
5841
5842   Returns the stub that was created or updated, or NULL if an error
5843   occurred.  */
5844
5845static struct elf32_arm_stub_hash_entry *
5846elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
5847		       enum elf32_arm_stub_type stub_type, asection *section,
5848		       Elf_Internal_Rela *irela, asection *sym_sec,
5849		       struct elf32_arm_link_hash_entry *hash, char *sym_name,
5850		       bfd_vma sym_value, enum arm_st_branch_type branch_type,
5851		       bool *new_stub)
5852{
5853  const asection *id_sec;
5854  char *stub_name;
5855  struct elf32_arm_stub_hash_entry *stub_entry;
5856  unsigned int r_type;
5857  bool sym_claimed = arm_stub_sym_claimed (stub_type);
5858
5859  BFD_ASSERT (stub_type != arm_stub_none);
5860  *new_stub = false;
5861
5862  if (sym_claimed)
5863    stub_name = sym_name;
5864  else
5865    {
5866      BFD_ASSERT (irela);
5867      BFD_ASSERT (section);
5868      BFD_ASSERT (section->id <= htab->top_id);
5869
5870      /* Support for grouping stub sections.  */
5871      id_sec = htab->stub_group[section->id].link_sec;
5872
5873      /* Get the name of this stub.  */
5874      stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela,
5875				       stub_type);
5876      if (!stub_name)
5877	return NULL;
5878    }
5879
5880  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, false,
5881				     false);
5882  /* The proper stub has already been created, just update its value.  */
5883  if (stub_entry != NULL)
5884    {
5885      if (!sym_claimed)
5886	free (stub_name);
5887      stub_entry->target_value = sym_value;
5888      return stub_entry;
5889    }
5890
5891  stub_entry = elf32_arm_add_stub (stub_name, section, htab, stub_type);
5892  if (stub_entry == NULL)
5893    {
5894      if (!sym_claimed)
5895	free (stub_name);
5896      return NULL;
5897    }
5898
5899  stub_entry->target_value = sym_value;
5900  stub_entry->target_section = sym_sec;
5901  stub_entry->stub_type = stub_type;
5902  stub_entry->h = hash;
5903  stub_entry->branch_type = branch_type;
5904
5905  if (sym_claimed)
5906    stub_entry->output_name = sym_name;
5907  else
5908    {
5909      if (sym_name == NULL)
5910	sym_name = "unnamed";
5911      stub_entry->output_name = (char *)
5912	bfd_alloc (htab->stub_bfd, sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
5913				   + strlen (sym_name));
5914      if (stub_entry->output_name == NULL)
5915	{
5916	  free (stub_name);
5917	  return NULL;
5918	}
5919
5920      /* For historical reasons, use the existing names for ARM-to-Thumb and
5921	 Thumb-to-ARM stubs.  */
5922      r_type = ELF32_R_TYPE (irela->r_info);
5923      if ((r_type == (unsigned int) R_ARM_THM_CALL
5924	   || r_type == (unsigned int) R_ARM_THM_JUMP24
5925	   || r_type == (unsigned int) R_ARM_THM_JUMP19)
5926	  && branch_type == ST_BRANCH_TO_ARM)
5927	sprintf (stub_entry->output_name, THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
5928      else if ((r_type == (unsigned int) R_ARM_CALL
5929		|| r_type == (unsigned int) R_ARM_JUMP24)
5930	       && branch_type == ST_BRANCH_TO_THUMB)
5931	sprintf (stub_entry->output_name, ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
5932      else
5933	sprintf (stub_entry->output_name, STUB_ENTRY_NAME, sym_name);
5934    }
5935
5936  *new_stub = true;
5937  return stub_entry;
5938}
5939
/* Scan symbols in INPUT_BFD to identify secure entry functions needing a
   gateway veneer to transition from non-secure to secure state, and create
   them accordingly.
5943
5944   "ARMv8-M Security Extensions: Requirements on Development Tools" document
5945   defines the conditions that govern Secure Gateway veneer creation for a
5946   given symbol <SYM> as follows:
5947   - it has function type
5948   - it has non local binding
5949   - a symbol named __acle_se_<SYM> (called special symbol) exists with the
5950     same type, binding and value as <SYM> (called normal symbol).
5951   An entry function can handle secure state transition itself in which case
5952   its special symbol would have a different value from the normal symbol.
5953
5954   OUT_ATTR gives the output attributes, SYM_HASHES the symbol index to hash
5955   entry mapping while HTAB gives the name to hash entry mapping.
   *CMSE_STUB_CREATED is increased by the number of secure gateway veneers
   created.

   Returns FALSE if an error occurred, TRUE otherwise.  */
5960
5961static bool
5962cmse_scan (bfd *input_bfd, struct elf32_arm_link_hash_table *htab,
5963	   obj_attribute *out_attr, struct elf_link_hash_entry **sym_hashes,
5964	   int *cmse_stub_created)
5965{
5966  const struct elf_backend_data *bed;
5967  Elf_Internal_Shdr *symtab_hdr;
5968  unsigned i, j, sym_count, ext_start;
5969  Elf_Internal_Sym *cmse_sym, *local_syms;
5970  struct elf32_arm_link_hash_entry *hash, *cmse_hash = NULL;
5971  enum arm_st_branch_type branch_type;
5972  char *sym_name, *lsym_name;
5973  bfd_vma sym_value;
5974  asection *section;
5975  struct elf32_arm_stub_hash_entry *stub_entry;
5976  bool is_v8m, new_stub, cmse_invalid, ret = true;
5977
5978  bed = get_elf_backend_data (input_bfd);
5979  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
5980  sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
5981  ext_start = symtab_hdr->sh_info;
5982  is_v8m = (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
5983	    && out_attr[Tag_CPU_arch_profile].i == 'M');
5984
5985  local_syms = (Elf_Internal_Sym *) symtab_hdr->contents;
5986  if (local_syms == NULL)
5987    local_syms = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
5988				       symtab_hdr->sh_info, 0, NULL, NULL,
5989				       NULL);
5990  if (symtab_hdr->sh_info && local_syms == NULL)
5991    return false;
5992
5993  /* Scan symbols.  */
5994  for (i = 0; i < sym_count; i++)
5995    {
5996      cmse_invalid = false;
5997
5998      if (i < ext_start)
5999	{
6000	  cmse_sym = &local_syms[i];
6001	  sym_name = bfd_elf_string_from_elf_section (input_bfd,
6002						      symtab_hdr->sh_link,
6003						      cmse_sym->st_name);
6004	  if (!sym_name || !startswith (sym_name, CMSE_PREFIX))
6005	    continue;
6006
6007	  /* Special symbol with local binding.  */
6008	  cmse_invalid = true;
6009	}
6010      else
6011	{
6012	  cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
6013	  sym_name = (char *) cmse_hash->root.root.root.string;
6014	  if (!startswith (sym_name, CMSE_PREFIX))
6015	    continue;
6016
6017	  /* Special symbol has incorrect binding or type.  */
6018	  if ((cmse_hash->root.root.type != bfd_link_hash_defined
6019	       && cmse_hash->root.root.type != bfd_link_hash_defweak)
6020	      || cmse_hash->root.type != STT_FUNC)
6021	    cmse_invalid = true;
6022	}
6023
6024      if (!is_v8m)
6025	{
6026	  _bfd_error_handler (_("%pB: special symbol `%s' only allowed for "
6027				"ARMv8-M architecture or later"),
6028			      input_bfd, sym_name);
	  is_v8m = true; /* Avoid multiple warnings.  */
6030	  ret = false;
6031	}
6032
6033      if (cmse_invalid)
6034	{
6035	  _bfd_error_handler (_("%pB: invalid special symbol `%s'; it must be"
6036				" a global or weak function symbol"),
6037			      input_bfd, sym_name);
6038	  ret = false;
6039	  if (i < ext_start)
6040	    continue;
6041	}
6042
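      /* Strip the CMSE_PREFIX (__acle_se_) to recover the name of the
	 associated standard symbol.  */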
6043      sym_name += strlen (CMSE_PREFIX);
6044      hash = (struct elf32_arm_link_hash_entry *)
6045	elf_link_hash_lookup (&(htab)->root, sym_name, false, false, true);
6046
6047      /* No associated normal symbol or it is neither global nor weak.  */
6048      if (!hash
6049	  || (hash->root.root.type != bfd_link_hash_defined
6050	      && hash->root.root.type != bfd_link_hash_defweak)
6051	  || hash->root.type != STT_FUNC)
6052	{
6053	  /* Initialize here to avoid warning about use of possibly
6054	     uninitialized variable.  */
6055	  j = 0;
6056
6057	  if (!hash)
6058	    {
6059	      /* Searching for a normal symbol with local binding.  */
6060	      for (; j < ext_start; j++)
6061		{
6062		  lsym_name =
6063		    bfd_elf_string_from_elf_section (input_bfd,
6064						     symtab_hdr->sh_link,
6065						     local_syms[j].st_name);
6066		  if (!strcmp (sym_name, lsym_name))
6067		    break;
6068		}
6069	    }
6070
6071	  if (hash || j < ext_start)
6072	    {
6073	      _bfd_error_handler
6074		(_("%pB: invalid standard symbol `%s'; it must be "
6075		   "a global or weak function symbol"),
6076		 input_bfd, sym_name);
6077	    }
6078	  else
6079	    _bfd_error_handler
6080	      (_("%pB: absent standard symbol `%s'"), input_bfd, sym_name);
6081	  ret = false;
6082	  if (!hash)
6083	    continue;
6084	}
6085
6086      sym_value = hash->root.root.u.def.value;
6087      section = hash->root.root.u.def.section;
6088
6089      if (cmse_hash->root.root.u.def.section != section)
6090	{
6091	  _bfd_error_handler
6092	    (_("%pB: `%s' and its special symbol are in different sections"),
6093	     input_bfd, sym_name);
6094	  ret = false;
6095	}
6096      if (cmse_hash->root.root.u.def.value != sym_value)
6097	continue; /* Ignore: could be an entry function starting with SG.  */
6098
      /* If this section is a link-once section that will be discarded,
	 then don't create any stubs.  */
6101      if (section->output_section == NULL)
6102	{
6103	  _bfd_error_handler
6104	    (_("%pB: entry function `%s' not output"), input_bfd, sym_name);
6105	  continue;
6106	}
6107
6108      if (hash->root.size == 0)
6109	{
6110	  _bfd_error_handler
6111	    (_("%pB: entry function `%s' is empty"), input_bfd, sym_name);
6112	  ret = false;
6113	}
6114
6115      if (!ret)
6116	continue;
6117      branch_type = ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
6118      stub_entry
6119	= elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
6120				 NULL, NULL, section, hash, sym_name,
6121				 sym_value, branch_type, &new_stub);
6122
6123      if (stub_entry == NULL)
6124	 ret = false;
6125      else
6126	{
6127	  BFD_ASSERT (new_stub);
6128	  (*cmse_stub_created)++;
6129	}
6130    }
6131
6132  if (!symtab_hdr->contents)
6133    free (local_syms);
6134  return ret;
6135}
6136
/* Return TRUE iff a symbol identified by its linker HASH entry is a secure
   code entry function, i.e. it can be called from non-secure code without
   using a veneer.  */
6140
6141static bool
6142cmse_entry_fct_p (struct elf32_arm_link_hash_entry *hash)
6143{
6144  bfd_byte contents[4];
6145  uint32_t first_insn;
6146  asection *section;
6147  file_ptr offset;
6148  bfd *abfd;
6149
6150  /* Defined symbol of function type.  */
6151  if (hash->root.root.type != bfd_link_hash_defined
6152      && hash->root.root.type != bfd_link_hash_defweak)
6153    return false;
6154  if (hash->root.type != STT_FUNC)
6155    return false;
6156
6157  /* Read first instruction.  */
6158  section = hash->root.root.u.def.section;
6159  abfd = section->owner;
6160  offset = hash->root.root.u.def.value - section->vma;
6161  if (!bfd_get_section_contents (abfd, section, contents, offset,
6162				 sizeof (contents)))
6163    return false;
6164
6165  first_insn = bfd_get_32 (abfd, contents);
6166
  /* Starts with an SG instruction (0xe97fe97f).  */
6168  return first_insn == 0xe97fe97f;
6169}
6170
/* Output the name (in the symbol table) of the veneer GEN_ENTRY if it is a
   new secure gateway veneer (i.e. the veneer was not in the input import
   library) and there is no output import library (GEN_INFO->out_implib_bfd
   is NULL).  */
6174
6175static bool
6176arm_list_new_cmse_stub (struct bfd_hash_entry *gen_entry, void *gen_info)
6177{
6178  struct elf32_arm_stub_hash_entry *stub_entry;
6179  struct bfd_link_info *info;
6180
6181  /* Massage our args to the form they really have.  */
6182  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
6183  info = (struct bfd_link_info *) gen_info;
6184
6185  if (info->out_implib_bfd)
6186    return true;
6187
6188  if (stub_entry->stub_type != arm_stub_cmse_branch_thumb_only)
6189    return true;
6190
6191  if (stub_entry->stub_offset == (bfd_vma) -1)
6192    _bfd_error_handler ("  %s", stub_entry->output_name);
6193
6194  return true;
6195}
6196
/* Set the offset of each secure gateway veneer so that its address remains
   identical to the one in the input import library referred to by
   HTAB->in_implib_bfd.  A warning is issued for veneers that disappeared
   (present in the input import library but absent from the executable being
   linked) or if new veneers appeared and there is no output import library
   (INFO->out_implib_bfd is NULL and *CMSE_STUB_CREATED is bigger than the
   number of secure gateway veneers found in the input import library).

   The function returns whether an error occurred.  If no error occurred,
   *CMSE_STUB_CREATED gives the number of SG veneers created by both
   cmse_scan and this function, and HTAB->new_cmse_stub_offset is set to the
   offset just past the highest veneer observed, so that new veneers can be
   laid out after it.  */
6209
6210static bool
6211set_cmse_veneer_addr_from_implib (struct bfd_link_info *info,
6212				  struct elf32_arm_link_hash_table *htab,
6213				  int *cmse_stub_created)
6214{
6215  long symsize;
6216  char *sym_name;
6217  flagword flags;
6218  long i, symcount;
6219  bfd *in_implib_bfd;
6220  asection *stub_out_sec;
6221  bool ret = true;
6222  Elf_Internal_Sym *intsym;
6223  const char *out_sec_name;
6224  bfd_size_type cmse_stub_size;
6225  asymbol **sympp = NULL, *sym;
6226  struct elf32_arm_link_hash_entry *hash;
6227  const insn_sequence *cmse_stub_template;
6228  struct elf32_arm_stub_hash_entry *stub_entry;
6229  int cmse_stub_template_size, new_cmse_stubs_created = *cmse_stub_created;
6230  bfd_vma veneer_value, stub_offset, next_cmse_stub_offset;
6231  bfd_vma cmse_stub_array_start = (bfd_vma) -1, cmse_stub_sec_vma = 0;
6232
6233  /* No input secure gateway import library.  */
6234  if (!htab->in_implib_bfd)
6235    return true;
6236
6237  in_implib_bfd = htab->in_implib_bfd;
6238  if (!htab->cmse_implib)
6239    {
6240      _bfd_error_handler (_("%pB: --in-implib only supported for Secure "
6241			    "Gateway import libraries"), in_implib_bfd);
6242      return false;
6243    }
6244
6245  /* Get symbol table size.  */
6246  symsize = bfd_get_symtab_upper_bound (in_implib_bfd);
6247  if (symsize < 0)
6248    return false;
6249
6250  /* Read in the input secure gateway import library's symbol table.  */
6251  sympp = (asymbol **) bfd_malloc (symsize);
6252  if (sympp == NULL)
6253    return false;
6254
6255  symcount = bfd_canonicalize_symtab (in_implib_bfd, sympp);
6256  if (symcount < 0)
6257    {
6258      ret = false;
6259      goto free_sym_buf;
6260    }
6261
6262  htab->new_cmse_stub_offset = 0;
6263  cmse_stub_size =
6264    find_stub_size_and_template (arm_stub_cmse_branch_thumb_only,
6265				 &cmse_stub_template,
6266				 &cmse_stub_template_size);
6267  out_sec_name =
6268    arm_dedicated_stub_output_section_name (arm_stub_cmse_branch_thumb_only);
6269  stub_out_sec =
6270    bfd_get_section_by_name (htab->obfd, out_sec_name);
6271  if (stub_out_sec != NULL)
6272    cmse_stub_sec_vma = stub_out_sec->vma;
6273
  /* Set the addresses of veneers mentioned in the input secure gateway
     import library's symbol table.  */
6276  for (i = 0; i < symcount; i++)
6277    {
6278      sym = sympp[i];
6279      flags = sym->flags;
6280      sym_name = (char *) bfd_asymbol_name (sym);
6281      intsym = &((elf_symbol_type *) sym)->internal_elf_sym;
6282
6283      if (sym->section != bfd_abs_section_ptr
6284	  || !(flags & (BSF_GLOBAL | BSF_WEAK))
6285	  || (flags & BSF_FUNCTION) != BSF_FUNCTION
6286	  || (ARM_GET_SYM_BRANCH_TYPE (intsym->st_target_internal)
6287	      != ST_BRANCH_TO_THUMB))
6288	{
6289	  _bfd_error_handler (_("%pB: invalid import library entry: `%s'; "
6290				"symbol should be absolute, global and "
6291				"refer to Thumb functions"),
6292			      in_implib_bfd, sym_name);
6293	  ret = false;
6294	  continue;
6295	}
6296
6297      veneer_value = bfd_asymbol_value (sym);
6298      stub_offset = veneer_value - cmse_stub_sec_vma;
6299      stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, sym_name,
6300					 false, false);
6301      hash = (struct elf32_arm_link_hash_entry *)
6302	elf_link_hash_lookup (&(htab)->root, sym_name, false, false, true);
6303
      /* A stub entry should have been created by cmse_scan, or the symbol
	 should be a secure function callable from non-secure code.  */
6306      if (!stub_entry && !hash)
6307	{
6308	  bool new_stub;
6309
6310	  _bfd_error_handler
6311	    (_("entry function `%s' disappeared from secure code"), sym_name);
6312	  hash = (struct elf32_arm_link_hash_entry *)
6313	    elf_link_hash_lookup (&(htab)->root, sym_name, true, true, true);
6314	  stub_entry
6315	    = elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
6316				     NULL, NULL, bfd_abs_section_ptr, hash,
6317				     sym_name, veneer_value,
6318				     ST_BRANCH_TO_THUMB, &new_stub);
6319	  if (stub_entry == NULL)
6320	    ret = false;
6321	  else
6322	  {
6323	    BFD_ASSERT (new_stub);
6324	    new_cmse_stubs_created++;
6325	    (*cmse_stub_created)++;
6326	  }
6327	  stub_entry->stub_template_size = stub_entry->stub_size = 0;
6328	  stub_entry->stub_offset = stub_offset;
6329	}
      /* The symbol found is not callable from non-secure code.  */
6331      else if (!stub_entry)
6332	{
6333	  if (!cmse_entry_fct_p (hash))
6334	    {
6335	      _bfd_error_handler (_("`%s' refers to a non entry function"),
6336				  sym_name);
6337	      ret = false;
6338	    }
6339	  continue;
6340	}
6341      else
6342	{
6343	  /* Only stubs for SG veneers should have been created.  */
6344	  BFD_ASSERT (stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
6345
6346	  /* Check visibility hasn't changed.  */
6347	  if (!!(flags & BSF_GLOBAL)
6348	      != (hash->root.root.type == bfd_link_hash_defined))
6349	    _bfd_error_handler
6350	      (_("%pB: visibility of symbol `%s' has changed"), in_implib_bfd,
6351	       sym_name);
6352
6353	  stub_entry->stub_offset = stub_offset;
6354	}
6355
      /* Size should match that of an SG veneer.  */
6357      if (intsym->st_size != cmse_stub_size)
6358	{
6359	  _bfd_error_handler (_("%pB: incorrect size for symbol `%s'"),
6360			      in_implib_bfd, sym_name);
6361	  ret = false;
6362	}
6363
6364      /* Previous veneer address is before current SG veneer section.  */
6365      if (veneer_value < cmse_stub_sec_vma)
6366	{
6367	  /* Avoid offset underflow.  */
6368	  if (stub_entry)
6369	    stub_entry->stub_offset = 0;
6370	  stub_offset = 0;
6371	  ret = false;
6372	}
6373
6374      /* Complain if stub offset not a multiple of stub size.  */
6375      if (stub_offset % cmse_stub_size)
6376	{
6377	  _bfd_error_handler
6378	    (_("offset of veneer for entry function `%s' not a multiple of "
6379	       "its size"), sym_name);
6380	  ret = false;
6381	}
6382
6383      if (!ret)
6384	continue;
6385
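      /* This veneer was already present in the input import library:
	 account for it and remember both the lowest veneer address seen
	 and the offset just past the highest one, so that any new veneers
	 can be laid out after the existing ones.  */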
6386      new_cmse_stubs_created--;
6387      if (veneer_value < cmse_stub_array_start)
6388	cmse_stub_array_start = veneer_value;
6389      next_cmse_stub_offset = stub_offset + ((cmse_stub_size + 7) & ~7);
6390      if (next_cmse_stub_offset > htab->new_cmse_stub_offset)
6391	htab->new_cmse_stub_offset = next_cmse_stub_offset;
6392    }
6393
6394  if (!info->out_implib_bfd && new_cmse_stubs_created != 0)
6395    {
6396      BFD_ASSERT (new_cmse_stubs_created > 0);
6397      _bfd_error_handler
6398	(_("new entry function(s) introduced but no output import library "
6399	   "specified:"));
6400      bfd_hash_traverse (&htab->stub_hash_table, arm_list_new_cmse_stub, info);
6401    }
6402
6403  if (cmse_stub_array_start != cmse_stub_sec_vma)
6404    {
6405      _bfd_error_handler
6406	(_("start address of `%s' is different from previous link"),
6407	 out_sec_name);
6408      ret = false;
6409    }
6410
6411 free_sym_buf:
6412  free (sympp);
6413  return ret;
6414}
6415
6416/* Determine and set the size of the stub section for a final link.
6417
6418   The basic idea here is to examine all the relocations looking for
6419   PC-relative calls to a target that is unreachable with a "bl"
6420   instruction.  */
6421
6422bool
6423elf32_arm_size_stubs (bfd *output_bfd,
6424		      bfd *stub_bfd,
6425		      struct bfd_link_info *info,
6426		      bfd_signed_vma group_size,
6427		      asection * (*add_stub_section) (const char *, asection *,
6428						      asection *,
6429						      unsigned int),
6430		      void (*layout_sections_again) (void))
6431{
6432  bool ret = true;
6433  obj_attribute *out_attr;
6434  int cmse_stub_created = 0;
6435  bfd_size_type stub_group_size;
6436  bool m_profile, stubs_always_after_branch, first_veneer_scan = true;
6437  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
6438  struct a8_erratum_fix *a8_fixes = NULL;
6439  unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
6440  struct a8_erratum_reloc *a8_relocs = NULL;
6441  unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
6442
6443  if (htab == NULL)
6444    return false;
6445
6446  if (htab->fix_cortex_a8)
6447    {
6448      a8_fixes = (struct a8_erratum_fix *)
6449	  bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
6450      a8_relocs = (struct a8_erratum_reloc *)
6451	  bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
6452    }
6453
6454  /* Propagate mach to stub bfd, because it may not have been
6455     finalized when we created stub_bfd.  */
6456  bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
6457		     bfd_get_mach (output_bfd));
6458
6459  /* Stash our params away.  */
6460  htab->stub_bfd = stub_bfd;
6461  htab->add_stub_section = add_stub_section;
6462  htab->layout_sections_again = layout_sections_again;
6463  stubs_always_after_branch = group_size < 0;
6464
6465  out_attr = elf_known_obj_attributes_proc (output_bfd);
6466  m_profile = out_attr[Tag_CPU_arch_profile].i == 'M';
6467
6468  /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
6469     as the first half of a 32-bit branch straddling two 4K pages.  This is a
6470     crude way of enforcing that.  */
6471  if (htab->fix_cortex_a8)
6472    stubs_always_after_branch = 1;
6473
6474  if (group_size < 0)
6475    stub_group_size = -group_size;
6476  else
6477    stub_group_size = group_size;
6478
6479  if (stub_group_size == 1)
6480    {
6481      /* Default values.  */
      /* The Thumb branch range of +-4MB has to be used as the default
	 maximum size (a given section can contain both ARM and Thumb
	 code, so the worst case has to be taken into account).

	 This value is 24K less than that (4194304 - 4170000 = 24304
	 bytes), which allows for 2025 12-byte stubs.  If we exceed
	 that, then we will fail to link.
6488	 The user will have to relink with an explicit group size
6489	 option.  */
6490      stub_group_size = 4170000;
6491    }
6492
6493  group_sections (htab, stub_group_size, stubs_always_after_branch);
6494
6495  /* If we're applying the cortex A8 fix, we need to determine the
6496     program header size now, because we cannot change it later --
6497     that could alter section placements.  Notice the A8 erratum fix
6498     ends up requiring the section addresses to remain unchanged
6499     modulo the page size.  That's something we cannot represent
6500     inside BFD, and we don't want to force the section alignment to
6501     be the page size.  */
6502  if (htab->fix_cortex_a8)
6503    (*htab->layout_sections_again) ();
6504
6505  while (1)
6506    {
6507      bfd *input_bfd;
6508      unsigned int bfd_indx;
6509      asection *stub_sec;
6510      enum elf32_arm_stub_type stub_type;
6511      bool stub_changed = false;
6512      unsigned prev_num_a8_fixes = num_a8_fixes;
6513
6514      num_a8_fixes = 0;
6515      for (input_bfd = info->input_bfds, bfd_indx = 0;
6516	   input_bfd != NULL;
6517	   input_bfd = input_bfd->link.next, bfd_indx++)
6518	{
6519	  Elf_Internal_Shdr *symtab_hdr;
6520	  asection *section;
6521	  Elf_Internal_Sym *local_syms = NULL;
6522
6523	  if (!is_arm_elf (input_bfd))
6524	    continue;
6525	  if ((input_bfd->flags & DYNAMIC) != 0
6526	      && (elf_sym_hashes (input_bfd) == NULL
6527		  || (elf_dyn_lib_class (input_bfd) & DYN_AS_NEEDED) != 0))
6528	    continue;
6529
6530	  num_a8_relocs = 0;
6531
6532	  /* We'll need the symbol table in a second.  */
6533	  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
6534	  if (symtab_hdr->sh_info == 0)
6535	    continue;
6536
	  /* Limit the scan of symbols to object files whose profile is
	     Microcontroller, so as not to hinder performance in the
	     general case.  */
6539	  if (m_profile && first_veneer_scan)
6540	    {
6541	      struct elf_link_hash_entry **sym_hashes;
6542
6543	      sym_hashes = elf_sym_hashes (input_bfd);
6544	      if (!cmse_scan (input_bfd, htab, out_attr, sym_hashes,
6545			      &cmse_stub_created))
6546		goto error_ret_free_local;
6547
6548	      if (cmse_stub_created != 0)
6549		stub_changed = true;
6550	    }
6551
6552	  /* Walk over each section attached to the input bfd.  */
6553	  for (section = input_bfd->sections;
6554	       section != NULL;
6555	       section = section->next)
6556	    {
6557	      Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
6558
6559	      /* If there aren't any relocs, then there's nothing more
6560		 to do.  */
6561	      if ((section->flags & SEC_RELOC) == 0
6562		  || section->reloc_count == 0
6563		  || (section->flags & SEC_CODE) == 0)
6564		continue;
6565
6566	      /* If this section is a link-once section that will be
6567		 discarded, then don't create any stubs.  */
6568	      if (section->output_section == NULL
6569		  || section->output_section->owner != output_bfd)
6570		continue;
6571
6572	      /* Get the relocs.  */
6573	      internal_relocs
6574		= _bfd_elf_link_read_relocs (input_bfd, section, NULL,
6575					     NULL, info->keep_memory);
6576	      if (internal_relocs == NULL)
6577		goto error_ret_free_local;
6578
6579	      /* Now examine each relocation.  */
6580	      irela = internal_relocs;
6581	      irelaend = irela + section->reloc_count;
6582	      for (; irela < irelaend; irela++)
6583		{
6584		  unsigned int r_type, r_indx;
6585		  asection *sym_sec;
6586		  bfd_vma sym_value;
6587		  bfd_vma destination;
6588		  struct elf32_arm_link_hash_entry *hash;
6589		  const char *sym_name;
6590		  unsigned char st_type;
6591		  enum arm_st_branch_type branch_type;
6592		  bool created_stub = false;
6593
6594		  r_type = ELF32_R_TYPE (irela->r_info);
6595		  r_indx = ELF32_R_SYM (irela->r_info);
6596
6597		  if (r_type >= (unsigned int) R_ARM_max)
6598		    {
6599		      bfd_set_error (bfd_error_bad_value);
6600		    error_ret_free_internal:
6601		      if (elf_section_data (section)->relocs == NULL)
6602			free (internal_relocs);
6603		    /* Fall through.  */
6604		    error_ret_free_local:
6605		      if (symtab_hdr->contents != (unsigned char *) local_syms)
6606			free (local_syms);
6607		      return false;
6608		    }
6609
6610		  hash = NULL;
6611		  if (r_indx >= symtab_hdr->sh_info)
6612		    hash = elf32_arm_hash_entry
6613		      (elf_sym_hashes (input_bfd)
6614		       [r_indx - symtab_hdr->sh_info]);
6615
		  /* Only look for stubs on branch instructions, or
		     non-relaxed TLS calls.  */
6618		  if ((r_type != (unsigned int) R_ARM_CALL)
6619		      && (r_type != (unsigned int) R_ARM_THM_CALL)
6620		      && (r_type != (unsigned int) R_ARM_JUMP24)
6621		      && (r_type != (unsigned int) R_ARM_THM_JUMP19)
6622		      && (r_type != (unsigned int) R_ARM_THM_XPC22)
6623		      && (r_type != (unsigned int) R_ARM_THM_JUMP24)
6624		      && (r_type != (unsigned int) R_ARM_PLT32)
6625		      && !((r_type == (unsigned int) R_ARM_TLS_CALL
6626			    || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
6627			   && r_type == (elf32_arm_tls_transition
6628					 (info, r_type,
6629					  (struct elf_link_hash_entry *) hash))
6630			   && ((hash ? hash->tls_type
6631				: (elf32_arm_local_got_tls_type
6632				   (input_bfd)[r_indx]))
6633			       & GOT_TLS_GDESC) != 0))
6634		    continue;
6635
6636		  /* Now determine the call target and its name, value and
6637		     section.  */
6638		  sym_sec = NULL;
6639		  sym_value = 0;
6640		  destination = 0;
6641		  sym_name = NULL;
6642
6643		  if (r_type == (unsigned int) R_ARM_TLS_CALL
6644		      || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
6645		    {
6646		      /* A non-relaxed TLS call.  The target is the
6647			 PLT-resident trampoline; it has nothing to do
6648			 with the symbol.  */
6649		      BFD_ASSERT (htab->tls_trampoline > 0);
6650		      sym_sec = htab->root.splt;
6651		      sym_value = htab->tls_trampoline;
6652		      hash = 0;
6653		      st_type = STT_FUNC;
6654		      branch_type = ST_BRANCH_TO_ARM;
6655		    }
6656		  else if (!hash)
6657		    {
6658		      /* It's a local symbol.  */
6659		      Elf_Internal_Sym *sym;
6660
6661		      if (local_syms == NULL)
6662			{
6663			  local_syms
6664			    = (Elf_Internal_Sym *) symtab_hdr->contents;
6665			  if (local_syms == NULL)
6666			    local_syms
6667			      = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
6668						      symtab_hdr->sh_info, 0,
6669						      NULL, NULL, NULL);
6670			  if (local_syms == NULL)
6671			    goto error_ret_free_internal;
6672			}
6673
6674		      sym = local_syms + r_indx;
6675		      if (sym->st_shndx == SHN_UNDEF)
6676			sym_sec = bfd_und_section_ptr;
6677		      else if (sym->st_shndx == SHN_ABS)
6678			sym_sec = bfd_abs_section_ptr;
6679		      else if (sym->st_shndx == SHN_COMMON)
6680			sym_sec = bfd_com_section_ptr;
6681		      else
6682			sym_sec =
6683			  bfd_section_from_elf_index (input_bfd, sym->st_shndx);
6684
6685		      if (!sym_sec)
6686			/* This is an undefined symbol.  It can never
6687			   be resolved.  */
6688			continue;
6689
6690		      if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
6691			sym_value = sym->st_value;
6692		      destination = (sym_value + irela->r_addend
6693				     + sym_sec->output_offset
6694				     + sym_sec->output_section->vma);
6695		      st_type = ELF_ST_TYPE (sym->st_info);
6696		      branch_type =
6697			ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
6698		      sym_name
6699			= bfd_elf_string_from_elf_section (input_bfd,
6700							   symtab_hdr->sh_link,
6701							   sym->st_name);
6702		    }
6703		  else
6704		    {
6705		      /* It's an external symbol.  */
6706		      while (hash->root.root.type == bfd_link_hash_indirect
6707			     || hash->root.root.type == bfd_link_hash_warning)
6708			hash = ((struct elf32_arm_link_hash_entry *)
6709				hash->root.root.u.i.link);
6710
6711		      if (hash->root.root.type == bfd_link_hash_defined
6712			  || hash->root.root.type == bfd_link_hash_defweak)
6713			{
6714			  sym_sec = hash->root.root.u.def.section;
6715			  sym_value = hash->root.root.u.def.value;
6716
6717			  struct elf32_arm_link_hash_table *globals =
6718						  elf32_arm_hash_table (info);
6719
6720			  /* For a destination in a shared library,
6721			     use the PLT stub as target address to
6722			     decide whether a branch stub is
6723			     needed.  */
6724			  if (globals != NULL
6725			      && globals->root.splt != NULL
6726			      && hash != NULL
6727			      && hash->root.plt.offset != (bfd_vma) -1)
6728			    {
6729			      sym_sec = globals->root.splt;
6730			      sym_value = hash->root.plt.offset;
6731			      if (sym_sec->output_section != NULL)
6732				destination = (sym_value
6733					       + sym_sec->output_offset
6734					       + sym_sec->output_section->vma);
6735			    }
6736			  else if (sym_sec->output_section != NULL)
6737			    destination = (sym_value + irela->r_addend
6738					   + sym_sec->output_offset
6739					   + sym_sec->output_section->vma);
6740			}
6741		      else if ((hash->root.root.type == bfd_link_hash_undefined)
6742			       || (hash->root.root.type == bfd_link_hash_undefweak))
6743			{
6744			  /* For a shared library, use the PLT stub as the
6745			     target address to decide whether a long
6746			     branch stub is needed.
6747			     Undefined symbols in absolute code cannot be handled.  */
6748			  struct elf32_arm_link_hash_table *globals =
6749			    elf32_arm_hash_table (info);
6750
6751			  if (globals != NULL
6752			      && globals->root.splt != NULL
6753			      && hash != NULL
6754			      && hash->root.plt.offset != (bfd_vma) -1)
6755			    {
6756			      sym_sec = globals->root.splt;
6757			      sym_value = hash->root.plt.offset;
6758			      if (sym_sec->output_section != NULL)
6759				destination = (sym_value
6760					       + sym_sec->output_offset
6761					       + sym_sec->output_section->vma);
6762			    }
6763			  else
6764			    continue;
6765			}
6766		      else
6767			{
6768			  bfd_set_error (bfd_error_bad_value);
6769			  goto error_ret_free_internal;
6770			}
6771		      st_type = hash->root.type;
6772		      branch_type =
6773			ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
6774		      sym_name = hash->root.root.root.string;
6775		    }
6776
6777		  do
6778		    {
6779		      bool new_stub;
6780		      struct elf32_arm_stub_hash_entry *stub_entry;
6781
6782		      /* Determine what (if any) linker stub is needed.  */
6783		      stub_type = arm_type_of_stub (info, section, irela,
6784						    st_type, &branch_type,
6785						    hash, destination, sym_sec,
6786						    input_bfd, sym_name);
6787		      if (stub_type == arm_stub_none)
6788			break;
6789
6790		      /* We've either created a stub for this reloc already,
6791			 or we are about to.  */
6792		      stub_entry =
6793			elf32_arm_create_stub (htab, stub_type, section, irela,
6794					       sym_sec, hash,
6795					       (char *) sym_name, sym_value,
6796					       branch_type, &new_stub);
6797
6798		      created_stub = stub_entry != NULL;
6799		      if (!created_stub)
6800			goto error_ret_free_internal;
6801		      else if (!new_stub)
6802			break;
6803		      else
6804			stub_changed = true;
6805		    }
6806		  while (0);
6807
6808		  /* Look for relocations which might trigger Cortex-A8
6809		     erratum.  */
6810		  if (htab->fix_cortex_a8
6811		      && (r_type == (unsigned int) R_ARM_THM_JUMP24
6812			  || r_type == (unsigned int) R_ARM_THM_JUMP19
6813			  || r_type == (unsigned int) R_ARM_THM_CALL
6814			  || r_type == (unsigned int) R_ARM_THM_XPC22))
6815		    {
6816		      bfd_vma from = section->output_section->vma
6817				     + section->output_offset
6818				     + irela->r_offset;
6819
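		      /* Candidates are 32-bit Thumb branch instructions whose
			 first halfword occupies the last two bytes of a 4KiB
			 page (offset 0xffe), i.e. branches that span a 4KiB
			 boundary.  */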
6820		      if ((from & 0xfff) == 0xffe)
6821			{
6822			  /* Found a candidate.  Note we haven't checked that
6823			     the destination is within 4K here: if we did so
6824			     (and didn't create an entry in a8_relocs) we
6825			     couldn't tell that a branch should have been
6826			     relocated when scanning later.  */
6827			  if (num_a8_relocs == a8_reloc_table_size)
6828			    {
6829			      a8_reloc_table_size *= 2;
6830			      a8_relocs = (struct a8_erratum_reloc *)
6831				  bfd_realloc (a8_relocs,
6832					       sizeof (struct a8_erratum_reloc)
6833					       * a8_reloc_table_size);
6834			    }
6835
6836			  a8_relocs[num_a8_relocs].from = from;
6837			  a8_relocs[num_a8_relocs].destination = destination;
6838			  a8_relocs[num_a8_relocs].r_type = r_type;
6839			  a8_relocs[num_a8_relocs].branch_type = branch_type;
6840			  a8_relocs[num_a8_relocs].sym_name = sym_name;
6841			  a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
6842			  a8_relocs[num_a8_relocs].hash = hash;
6843
6844			  num_a8_relocs++;
6845			}
6846		    }
6847		}
6848
6849	      /* We're done with the internal relocs, free them.  */
6850	      if (elf_section_data (section)->relocs == NULL)
6851		free (internal_relocs);
6852	    }
6853
6854	  if (htab->fix_cortex_a8)
6855	    {
6856	      /* Sort relocs which might apply to Cortex-A8 erratum.  */
6857	      qsort (a8_relocs, num_a8_relocs,
6858		     sizeof (struct a8_erratum_reloc),
6859		     &a8_reloc_compare);
6860
6861	      /* Scan for branches which might trigger Cortex-A8 erratum.  */
6862	      if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
6863					  &num_a8_fixes, &a8_fix_table_size,
6864					  a8_relocs, num_a8_relocs,
6865					  prev_num_a8_fixes, &stub_changed)
6866		  != 0)
6867		goto error_ret_free_local;
6868	    }
6869
6870	  if (local_syms != NULL
6871	      && symtab_hdr->contents != (unsigned char *) local_syms)
6872	    {
6873	      if (!info->keep_memory)
6874		free (local_syms);
6875	      else
6876		symtab_hdr->contents = (unsigned char *) local_syms;
6877	    }
6878	}
6879
6880      if (first_veneer_scan
6881	  && !set_cmse_veneer_addr_from_implib (info, htab,
6882						&cmse_stub_created))
6883	ret = false;
6884
6885      if (prev_num_a8_fixes != num_a8_fixes)
6886	stub_changed = true;
6887
6888      if (!stub_changed)
6889	break;
6890
6891      /* OK, we've added some stubs.  Find out the new size of the
6892	 stub sections.  */
6893      for (stub_sec = htab->stub_bfd->sections;
6894	   stub_sec != NULL;
6895	   stub_sec = stub_sec->next)
6896	{
6897	  /* Ignore non-stub sections.  */
6898	  if (!strstr (stub_sec->name, STUB_SUFFIX))
6899	    continue;
6900
6901	  stub_sec->size = 0;
6902	}
6903
6904      /* Add new SG veneers after those already in the input import
6905	 library.  */
6906      for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
6907	   stub_type++)
6908	{
6909	  bfd_vma *start_offset_p;
6910	  asection **stub_sec_p;
6911
6912	  start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
6913	  stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
6914	  if (start_offset_p == NULL)
6915	    continue;
6916
6917	  BFD_ASSERT (stub_sec_p != NULL);
6918	  if (*stub_sec_p != NULL)
6919	    (*stub_sec_p)->size = *start_offset_p;
6920	}
6921
6922      /* Compute stub section size, considering padding.  */
6923      bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
6924      for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
6925	   stub_type++)
6926	{
6927	  int size, padding;
6928	  asection **stub_sec_p;
6929
6930	  padding = arm_dedicated_stub_section_padding (stub_type);
6931	  stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
6932	  /* Skip if no stub input section or no stub section padding
6933	     required.  */
6934	  if ((stub_sec_p != NULL && *stub_sec_p == NULL) || padding == 0)
6935	    continue;
6936	  /* Stub section padding required but no dedicated section.  */
6937	  BFD_ASSERT (stub_sec_p);
6938
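	  /* Round the stub section size up to the next multiple of the
	     required padding; the rounding below assumes PADDING is a
	     power of two.  */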
6939	  size = (*stub_sec_p)->size;
6940	  size = (size + padding - 1) & ~(padding - 1);
6941	  (*stub_sec_p)->size = size;
6942	}
6943
6944      /* Add Cortex-A8 erratum veneers to stub section sizes too.  */
6945      if (htab->fix_cortex_a8)
6946	for (i = 0; i < num_a8_fixes; i++)
6947	  {
6948	    stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
6949			 a8_fixes[i].section, htab, a8_fixes[i].stub_type);
6950
6951	    if (stub_sec == NULL)
6952	      return false;
6953
6954	    stub_sec->size
6955	      += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
6956					      NULL);
6957	  }
6958
6959
6960      /* Ask the linker to do its stuff.  */
6961      (*htab->layout_sections_again) ();
6962      first_veneer_scan = false;
6963    }
6964
6965  /* Add stubs for Cortex-A8 erratum fixes now.  */
6966  if (htab->fix_cortex_a8)
6967    {
6968      for (i = 0; i < num_a8_fixes; i++)
6969	{
6970	  struct elf32_arm_stub_hash_entry *stub_entry;
6971	  char *stub_name = a8_fixes[i].stub_name;
6972	  asection *section = a8_fixes[i].section;
6973	  unsigned int section_id = a8_fixes[i].section->id;
6974	  asection *link_sec = htab->stub_group[section_id].link_sec;
6975	  asection *stub_sec = htab->stub_group[section_id].stub_sec;
6976	  const insn_sequence *template_sequence;
6977	  int template_size, size = 0;
6978
6979	  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
6980					     true, false);
6981	  if (stub_entry == NULL)
6982	    {
6983	      _bfd_error_handler (_("%pB: cannot create stub entry %s"),
6984				  section->owner, stub_name);
6985	      return false;
6986	    }
6987
6988	  stub_entry->stub_sec = stub_sec;
6989	  stub_entry->stub_offset = (bfd_vma) -1;
6990	  stub_entry->id_sec = link_sec;
6991	  stub_entry->stub_type = a8_fixes[i].stub_type;
6992	  stub_entry->source_value = a8_fixes[i].offset;
6993	  stub_entry->target_section = a8_fixes[i].section;
6994	  stub_entry->target_value = a8_fixes[i].target_offset;
6995	  stub_entry->orig_insn = a8_fixes[i].orig_insn;
6996	  stub_entry->branch_type = a8_fixes[i].branch_type;
6997
6998	  size = find_stub_size_and_template (a8_fixes[i].stub_type,
6999					      &template_sequence,
7000					      &template_size);
7001
7002	  stub_entry->stub_size = size;
7003	  stub_entry->stub_template = template_sequence;
7004	  stub_entry->stub_template_size = template_size;
7005	}
7006
7007      /* Stash the Cortex-A8 erratum fix array for use later in
7008	 elf32_arm_write_section().  */
7009      htab->a8_erratum_fixes = a8_fixes;
7010      htab->num_a8_erratum_fixes = num_a8_fixes;
7011    }
7012  else
7013    {
7014      htab->a8_erratum_fixes = NULL;
7015      htab->num_a8_erratum_fixes = 0;
7016    }
7017  return ret;
7018}
7019
7020/* Build all the stubs associated with the current output file.  The
7021   stubs are kept in a hash table attached to the main linker hash
7022   table.  We also set up the .plt entries for statically linked PIC
7023   functions here.  This function is called via arm_elf_finish in the
7024   linker.  */
7025
7026bool
7027elf32_arm_build_stubs (struct bfd_link_info *info)
7028{
7029  asection *stub_sec;
7030  struct bfd_hash_table *table;
7031  enum elf32_arm_stub_type stub_type;
7032  struct elf32_arm_link_hash_table *htab;
7033
7034  htab = elf32_arm_hash_table (info);
7035  if (htab == NULL)
7036    return false;
7037
7038  for (stub_sec = htab->stub_bfd->sections;
7039       stub_sec != NULL;
7040       stub_sec = stub_sec->next)
7041    {
7042      bfd_size_type size;
7043
7044      /* Ignore non-stub sections.  */
7045      if (!strstr (stub_sec->name, STUB_SUFFIX))
7046	continue;
7047
7048      /* Allocate memory to hold the linker stubs.  Zeroing the stub sections
7049	 must at least be done for stub sections requiring padding and for SG
7050	 veneers, to ensure that non-secure code branching to a removed SG
7051	 veneer causes an error.  */
7052      size = stub_sec->size;
7053      stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
7054      if (stub_sec->contents == NULL && size != 0)
7055	return false;
7056
7057      stub_sec->size = 0;
7058    }
7059
7060  /* Add new SG veneers after those already in the input import library.  */
7061  for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
7062    {
7063      bfd_vma *start_offset_p;
7064      asection **stub_sec_p;
7065
7066      start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
7067      stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
7068      if (start_offset_p == NULL)
7069	continue;
7070
7071      BFD_ASSERT (stub_sec_p != NULL);
7072      if (*stub_sec_p != NULL)
7073	(*stub_sec_p)->size = *start_offset_p;
7074    }
7075
7076  /* Build the stubs as directed by the stub hash table.  */
7077  table = &htab->stub_hash_table;
7078  bfd_hash_traverse (table, arm_build_one_stub, info);
7079  if (htab->fix_cortex_a8)
7080    {
7081      /* Place the cortex a8 stubs last.  */
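      /* Negating the flag makes this second traversal emit only the
	 Cortex-A8 stubs that the first traversal skipped, so they are
	 laid out after all the other stubs.  */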
7082      htab->fix_cortex_a8 = -1;
7083      bfd_hash_traverse (table, arm_build_one_stub, info);
7084    }
7085
7086  return true;
7087}
7088
7089/* Locate the Thumb encoded calling stub for NAME.  */
7090
7091static struct elf_link_hash_entry *
7092find_thumb_glue (struct bfd_link_info *link_info,
7093		 const char *name,
7094		 char **error_message)
7095{
7096  char *tmp_name;
7097  struct elf_link_hash_entry *hash;
7098  struct elf32_arm_link_hash_table *hash_table;
7099
7100  /* We need a pointer to the armelf specific hash table.  */
7101  hash_table = elf32_arm_hash_table (link_info);
7102  if (hash_table == NULL)
7103    return NULL;
7104
7105  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7106				  + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
7107
7108  BFD_ASSERT (tmp_name);
7109
7110  sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
7111
7112  hash = elf_link_hash_lookup
7113    (&(hash_table)->root, tmp_name, false, false, true);
7114
7115  if (hash == NULL
7116      && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
7117		   "Thumb", tmp_name, name) == -1)
7118    *error_message = (char *) bfd_errmsg (bfd_error_system_call);
7119
7120  free (tmp_name);
7121
7122  return hash;
7123}
7124
7125/* Locate the ARM encoded calling stub for NAME.  */
7126
7127static struct elf_link_hash_entry *
7128find_arm_glue (struct bfd_link_info *link_info,
7129	       const char *name,
7130	       char **error_message)
7131{
7132  char *tmp_name;
7133  struct elf_link_hash_entry *myh;
7134  struct elf32_arm_link_hash_table *hash_table;
7135
7136  /* We need a pointer to the elfarm specific hash table.  */
7137  hash_table = elf32_arm_hash_table (link_info);
7138  if (hash_table == NULL)
7139    return NULL;
7140
7141  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7142				  + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
7143  BFD_ASSERT (tmp_name);
7144
7145  sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
7146
7147  myh = elf_link_hash_lookup
7148    (&(hash_table)->root, tmp_name, false, false, true);
7149
7150  if (myh == NULL
7151      && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
7152		   "ARM", tmp_name, name) == -1)
7153    *error_message = (char *) bfd_errmsg (bfd_error_system_call);
7154
7155  free (tmp_name);
7156
7157  return myh;
7158}
7159
7160/* ARM->Thumb glue (static images):
7161
7162   .arm
7163   __func_from_arm:
7164   ldr r12, __func_addr
7165   bx  r12
7166   __func_addr:
7167   .word func    @ behave as if you saw an ARM_32 reloc.
7168
7169   (v5t static images)
7170   .arm
7171   __func_from_arm:
7172   ldr pc, __func_addr
7173   __func_addr:
7174   .word func    @ behave as if you saw an ARM_32 reloc.
7175
7176   (relocatable images)
7177   .arm
7178   __func_from_arm:
7179   ldr r12, __func_offset
7180   add r12, r12, pc
7181   bx  r12
7182   __func_offset:
7183   .word func - .   */
7184
7185#define ARM2THUMB_STATIC_GLUE_SIZE 12
7186static const insn32 a2t1_ldr_insn = 0xe59fc000;
7187static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
7188static const insn32 a2t3_func_addr_insn = 0x00000001;
7189
7190#define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
7191static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
7192static const insn32 a2t2v5_func_addr_insn = 0x00000001;
7193
7194#define ARM2THUMB_PIC_GLUE_SIZE 16
7195static const insn32 a2t1p_ldr_insn = 0xe59fc004;
7196static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
7197static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
7198
7199/* Thumb->ARM:				Thumb->(non-interworking aware) ARM
7200
7201     .thumb				.thumb
7202     .align 2				.align 2
7203 __func_from_thumb:		    __func_from_thumb:
7204     bx pc				push {r6, lr}
7205     nop				ldr  r6, __func_addr
7206     .arm				mov  lr, pc
7207     b func				bx   r6
7208					.arm
7209				    ;; back_to_thumb
7210					ldmia r13!, {r6, lr}
7211					bx    lr
7212				    __func_addr:
7213					.word	     func  */
7214
7215#define THUMB2ARM_GLUE_SIZE 8
7216static const insn16 t2a1_bx_pc_insn = 0x4778;
7217static const insn16 t2a2_noop_insn = 0x46c0;
7218static const insn32 t2a3_b_insn = 0xea000000;
7219
7220#define VFP11_ERRATUM_VENEER_SIZE 8
7221#define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
7222#define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24
7223
7224#define ARM_BX_VENEER_SIZE 12
7225static const insn32 armbx1_tst_insn = 0xe3100001;
7226static const insn32 armbx2_moveq_insn = 0x01a0f000;
7227static const insn32 armbx3_bx_insn = 0xe12fff10;
7228
7229#ifndef ELFARM_NABI_C_INCLUDED
7230static void
7231arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
7232{
7233  asection * s;
7234  bfd_byte * contents;
7235
7236  if (size == 0)
7237    {
7238      /* Do not include empty glue sections in the output.  */
7239      if (abfd != NULL)
7240	{
7241	  s = bfd_get_linker_section (abfd, name);
7242	  if (s != NULL)
7243	    s->flags |= SEC_EXCLUDE;
7244	}
7245      return;
7246    }
7247
7248  BFD_ASSERT (abfd != NULL);
7249
7250  s = bfd_get_linker_section (abfd, name);
7251  BFD_ASSERT (s != NULL);
7252
7253  contents = (bfd_byte *) bfd_zalloc (abfd, size);
7254
7255  BFD_ASSERT (s->size == size);
7256  s->contents = contents;
7257}
7258
7259bool
7260bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
7261{
7262  struct elf32_arm_link_hash_table * globals;
7263
7264  globals = elf32_arm_hash_table (info);
7265  BFD_ASSERT (globals != NULL);
7266
7267  arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7268				   globals->arm_glue_size,
7269				   ARM2THUMB_GLUE_SECTION_NAME);
7270
7271  arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7272				   globals->thumb_glue_size,
7273				   THUMB2ARM_GLUE_SECTION_NAME);
7274
7275  arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7276				   globals->vfp11_erratum_glue_size,
7277				   VFP11_ERRATUM_VENEER_SECTION_NAME);
7278
7279  arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7280				   globals->stm32l4xx_erratum_glue_size,
7281				   STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7282
7283  arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7284				   globals->bx_glue_size,
7285				   ARM_BX_GLUE_SECTION_NAME);
7286
7287  return true;
7288}
7289
7290/* Allocate space and symbols for calling a Thumb function from ARM mode.
7291   Returns the symbol identifying the stub.  */
7292
7293static struct elf_link_hash_entry *
7294record_arm_to_thumb_glue (struct bfd_link_info * link_info,
7295			  struct elf_link_hash_entry * h)
7296{
7297  const char * name = h->root.root.string;
7298  asection * s;
7299  char * tmp_name;
7300  struct elf_link_hash_entry * myh;
7301  struct bfd_link_hash_entry * bh;
7302  struct elf32_arm_link_hash_table * globals;
7303  bfd_vma val;
7304  bfd_size_type size;
7305
7306  globals = elf32_arm_hash_table (link_info);
7307  BFD_ASSERT (globals != NULL);
7308  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7309
7310  s = bfd_get_linker_section
7311    (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
7312
7313  BFD_ASSERT (s != NULL);
7314
7315  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7316				  + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
7317  BFD_ASSERT (tmp_name);
7318
7319  sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
7320
7321  myh = elf_link_hash_lookup
7322    (&(globals)->root, tmp_name, false, false, true);
7323
7324  if (myh != NULL)
7325    {
7326      /* We've already seen this guy.  */
7327      free (tmp_name);
7328      return myh;
7329    }
7330
7331  /* The only trick here is using globals->arm_glue_size as the value.
7332     Even though the section isn't allocated yet, this is where we will be
7333     putting it.  The +1 on the value marks that the stub has not been
7334     output yet - not that it is a Thumb function.  */
7335  bh = NULL;
7336  val = globals->arm_glue_size + 1;
7337  _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
7338				    tmp_name, BSF_GLOBAL, s, val,
7339				    NULL, true, false, &bh);
7340
7341  myh = (struct elf_link_hash_entry *) bh;
7342  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7343  myh->forced_local = 1;
7344
7345  free (tmp_name);
7346
7347  if (bfd_link_pic (link_info)
7348      || globals->root.is_relocatable_executable
7349      || globals->pic_veneer)
7350    size = ARM2THUMB_PIC_GLUE_SIZE;
7351  else if (globals->use_blx)
7352    size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
7353  else
7354    size = ARM2THUMB_STATIC_GLUE_SIZE;
7355
7356  s->size += size;
7357  globals->arm_glue_size += size;
7358
7359  return myh;
7360}
7361
7362/* Allocate space for ARMv4 BX veneers.  */
7363
7364static void
7365record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
7366{
7367  asection * s;
7368  struct elf32_arm_link_hash_table *globals;
7369  char *tmp_name;
7370  struct elf_link_hash_entry *myh;
7371  struct bfd_link_hash_entry *bh;
7372  bfd_vma val;
7373
7374  /* BX PC does not need a veneer.  */
7375  if (reg == 15)
7376    return;
7377
7378  globals = elf32_arm_hash_table (link_info);
7379  BFD_ASSERT (globals != NULL);
7380  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7381
7382  /* Check if this veneer has already been allocated.  */
7383  if (globals->bx_glue_offset[reg])
7384    return;
7385
7386  s = bfd_get_linker_section
7387    (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
7388
7389  BFD_ASSERT (s != NULL);
7390
7391  /* Add symbol for veneer.  */
7392  tmp_name = (char *)
7393      bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
7394  BFD_ASSERT (tmp_name);
7395
7396  sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
7397
7398  myh = elf_link_hash_lookup
7399    (&(globals)->root, tmp_name, false, false, false);
7400
7401  BFD_ASSERT (myh == NULL);
7402
7403  bh = NULL;
7404  val = globals->bx_glue_size;
7405  _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
7406				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7407				    NULL, true, false, &bh);
7408
7409  myh = (struct elf_link_hash_entry *) bh;
7410  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7411  myh->forced_local = 1;
7412
7413  s->size += ARM_BX_VENEER_SIZE;
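  /* Record the veneer's offset for this register.  ORing in 2 keeps the
     entry non-zero even for the first veneer (offset 0), which is what the
     "already allocated" check above relies on; the low bits are free since
     veneers are ARM_BX_VENEER_SIZE (12) bytes apart.  */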
7414  globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
7415  globals->bx_glue_size += ARM_BX_VENEER_SIZE;
7416}
7417
7418
7419/* Add an entry to the code/data map for section SEC.  */
7420
7421static void
7422elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
7423{
7424  struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7425  unsigned int newidx;
7426
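  /* The map is allocated lazily and grown by doubling, so repeated calls
     for the same section stay cheap.  */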
7427  if (sec_data->map == NULL)
7428    {
7429      sec_data->map = (elf32_arm_section_map *)
7430	  bfd_malloc (sizeof (elf32_arm_section_map));
7431      sec_data->mapcount = 0;
7432      sec_data->mapsize = 1;
7433    }
7434
7435  newidx = sec_data->mapcount++;
7436
7437  if (sec_data->mapcount > sec_data->mapsize)
7438    {
7439      sec_data->mapsize *= 2;
7440      sec_data->map = (elf32_arm_section_map *)
7441	  bfd_realloc_or_free (sec_data->map, sec_data->mapsize
7442			       * sizeof (elf32_arm_section_map));
7443    }
7444
7445  if (sec_data->map)
7446    {
7447      sec_data->map[newidx].vma = vma;
7448      sec_data->map[newidx].type = type;
7449    }
7450}
7451
7452
7453/* Record information about a VFP11 denorm-erratum veneer.  Only ARM-mode
7454   veneers are handled for now.  */
7455
7456static bfd_vma
7457record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
7458			     elf32_vfp11_erratum_list *branch,
7459			     bfd *branch_bfd,
7460			     asection *branch_sec,
7461			     unsigned int offset)
7462{
7463  asection *s;
7464  struct elf32_arm_link_hash_table *hash_table;
7465  char *tmp_name;
7466  struct elf_link_hash_entry *myh;
7467  struct bfd_link_hash_entry *bh;
7468  bfd_vma val;
7469  struct _arm_elf_section_data *sec_data;
7470  elf32_vfp11_erratum_list *newerr;
7471
7472  hash_table = elf32_arm_hash_table (link_info);
7473  BFD_ASSERT (hash_table != NULL);
7474  BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
7475
7476  s = bfd_get_linker_section
7477    (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
7478
7479  BFD_ASSERT (s != NULL);
7480
7481  sec_data = elf32_arm_section_data (s);
7482
7483  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7484				  (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
7485  BFD_ASSERT (tmp_name);
7486
7487  sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
7488	   hash_table->num_vfp11_fixes);
7489
7490  myh = elf_link_hash_lookup
7491    (&(hash_table)->root, tmp_name, false, false, false);
7492
7493  BFD_ASSERT (myh == NULL);
7494
7495  bh = NULL;
7496  val = hash_table->vfp11_erratum_glue_size;
7497  _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
7498				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7499				    NULL, true, false, &bh);
7500
7501  myh = (struct elf_link_hash_entry *) bh;
7502  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7503  myh->forced_local = 1;
7504
7505  /* Link veneer back to calling location.  */
7506  sec_data->erratumcount += 1;
7507  newerr = (elf32_vfp11_erratum_list *)
7508      bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
7509
7510  newerr->type = VFP11_ERRATUM_ARM_VENEER;
7511  newerr->vma = -1;
7512  newerr->u.v.branch = branch;
7513  newerr->u.v.id = hash_table->num_vfp11_fixes;
7514  branch->u.b.veneer = newerr;
7515
7516  newerr->next = sec_data->erratumlist;
7517  sec_data->erratumlist = newerr;
7518
7519  /* A symbol for the return from the veneer.  */
7520  sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
7521	   hash_table->num_vfp11_fixes);
7522
7523  myh = elf_link_hash_lookup
7524    (&(hash_table)->root, tmp_name, false, false, false);
7525
7526  if (myh != NULL)
7527    abort ();
7528
7529  bh = NULL;
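  /* The return symbol is placed just after the branch at OFFSET, i.e. where
     execution resumes once the veneer has run.  */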
7530  val = offset + 4;
7531  _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
7532				    branch_sec, val, NULL, true, false, &bh);
7533
7534  myh = (struct elf_link_hash_entry *) bh;
7535  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7536  myh->forced_local = 1;
7537
7538  free (tmp_name);
7539
7540  /* Generate a mapping symbol for the veneer section, and explicitly add an
7541     entry for that symbol to the code/data map for the section.  */
7542  if (hash_table->vfp11_erratum_glue_size == 0)
7543    {
7544      bh = NULL;
7545      /* FIXME: Creates an ARM symbol.  Thumb mode will need attention if it
7546	 ever requires this erratum fix.  */
7547      _bfd_generic_link_add_one_symbol (link_info,
7548					hash_table->bfd_of_glue_owner, "$a",
7549					BSF_LOCAL, s, 0, NULL,
7550					true, false, &bh);
7551
7552      myh = (struct elf_link_hash_entry *) bh;
7553      myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
7554      myh->forced_local = 1;
7555
7556      /* The elf32_arm_init_maps function only cares about symbols from input
7557	 BFDs.  We must make a note of this generated mapping symbol
7558	 ourselves so that code byteswapping works properly in
7559	 elf32_arm_write_section.  */
7560      elf32_arm_section_map_add (s, 'a', 0);
7561    }
7562
7563  s->size += VFP11_ERRATUM_VENEER_SIZE;
7564  hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
7565  hash_table->num_vfp11_fixes++;
7566
7567  /* The offset of the veneer.  */
7568  return val;
7569}
7570
7571/* Record information about a STM32L4XX STM erratum veneer.  Only Thumb-mode
7572   veneers need to be handled, because the affected parts are Cortex-M
7573   (Thumb-only) cores.  */
7573
7574static bfd_vma
7575record_stm32l4xx_erratum_veneer (struct bfd_link_info *link_info,
7576				 elf32_stm32l4xx_erratum_list *branch,
7577				 bfd *branch_bfd,
7578				 asection *branch_sec,
7579				 unsigned int offset,
7580				 bfd_size_type veneer_size)
7581{
7582  asection *s;
7583  struct elf32_arm_link_hash_table *hash_table;
7584  char *tmp_name;
7585  struct elf_link_hash_entry *myh;
7586  struct bfd_link_hash_entry *bh;
7587  bfd_vma val;
7588  struct _arm_elf_section_data *sec_data;
7589  elf32_stm32l4xx_erratum_list *newerr;
7590
7591  hash_table = elf32_arm_hash_table (link_info);
7592  BFD_ASSERT (hash_table != NULL);
7593  BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
7594
7595  s = bfd_get_linker_section
7596    (hash_table->bfd_of_glue_owner, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7597
7598  BFD_ASSERT (s != NULL);
7599
7600  sec_data = elf32_arm_section_data (s);
7601
7602  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7603				  (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
7604  BFD_ASSERT (tmp_name);
7605
7606  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
7607	   hash_table->num_stm32l4xx_fixes);
7608
7609  myh = elf_link_hash_lookup
7610    (&(hash_table)->root, tmp_name, false, false, false);
7611
7612  BFD_ASSERT (myh == NULL);
7613
7614  bh = NULL;
7615  val = hash_table->stm32l4xx_erratum_glue_size;
7616  _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
7617				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7618				    NULL, true, false, &bh);
7619
7620  myh = (struct elf_link_hash_entry *) bh;
7621  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7622  myh->forced_local = 1;
7623
7624  /* Link veneer back to calling location.  */
7625  sec_data->stm32l4xx_erratumcount += 1;
7626  newerr = (elf32_stm32l4xx_erratum_list *)
7627      bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list));
7628
7629  newerr->type = STM32L4XX_ERRATUM_VENEER;
7630  newerr->vma = -1;
7631  newerr->u.v.branch = branch;
7632  newerr->u.v.id = hash_table->num_stm32l4xx_fixes;
7633  branch->u.b.veneer = newerr;
7634
7635  newerr->next = sec_data->stm32l4xx_erratumlist;
7636  sec_data->stm32l4xx_erratumlist = newerr;
7637
7638  /* A symbol for the return from the veneer.  */
7639  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
7640	   hash_table->num_stm32l4xx_fixes);
7641
7642  myh = elf_link_hash_lookup
7643    (&(hash_table)->root, tmp_name, false, false, false);
7644
7645  if (myh != NULL)
7646    abort ();
7647
7648  bh = NULL;
7649  val = offset + 4;
7650  _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
7651				    branch_sec, val, NULL, true, false, &bh);
7652
7653  myh = (struct elf_link_hash_entry *) bh;
7654  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7655  myh->forced_local = 1;
7656
7657  free (tmp_name);
7658
7659  /* Generate a mapping symbol for the veneer section, and explicitly add an
7660     entry for that symbol to the code/data map for the section.  */
7661  if (hash_table->stm32l4xx_erratum_glue_size == 0)
7662    {
7663      bh = NULL;
7664      /* Creates a THUMB symbol since there is no other choice.  */
7665      _bfd_generic_link_add_one_symbol (link_info,
7666					hash_table->bfd_of_glue_owner, "$t",
7667					BSF_LOCAL, s, 0, NULL,
7668					true, false, &bh);
7669
7670      myh = (struct elf_link_hash_entry *) bh;
7671      myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
7672      myh->forced_local = 1;
7673
7674      /* The elf32_arm_init_maps function only cares about symbols from input
7675	 BFDs.  We must make a note of this generated mapping symbol
7676	 ourselves so that code byteswapping works properly in
7677	 elf32_arm_write_section.  */
7678      elf32_arm_section_map_add (s, 't', 0);
7679    }
7680
7681  s->size += veneer_size;
7682  hash_table->stm32l4xx_erratum_glue_size += veneer_size;
7683  hash_table->num_stm32l4xx_fixes++;
7684
7685  /* The offset of the veneer.  */
7686  return val;
7687}
7688
7689#define ARM_GLUE_SECTION_FLAGS \
7690  (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
7691   | SEC_READONLY | SEC_LINKER_CREATED)
7692
7693/* Create a fake section for use by the ARM backend of the linker.  */
7694
7695static bool
7696arm_make_glue_section (bfd * abfd, const char * name)
7697{
7698  asection * sec;
7699
7700  sec = bfd_get_linker_section (abfd, name);
7701  if (sec != NULL)
7702    /* Already made.  */
7703    return true;
7704
7705  sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
7706
7707  if (sec == NULL
7708      || !bfd_set_section_alignment (sec, 2))
7709    return false;
7710
7711  /* Set the gc mark to prevent the section from being removed by garbage
7712     collection, despite the fact that no relocs refer to this section.  */
7713  sec->gc_mark = 1;
7714
7715  return true;
7716}
7717
7718/* Arrange for the long form of .plt entries to be used.  This function is
7719   called from the linker scripts in ld/emultempl/{armelf}.em.  */
7720
7721void
7722bfd_elf32_arm_use_long_plt (void)
7723{
7724  elf32_arm_use_long_plt_entry = true;
7725}
7726
7727/* Add the glue sections to ABFD.  This function is called from the
7728   linker scripts in ld/emultempl/{armelf}.em.  */
7729
7730bool
7731bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
7732					struct bfd_link_info *info)
7733{
7734  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
7735  bool dostm32l4xx = globals
7736    && globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE;
7737  bool addglue;
7738
7739  /* If we are only performing a partial
7740     link do not bother adding the glue.  */
7741  if (bfd_link_relocatable (info))
7742    return true;
7743
7744  addglue = arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
7745    && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
7746    && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
7747    && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
7748
7749  if (!dostm32l4xx)
7750    return addglue;
7751
7752  return addglue
7753    && arm_make_glue_section (abfd, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7754}
7755
7756/* Mark the output sections of veneers needing a dedicated output section
7757   with SEC_KEEP.  This ensures they are not marked for deletion by
7758   strip_excluded_output_sections () when veneers are going to be created
7759   later.  Not doing so would trigger an assert on empty section size in
7760   lang_size_sections_1 ().  */
7761
7762void
7763bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info *info)
7764{
7765  enum elf32_arm_stub_type stub_type;
7766
7767  /* If we are only performing a partial link,
7768     do not bother keeping the stub output sections.  */
7769  if (bfd_link_relocatable (info))
7770    return;
7771
7772  for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
7773    {
7774      asection *out_sec;
7775      const char *out_sec_name;
7776
7777      if (!arm_dedicated_stub_output_section_required (stub_type))
7778	continue;
7779
7780      out_sec_name = arm_dedicated_stub_output_section_name (stub_type);
7781      out_sec = bfd_get_section_by_name (info->output_bfd, out_sec_name);
7782      if (out_sec != NULL)
7783	out_sec->flags |= SEC_KEEP;
7784    }
7785}
7786
7787/* Select a BFD to be used to hold the sections used by the glue code.
7788   This function is called from the linker scripts in ld/emultempl/
7789   {armelf/pe}.em.  */
7790
7791bool
7792bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
7793{
7794  struct elf32_arm_link_hash_table *globals;
7795
7796  /* If we are only performing a partial link
7797     do not bother getting a bfd to hold the glue.  */
7798  if (bfd_link_relocatable (info))
7799    return true;
7800
7801  /* Make sure we don't attach the glue sections to a dynamic object.  */
7802  BFD_ASSERT (!(abfd->flags & DYNAMIC));
7803
7804  globals = elf32_arm_hash_table (info);
7805  BFD_ASSERT (globals != NULL);
7806
7807  if (globals->bfd_of_glue_owner != NULL)
7808    return true;
7809
7810  /* Save the bfd for later use.  */
7811  globals->bfd_of_glue_owner = abfd;
7812
7813  return true;
7814}
7815
7816static void
7817check_use_blx (struct elf32_arm_link_hash_table *globals)
7818{
7819  int cpu_arch;
7820
7821  cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
7822				       Tag_CPU_arch);
7823
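  /* BLX is available from ARMv5T onwards, but when the ARM1176 workaround is
     requested we only use it for architectures that an ARM1176 cannot
     implement (v6T2, or anything newer than v6K).  */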
7824  if (globals->fix_arm1176)
7825    {
7826      if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
7827	globals->use_blx = 1;
7828    }
7829  else
7830    {
7831      if (cpu_arch > TAG_CPU_ARCH_V4T)
7832	globals->use_blx = 1;
7833    }
7834}
7835
7836bool
7837bfd_elf32_arm_process_before_allocation (bfd *abfd,
7838					 struct bfd_link_info *link_info)
7839{
7840  Elf_Internal_Shdr *symtab_hdr;
7841  Elf_Internal_Rela *internal_relocs = NULL;
7842  Elf_Internal_Rela *irel, *irelend;
7843  bfd_byte *contents = NULL;
7844
7845  asection *sec;
7846  struct elf32_arm_link_hash_table *globals;
7847
7848  /* If we are only performing a partial link do not bother
7849     to construct any glue.  */
7850  if (bfd_link_relocatable (link_info))
7851    return true;
7852
7853  /* Here we have a bfd that is to be included in the link.  We have a
7854     hook to do reloc rummaging, before section sizes are nailed down.  */
7855  globals = elf32_arm_hash_table (link_info);
7856  BFD_ASSERT (globals != NULL);
7857
7858  check_use_blx (globals);
7859
7860  if (globals->byteswap_code && !bfd_big_endian (abfd))
7861    {
7862      _bfd_error_handler (_("%pB: BE8 images only valid in big-endian mode"),
7863			  abfd);
7864      return false;
7865    }
7866
7867  /* PR 5398: If we have not decided to include any loadable sections in
7868     the output then we will not have a glue owner bfd.  This is OK, it
7869     just means that there is nothing else for us to do here.  */
7870  if (globals->bfd_of_glue_owner == NULL)
7871    return true;
7872
7873  /* Rummage around all the relocs and map the glue vectors.  */
7874  sec = abfd->sections;
7875
7876  if (sec == NULL)
7877    return true;
7878
7879  for (; sec != NULL; sec = sec->next)
7880    {
7881      if (sec->reloc_count == 0)
7882	continue;
7883
7884      if ((sec->flags & SEC_EXCLUDE) != 0)
7885	continue;
7886
7887      symtab_hdr = & elf_symtab_hdr (abfd);
7888
7889      /* Load the relocs.  */
7890      internal_relocs
7891	= _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, false);
7892
7893      if (internal_relocs == NULL)
7894	goto error_return;
7895
7896      irelend = internal_relocs + sec->reloc_count;
7897      for (irel = internal_relocs; irel < irelend; irel++)
7898	{
7899	  long r_type;
7900	  unsigned long r_index;
7901
7902	  struct elf_link_hash_entry *h;
7903
7904	  r_type = ELF32_R_TYPE (irel->r_info);
7905	  r_index = ELF32_R_SYM (irel->r_info);
7906
7907	  /* These are the only relocation types we care about.  */
7908	  if (   r_type != R_ARM_PC24
7909	      && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
7910	    continue;
7911
7912	  /* Get the section contents if we haven't done so already.  */
7913	  if (contents == NULL)
7914	    {
7915	      /* Get cached copy if it exists.  */
7916	      if (elf_section_data (sec)->this_hdr.contents != NULL)
7917		contents = elf_section_data (sec)->this_hdr.contents;
7918	      else
7919		{
7920		  /* Go get them off disk.  */
7921		  if (! bfd_malloc_and_get_section (abfd, sec, &contents))
7922		    goto error_return;
7923		}
7924	    }
7925
7926	  if (r_type == R_ARM_V4BX)
7927	    {
7928	      int reg;
7929
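	      /* The BX instruction encodes its register operand in the
		 bottom four bits.  */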
7930	      reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
7931	      record_arm_bx_glue (link_info, reg);
7932	      continue;
7933	    }
7934
7935	  /* If the relocation is not against a symbol it cannot concern us.  */
7936	  h = NULL;
7937
7938	  /* We don't care about local symbols.  */
7939	  if (r_index < symtab_hdr->sh_info)
7940	    continue;
7941
7942	  /* This is an external symbol.  */
7943	  r_index -= symtab_hdr->sh_info;
7944	  h = (struct elf_link_hash_entry *)
7945	    elf_sym_hashes (abfd)[r_index];
7946
7947	  /* If the relocation is against a static symbol it must be within
7948	     the current section and so cannot be a cross ARM/Thumb relocation.  */
7949	  if (h == NULL)
7950	    continue;
7951
7952	  /* If the call will go through a PLT entry then we do not need
7953	     glue.  */
7954	  if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
7955	    continue;
7956
7957	  switch (r_type)
7958	    {
7959	    case R_ARM_PC24:
7960	      /* This one is a call from arm code.  We need to look up
7961		 the target of the call.  If it is a thumb target, we
7962		 insert glue.  */
7963	      if (ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
7964		  == ST_BRANCH_TO_THUMB)
7965		record_arm_to_thumb_glue (link_info, h);
7966	      break;
7967
7968	    default:
7969	      abort ();
7970	    }
7971	}
7972
7973      if (elf_section_data (sec)->this_hdr.contents != contents)
7974	free (contents);
7975      contents = NULL;
7976
7977      if (elf_section_data (sec)->relocs != internal_relocs)
7978	free (internal_relocs);
7979      internal_relocs = NULL;
7980    }
7981
7982  return true;
7983
7984 error_return:
7985  if (elf_section_data (sec)->this_hdr.contents != contents)
7986    free (contents);
7987  if (elf_section_data (sec)->relocs != internal_relocs)
7988    free (internal_relocs);
7989
7990  return false;
7991}
7992#endif
7993
7994
7995/* Initialise maps of ARM/Thumb/data for input BFDs.  */
7996
7997void
7998bfd_elf32_arm_init_maps (bfd *abfd)
7999{
8000  Elf_Internal_Sym *isymbuf;
8001  Elf_Internal_Shdr *hdr;
8002  unsigned int i, localsyms;
8003
8004  /* PR 7093: Make sure that we are dealing with an arm elf binary.  */
8005  if (! is_arm_elf (abfd))
8006    return;
8007
8008  if ((abfd->flags & DYNAMIC) != 0)
8009    return;
8010
8011  hdr = & elf_symtab_hdr (abfd);
8012  localsyms = hdr->sh_info;
8013
8014  /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
8015     should contain the number of local symbols, which should come before any
8016     global symbols.  Mapping symbols are always local.  */
8017  isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
8018				  NULL);
8019
8020  /* No internal symbols read?  Skip this BFD.  */
8021  if (isymbuf == NULL)
8022    return;
8023
8024  for (i = 0; i < localsyms; i++)
8025    {
8026      Elf_Internal_Sym *isym = &isymbuf[i];
8027      asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
8028      const char *name;
8029
8030      if (sec != NULL
8031	  && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
8032	{
8033	  name = bfd_elf_string_from_elf_section (abfd,
8034	    hdr->sh_link, isym->st_name);
8035
8036	  if (bfd_is_arm_special_symbol_name (name,
8037					      BFD_ARM_SPECIAL_SYM_TYPE_MAP))
8038	    elf32_arm_section_map_add (sec, name[1], isym->st_value);
8039	}
8040    }
8041}
8042
8043
8044/* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
8045   say what they wanted.  */
8046
8047void
8048bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
8049{
8050  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8051  obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8052
8053  if (globals == NULL)
8054    return;
8055
8056  if (globals->fix_cortex_a8 == -1)
8057    {
8058      /* Turn on Cortex-A8 erratum workaround for ARMv7-A.  */
8059      if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
8060	  && (out_attr[Tag_CPU_arch_profile].i == 'A'
8061	      || out_attr[Tag_CPU_arch_profile].i == 0))
8062	globals->fix_cortex_a8 = 1;
8063      else
8064	globals->fix_cortex_a8 = 0;
8065    }
8066}
8067
8068
8069void
8070bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
8071{
8072  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8073  obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8074
8075  if (globals == NULL)
8076    return;
8077  /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix.  */
8078  if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
8079    {
8080      switch (globals->vfp11_fix)
8081	{
8082	case BFD_ARM_VFP11_FIX_DEFAULT:
8083	case BFD_ARM_VFP11_FIX_NONE:
8084	  globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
8085	  break;
8086
8087	default:
8088	  /* Give a warning, but do as the user requests anyway.  */
8089	  _bfd_error_handler (_("%pB: warning: selected VFP11 erratum "
8090	    "workaround is not necessary for target architecture"), obfd);
8091	}
8092    }
8093  else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
8094    /* For earlier architectures, we might need the workaround, but do not
8095       enable it by default.  If the user is running on broken hardware, they
8096       must enable the erratum fix explicitly.  */
8097    globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
8098}
8099
8100void
8101bfd_elf32_arm_set_stm32l4xx_fix (bfd *obfd, struct bfd_link_info *link_info)
8102{
8103  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8104  obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8105
8106  if (globals == NULL)
8107    return;
8108
8109  /* We assume only Cortex-M4 may require the fix.  */
8110  if (out_attr[Tag_CPU_arch].i != TAG_CPU_ARCH_V7E_M
8111      || out_attr[Tag_CPU_arch_profile].i != 'M')
8112    {
8113      if (globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE)
8114	/* Give a warning, but do as the user requests anyway.  */
8115	_bfd_error_handler
8116	  (_("%pB: warning: selected STM32L4XX erratum "
8117	     "workaround is not necessary for target architecture"), obfd);
8118    }
8119}
8120
8121enum bfd_arm_vfp11_pipe
8122{
8123  VFP11_FMAC,
8124  VFP11_LS,
8125  VFP11_DS,
8126  VFP11_BAD
8127};
8128
8129/* Return a VFP register number.  This is encoded as RX:X for single-precision
8130   registers, or X:RX for double-precision registers, where RX is the group of
8131   four bits in the instruction encoding and X is the single extension bit.
8132   RX and X fields are specified using their lowest (starting) bit.  The return
8133   value is:
8134
8135     0...31: single-precision registers s0...s31
8136     32...63: double-precision registers d0...d31.
8137
8138   Although X should be zero for VFP11 (encoding d0...d15 only), we might
8139   encounter VFP3 instructions, so we allow the full range for DP registers.  */
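
/* A worked example of the encoding above: with RX = 1 and X = 1 the fields
   denote s3 (1 * 2 + 1 = 3) for a single-precision operand, or d17
   (returned here as 32 + 17 = 49) for a double-precision one.  */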
8140
8141static unsigned int
8142bfd_arm_vfp11_regno (unsigned int insn, bool is_double, unsigned int rx,
8143		     unsigned int x)
8144{
8145  if (is_double)
8146    return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
8147  else
8148    return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
8149}
8150
8151/* Set bits in *WMASK according to a register number REG as encoded by
8152   bfd_arm_vfp11_regno().  Ignore d16-d31.  */
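
/* For example, REG == 35 (d3) sets bits 6 and 7 of *WMASK, i.e. the bits
   for its two constituent single-precision registers s6 and s7.  */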
8153
8154static void
8155bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
8156{
8157  if (reg < 32)
8158    *wmask |= 1 << reg;
8159  else if (reg < 48)
8160    *wmask |= 3 << ((reg - 32) * 2);
8161}
8162
8163/* Return TRUE if WMASK overwrites anything in REGS.  */
8164
8165static bool
8166bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
8167{
8168  int i;
8169
8170  for (i = 0; i < numregs; i++)
8171    {
8172      unsigned int reg = regs[i];
8173
8174      if (reg < 32 && (wmask & (1 << reg)) != 0)
8175	return true;
8176
8177      reg -= 32;
8178
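      /* The write mask does not track d16-d31 (see
	 bfd_arm_vfp11_write_mask), so there is nothing to check for them.  */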
8179      if (reg >= 16)
8180	continue;
8181
8182      if ((wmask & (3 << (reg * 2))) != 0)
8183	return true;
8184    }
8185
8186  return false;
8187}
8188
8189/* In this function, we're interested in two things: finding input registers
8190   for VFP data-processing instructions, and finding the set of registers which
8191   arbitrary VFP instructions may write to.  We use a 32-bit unsigned int to
8192   hold the written set, so FLDM etc. are easy to deal with (we're only
8193   interested in 32 SP registers or 16 DP registers, due to the VFP version
8194   implemented by the chip in question).  DP registers are marked by setting
8195   both of their constituent SP registers in the write mask.  */
8196
8197static enum bfd_arm_vfp11_pipe
8198bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
8199			   int *numregs)
8200{
8201  enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
8202  bool is_double = (insn & 0xf00) == 0xb00;
8203
8204  if ((insn & 0x0f000e10) == 0x0e000a00)  /* A data-processing insn.  */
8205    {
8206      unsigned int pqrs;
8207      unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
8208      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
8209
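      /* Gather the opcode bits p (bit 23), q:r (bits 21:20) and s (bit 6)
	 into a single four-bit pqrs value.  */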
8210      pqrs = ((insn & 0x00800000) >> 20)
8211	   | ((insn & 0x00300000) >> 19)
8212	   | ((insn & 0x00000040) >> 6);
8213
8214      switch (pqrs)
8215	{
8216	case 0: /* fmac[sd].  */
8217	case 1: /* fnmac[sd].  */
8218	case 2: /* fmsc[sd].  */
8219	case 3: /* fnmsc[sd].  */
8220	  vpipe = VFP11_FMAC;
8221	  bfd_arm_vfp11_write_mask (destmask, fd);
8222	  regs[0] = fd;
8223	  regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
8224	  regs[2] = fm;
8225	  *numregs = 3;
8226	  break;
8227
8228	case 4: /* fmul[sd].  */
8229	case 5: /* fnmul[sd].  */
8230	case 6: /* fadd[sd].  */
8231	case 7: /* fsub[sd].  */
8232	  vpipe = VFP11_FMAC;
8233	  goto vfp_binop;
8234
8235	case 8: /* fdiv[sd].  */
8236	  vpipe = VFP11_DS;
8237	  vfp_binop:
8238	  bfd_arm_vfp11_write_mask (destmask, fd);
8239	  regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);   /* Fn.  */
8240	  regs[1] = fm;
8241	  *numregs = 2;
8242	  break;
8243
8244	case 15: /* extended opcode.  */
8245	  {
8246	    unsigned int extn = ((insn >> 15) & 0x1e)
8247			      | ((insn >> 7) & 1);
8248
8249	    switch (extn)
8250	      {
8251	      case 0: /* fcpy[sd].  */
8252	      case 1: /* fabs[sd].  */
8253	      case 2: /* fneg[sd].  */
8254	      case 8: /* fcmp[sd].  */
8255	      case 9: /* fcmpe[sd].  */
8256	      case 10: /* fcmpz[sd].  */
8257	      case 11: /* fcmpez[sd].  */
8258	      case 16: /* fuito[sd].  */
8259	      case 17: /* fsito[sd].  */
8260	      case 24: /* ftoui[sd].  */
8261	      case 25: /* ftouiz[sd].  */
8262	      case 26: /* ftosi[sd].  */
8263	      case 27: /* ftosiz[sd].  */
8264		/* These instructions will not bounce due to underflow.  */
8265		*numregs = 0;
8266		vpipe = VFP11_FMAC;
8267		break;
8268
8269	      case 3: /* fsqrt[sd].  */
8270		/* fsqrt cannot underflow, but it can (perhaps) overwrite
8271		   registers to cause the erratum in previous instructions.  */
8272		bfd_arm_vfp11_write_mask (destmask, fd);
8273		vpipe = VFP11_DS;
8274		break;
8275
8276	      case 15: /* fcvt{ds,sd}.  */
8277		{
8278		  int rnum = 0;
8279
8280		  bfd_arm_vfp11_write_mask (destmask, fd);
8281
8282		  /* Only FCVTSD can underflow.  */
8283		  if ((insn & 0x100) != 0)
8284		    regs[rnum++] = fm;
8285
8286		  *numregs = rnum;
8287
8288		  vpipe = VFP11_FMAC;
8289		}
8290		break;
8291
8292	      default:
8293		return VFP11_BAD;
8294	      }
8295	  }
8296	  break;
8297
8298	default:
8299	  return VFP11_BAD;
8300	}
8301    }
8302  /* Two-register transfer.  */
8303  else if ((insn & 0x0fe00ed0) == 0x0c400a10)
8304    {
8305      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
8306
8307      if ((insn & 0x100000) == 0)
8308	{
8309	  if (is_double)
8310	    bfd_arm_vfp11_write_mask (destmask, fm);
8311	  else
8312	    {
8313	      bfd_arm_vfp11_write_mask (destmask, fm);
8314	      bfd_arm_vfp11_write_mask (destmask, fm + 1);
8315	    }
8316	}
8317
8318      vpipe = VFP11_LS;
8319    }
8320  else if ((insn & 0x0e100e00) == 0x0c100a00)  /* A load insn.  */
8321    {
8322      int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
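      /* Combine the addressing-mode bits P (bit 24), U (bit 23) and
	 W (bit 21) into a single three-bit PUW value.  */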
8323      unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
8324
8325      switch (puw)
8326	{
8327	case 0: /* Two-reg transfer.  We should catch these above.  */
8328	  abort ();
8329
8330	case 2: /* fldm[sdx].  */
8331	case 3:
8332	case 5:
8333	  {
8334	    unsigned int i, offset = insn & 0xff;
8335
8336	    if (is_double)
8337	      offset >>= 1;
8338
8339	    for (i = fd; i < fd + offset; i++)
8340	      bfd_arm_vfp11_write_mask (destmask, i);
8341	  }
8342	  break;
8343
8344	case 4: /* fld[sd].  */
8345	case 6:
8346	  bfd_arm_vfp11_write_mask (destmask, fd);
8347	  break;
8348
8349	default:
8350	  return VFP11_BAD;
8351	}
8352
8353      vpipe = VFP11_LS;
8354    }
8355  /* Single-register transfer. Note L==0.  */
8356  else if ((insn & 0x0f100e10) == 0x0e000a10)
8357    {
8358      unsigned int opcode = (insn >> 21) & 7;
8359      unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
8360
8361      switch (opcode)
8362	{
8363	case 0: /* fmsr/fmdlr.  */
8364	case 1: /* fmdhr.  */
8365	  /* Mark fmdhr and fmdlr as writing to the whole of the DP
8366	     destination register.  I don't know if this is exactly right,
8367	     but it is the conservative choice.  */
8368	  bfd_arm_vfp11_write_mask (destmask, fn);
8369	  break;
8370
8371	case 7: /* fmxr.  */
8372	  break;
8373	}
8374
8375      vpipe = VFP11_LS;
8376    }
8377
8378  return vpipe;
8379}
8380
8381
8382static int elf32_arm_compare_mapping (const void * a, const void * b);
8383
8384
8385/* Look for potentially-troublesome code sequences which might trigger the
8386   VFP11 denormal/antidependency erratum.  See, e.g., the ARM1136 errata sheet
8387   (available from ARM) for details of the erratum.  A short version is
8388   described in ld.texinfo.  */
8389
8390bool
8391bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
8392{
8393  asection *sec;
8394  bfd_byte *contents = NULL;
8395  int state = 0;
8396  int regs[3], numregs = 0;
8397  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  int use_vector;

  if (globals == NULL)
    return false;
  use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
8402
8403  /* We use a simple FSM to match troublesome VFP11 instruction sequences.
8404     The states transition as follows:
8405
8406       0 -> 1 (vector) or 0 -> 2 (scalar)
8407	   A VFP FMAC-pipeline instruction has been seen. Fill
8408	   regs[0]..regs[numregs-1] with its input operands. Remember this
8409	   instruction in 'first_fmac'.
8410
8411       1 -> 2
8412	   Any instruction, except for a VFP instruction which overwrites
8413	   regs[*].
8414
8415       1 -> 3 [ -> 0 ]  or
8416       2 -> 3 [ -> 0 ]
8417	   A VFP instruction has been seen which overwrites any of regs[*].
8418	   We must make a veneer!  Reset state to 0 before examining next
8419	   instruction.
8420
8421       2 -> 0
8422	   If we fail to match anything in state 2, reset to state 0 and reset
8423	   the instruction pointer to the instruction after 'first_fmac'.
8424
8425     If the VFP11 vector mode is in use, there must be at least two unrelated
8426     instructions between anti-dependent VFP11 instructions to properly avoid
8427     triggering the erratum, hence the use of the extra state 1.  */
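  /* As a concrete illustration (scalar case): a potentially-denormal
     FMAC-pipeline instruction such as "fmacs s0, s1, s2" records its
     input operands in regs[] and moves the FSM to state 2; if a later
     VFP instruction then overwrites one of those recorded registers,
     state 3 is reached and a veneer is recorded for the first
     instruction.  */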
8428
8429  /* If we are only performing a partial link do not bother
8430     to construct any glue.  */
8431  if (bfd_link_relocatable (link_info))
8432    return true;
8433
8434  /* Skip if this bfd does not correspond to an ELF image.  */
8435  if (! is_arm_elf (abfd))
8436    return true;
8437
8438  /* We should have chosen a fix type by the time we get here.  */
8439  BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
8440
8441  if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
8442    return true;
8443
8444  /* Skip this BFD if it corresponds to an executable or dynamic object.  */
8445  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
8446    return true;
8447
8448  for (sec = abfd->sections; sec != NULL; sec = sec->next)
8449    {
8450      unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
8451      struct _arm_elf_section_data *sec_data;
8452
8453      /* If we don't have executable progbits, we're not interested in this
8454	 section.  Also skip if section is to be excluded.  */
8455      if (elf_section_type (sec) != SHT_PROGBITS
8456	  || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
8457	  || (sec->flags & SEC_EXCLUDE) != 0
8458	  || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
8459	  || sec->output_section == bfd_abs_section_ptr
8460	  || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
8461	continue;
8462
8463      sec_data = elf32_arm_section_data (sec);
8464
8465      if (sec_data->mapcount == 0)
8466	continue;
8467
8468      if (elf_section_data (sec)->this_hdr.contents != NULL)
8469	contents = elf_section_data (sec)->this_hdr.contents;
8470      else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
8471	goto error_return;
8472
8473      qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
8474	     elf32_arm_compare_mapping);
8475
8476      for (span = 0; span < sec_data->mapcount; span++)
8477	{
8478	  unsigned int span_start = sec_data->map[span].vma;
8479	  unsigned int span_end = (span == sec_data->mapcount - 1)
8480				  ? sec->size : sec_data->map[span + 1].vma;
8481	  char span_type = sec_data->map[span].type;
8482
8483	  /* FIXME: Only ARM mode is supported at present.  We may need to
8484	     support Thumb-2 mode also at some point.  */
8485	  if (span_type != 'a')
8486	    continue;
8487
8488	  for (i = span_start; i < span_end;)
8489	    {
8490	      unsigned int next_i = i + 4;
8491	      unsigned int insn = bfd_big_endian (abfd)
8492		? (((unsigned) contents[i] << 24)
8493		   | (contents[i + 1] << 16)
8494		   | (contents[i + 2] << 8)
8495		   | contents[i + 3])
8496		: (((unsigned) contents[i + 3] << 24)
8497		   | (contents[i + 2] << 16)
8498		   | (contents[i + 1] << 8)
8499		   | contents[i]);
8500	      unsigned int writemask = 0;
8501	      enum bfd_arm_vfp11_pipe vpipe;
8502
8503	      switch (state)
8504		{
8505		case 0:
8506		  vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
8507						    &numregs);
8508		  /* I'm assuming the VFP11 erratum can trigger with denorm
8509		     operands on either the FMAC or the DS pipeline. This might
8510		     lead to slightly overenthusiastic veneer insertion.  */
8511		  if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
8512		    {
8513		      state = use_vector ? 1 : 2;
8514		      first_fmac = i;
8515		      veneer_of_insn = insn;
8516		    }
8517		  break;
8518
8519		case 1:
8520		  {
8521		    int other_regs[3], other_numregs;
8522		    vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8523						      other_regs,
8524						      &other_numregs);
8525		    if (vpipe != VFP11_BAD
8526			&& bfd_arm_vfp11_antidependency (writemask, regs,
8527							 numregs))
8528		      state = 3;
8529		    else
8530		      state = 2;
8531		  }
8532		  break;
8533
8534		case 2:
8535		  {
8536		    int other_regs[3], other_numregs;
8537		    vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8538						      other_regs,
8539						      &other_numregs);
8540		    if (vpipe != VFP11_BAD
8541			&& bfd_arm_vfp11_antidependency (writemask, regs,
8542							 numregs))
8543		      state = 3;
8544		    else
8545		      {
8546			state = 0;
8547			next_i = first_fmac + 4;
8548		      }
8549		  }
8550		  break;
8551
8552		case 3:
8553		  abort ();  /* Should be unreachable.  */
8554		}
8555
8556	      if (state == 3)
8557		{
		  elf32_vfp11_erratum_list *newerr = (elf32_vfp11_erratum_list *)
		      bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
8560
8561		  elf32_arm_section_data (sec)->erratumcount += 1;
8562
8563		  newerr->u.b.vfp_insn = veneer_of_insn;
8564
8565		  switch (span_type)
8566		    {
8567		    case 'a':
8568		      newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
8569		      break;
8570
8571		    default:
8572		      abort ();
8573		    }
8574
8575		  record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
8576					       first_fmac);
8577
8578		  newerr->vma = -1;
8579
8580		  newerr->next = sec_data->erratumlist;
8581		  sec_data->erratumlist = newerr;
8582
8583		  state = 0;
8584		}
8585
8586	      i = next_i;
8587	    }
8588	}
8589
8590      if (elf_section_data (sec)->this_hdr.contents != contents)
8591	free (contents);
8592      contents = NULL;
8593    }
8594
8595  return true;
8596
8597 error_return:
8598  if (elf_section_data (sec)->this_hdr.contents != contents)
8599    free (contents);
8600
8601  return false;
8602}
8603
8604/* Find virtual-memory addresses for VFP11 erratum veneers and return locations
8605   after sections have been laid out, using specially-named symbols.  */
8606
8607void
8608bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
8609					  struct bfd_link_info *link_info)
8610{
8611  asection *sec;
8612  struct elf32_arm_link_hash_table *globals;
8613  char *tmp_name;
8614
8615  if (bfd_link_relocatable (link_info))
8616    return;
8617
8618  /* Skip if this bfd does not correspond to an ELF image.  */
8619  if (! is_arm_elf (abfd))
8620    return;
8621
8622  globals = elf32_arm_hash_table (link_info);
8623  if (globals == NULL)
8624    return;
8625
8626  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8627				  (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
8628  BFD_ASSERT (tmp_name);
8629
8630  for (sec = abfd->sections; sec != NULL; sec = sec->next)
8631    {
8632      struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8633      elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
8634
8635      for (; errnode != NULL; errnode = errnode->next)
8636	{
8637	  struct elf_link_hash_entry *myh;
8638	  bfd_vma vma;
8639
8640	  switch (errnode->type)
8641	    {
8642	    case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
8643	    case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
8644	      /* Find veneer symbol.  */
8645	      sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
8646		       errnode->u.b.veneer->u.v.id);
8647
8648	      myh = elf_link_hash_lookup
8649		(&(globals)->root, tmp_name, false, false, true);
8650
8651	      if (myh == NULL)
8652		_bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8653				    abfd, "VFP11", tmp_name);
8654
8655	      vma = myh->root.u.def.section->output_section->vma
8656		    + myh->root.u.def.section->output_offset
8657		    + myh->root.u.def.value;
8658
8659	      errnode->u.b.veneer->vma = vma;
8660	      break;
8661
8662	    case VFP11_ERRATUM_ARM_VENEER:
8663	    case VFP11_ERRATUM_THUMB_VENEER:
8664	      /* Find return location.  */
8665	      sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
8666		       errnode->u.v.id);
8667
8668	      myh = elf_link_hash_lookup
8669		(&(globals)->root, tmp_name, false, false, true);
8670
8671	      if (myh == NULL)
8672		_bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8673				    abfd, "VFP11", tmp_name);
8674
8675	      vma = myh->root.u.def.section->output_section->vma
8676		    + myh->root.u.def.section->output_offset
8677		    + myh->root.u.def.value;
8678
8679	      errnode->u.v.branch->vma = vma;
8680	      break;
8681
8682	    default:
8683	      abort ();
8684	    }
8685	}
8686    }
8687
8688  free (tmp_name);
8689}
8690
8691/* Find virtual-memory addresses for STM32L4XX erratum veneers and
8692   return locations after sections have been laid out, using
8693   specially-named symbols.  */
8694
8695void
8696bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd *abfd,
8697					      struct bfd_link_info *link_info)
8698{
8699  asection *sec;
8700  struct elf32_arm_link_hash_table *globals;
8701  char *tmp_name;
8702
8703  if (bfd_link_relocatable (link_info))
8704    return;
8705
8706  /* Skip if this bfd does not correspond to an ELF image.  */
8707  if (! is_arm_elf (abfd))
8708    return;
8709
8710  globals = elf32_arm_hash_table (link_info);
8711  if (globals == NULL)
8712    return;
8713
8714  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8715				  (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
8716  BFD_ASSERT (tmp_name);
8717
8718  for (sec = abfd->sections; sec != NULL; sec = sec->next)
8719    {
8720      struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8721      elf32_stm32l4xx_erratum_list *errnode = sec_data->stm32l4xx_erratumlist;
8722
8723      for (; errnode != NULL; errnode = errnode->next)
8724	{
8725	  struct elf_link_hash_entry *myh;
8726	  bfd_vma vma;
8727
8728	  switch (errnode->type)
8729	    {
8730	    case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
8731	      /* Find veneer symbol.  */
8732	      sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
8733		       errnode->u.b.veneer->u.v.id);
8734
8735	      myh = elf_link_hash_lookup
8736		(&(globals)->root, tmp_name, false, false, true);
8737
8738	      if (myh == NULL)
8739		_bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8740				    abfd, "STM32L4XX", tmp_name);
8741
8742	      vma = myh->root.u.def.section->output_section->vma
8743		+ myh->root.u.def.section->output_offset
8744		+ myh->root.u.def.value;
8745
8746	      errnode->u.b.veneer->vma = vma;
8747	      break;
8748
8749	    case STM32L4XX_ERRATUM_VENEER:
8750	      /* Find return location.  */
8751	      sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
8752		       errnode->u.v.id);
8753
8754	      myh = elf_link_hash_lookup
8755		(&(globals)->root, tmp_name, false, false, true);
8756
8757	      if (myh == NULL)
8758		_bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8759				    abfd, "STM32L4XX", tmp_name);
8760
8761	      vma = myh->root.u.def.section->output_section->vma
8762		+ myh->root.u.def.section->output_offset
8763		+ myh->root.u.def.value;
8764
8765	      errnode->u.v.branch->vma = vma;
8766	      break;
8767
8768	    default:
8769	      abort ();
8770	    }
8771	}
8772    }
8773
8774  free (tmp_name);
8775}
8776
8777static inline bool
8778is_thumb2_ldmia (const insn32 insn)
8779{
8780  /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
8781     1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll.  */
8782  return (insn & 0xffd02000) == 0xe8900000;
8783}
8784
8785static inline bool
8786is_thumb2_ldmdb (const insn32 insn)
8787{
8788  /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
8789     1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll.  */
8790  return (insn & 0xffd02000) == 0xe9100000;
8791}
8792
8793static inline bool
8794is_thumb2_vldm (const insn32 insn)
8795{
8796  /* A6.5 Extension register load or store instruction
8797     A7.7.229
8798     We look for SP 32-bit and DP 64-bit registers.
8799     Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
8800     <list> is consecutive 64-bit registers
8801     1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
8802     Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
8803     <list> is consecutive 32-bit registers
8804     1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
8805     if P==0 && U==1 && W==1 && Rn=1101 VPOP
8806     if PUW=010 || PUW=011 || PUW=101 VLDM.  */
8807  return
8808    (((insn & 0xfe100f00) == 0xec100b00) ||
8809     ((insn & 0xfe100f00) == 0xec100a00))
8810    && /* (IA without !).  */
8811    (((((insn << 7) >> 28) & 0xd) == 0x4)
8812     /* (IA with !), includes VPOP (when reg number is SP).  */
8813     || ((((insn << 7) >> 28) & 0xd) == 0x5)
8814     /* (DB with !).  */
8815     || ((((insn << 7) >> 28) & 0xd) == 0x9));
8816}
8817
/* STM STM32L4XX erratum: this function assumes that it receives an LDM or
   VLDM opcode and:
   - computes the number and the mode of memory accesses;
   - decides whether the replacement should be done:
     . only for accesses of more than 8 words (the real erratum condition),
     . or, for testing purposes only, for every access.  */
8824
8825static bool
8826stm32l4xx_need_create_replacing_stub (const insn32 insn,
8827				      bfd_arm_stm32l4xx_fix stm32l4xx_fix)
8828{
8829  int nb_words = 0;
8830
8831  /* The field encoding the register list is the same for both LDMIA
8832     and LDMDB encodings.  */
8833  if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn))
8834    nb_words = elf32_arm_popcount (insn & 0x0000ffff);
8835  else if (is_thumb2_vldm (insn))
    nb_words = (insn & 0xff);
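  /* For example, "ldmia r0!, {r1-r9}" names nine registers, and a VLDM
     of five double-precision registers has imm8 == 10 words; both
     exceed the 8-word threshold tested below.  */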
8837
8838  /* DEFAULT mode accounts for the real bug condition situation,
8839     ALL mode inserts stubs for each LDM/VLDM instruction (testing).  */
8840  return (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT
8841	  ? nb_words > 8
8842	  : stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL);
8843}
8844
8845/* Look for potentially-troublesome code sequences which might trigger
8846   the STM STM32L4XX erratum.  */
8847
8848bool
8849bfd_elf32_arm_stm32l4xx_erratum_scan (bfd *abfd,
8850				      struct bfd_link_info *link_info)
8851{
8852  asection *sec;
8853  bfd_byte *contents = NULL;
8854  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8855
8856  if (globals == NULL)
8857    return false;
8858
8859  /* If we are only performing a partial link do not bother
8860     to construct any glue.  */
8861  if (bfd_link_relocatable (link_info))
8862    return true;
8863
8864  /* Skip if this bfd does not correspond to an ELF image.  */
8865  if (! is_arm_elf (abfd))
8866    return true;
8867
8868  if (globals->stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_NONE)
8869    return true;
8870
8871  /* Skip this BFD if it corresponds to an executable or dynamic object.  */
8872  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
8873    return true;
8874
8875  for (sec = abfd->sections; sec != NULL; sec = sec->next)
8876    {
8877      unsigned int i, span;
8878      struct _arm_elf_section_data *sec_data;
8879
8880      /* If we don't have executable progbits, we're not interested in this
8881	 section.  Also skip if section is to be excluded.  */
8882      if (elf_section_type (sec) != SHT_PROGBITS
8883	  || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
8884	  || (sec->flags & SEC_EXCLUDE) != 0
8885	  || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
8886	  || sec->output_section == bfd_abs_section_ptr
8887	  || strcmp (sec->name, STM32L4XX_ERRATUM_VENEER_SECTION_NAME) == 0)
8888	continue;
8889
8890      sec_data = elf32_arm_section_data (sec);
8891
8892      if (sec_data->mapcount == 0)
8893	continue;
8894
8895      if (elf_section_data (sec)->this_hdr.contents != NULL)
8896	contents = elf_section_data (sec)->this_hdr.contents;
8897      else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
8898	goto error_return;
8899
8900      qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
8901	     elf32_arm_compare_mapping);
8902
8903      for (span = 0; span < sec_data->mapcount; span++)
8904	{
8905	  unsigned int span_start = sec_data->map[span].vma;
8906	  unsigned int span_end = (span == sec_data->mapcount - 1)
8907	    ? sec->size : sec_data->map[span + 1].vma;
8908	  char span_type = sec_data->map[span].type;
8909	  int itblock_current_pos = 0;
8910
	  /* Only Thumb-2 mode needs to be supported by this Cortex-M4
	     specific code; we should not encounter any ARM-mode spans
	     (span_type == 'a') here.  */
8914	  if (span_type != 't')
8915	    continue;
8916
8917	  for (i = span_start; i < span_end;)
8918	    {
8919	      unsigned int insn = bfd_get_16 (abfd, &contents[i]);
8920	      bool insn_32bit = false;
8921	      bool is_ldm = false;
8922	      bool is_vldm = false;
8923	      bool is_not_last_in_it_block = false;
8924
	      /* The first halfword of every 32-bit Thumb-2 instruction has
		 opcode[15..13] = 0b111 with opcode[12..11] != 0b00 (0b00
		 would make it a 16-bit branch).
		 See 32-bit Thumb instruction encoding.  */
8929	      if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
8930		insn_32bit = true;
8931
	      /* Work out whether the instruction is covered by an IT
		 block:
		 - an LDM/VLDM that is not the last instruction of its
		   IT block cannot be replaced, so an error is reported;
		 - otherwise we can place a branch at the end of the IT
		   block and it will be predicated naturally by IT;
		 - so the only predicate of interest is whether we are
		   on the last instruction of an IT block or not.  */
8942	      if (itblock_current_pos != 0)
8943		  is_not_last_in_it_block = !!--itblock_current_pos;
8944
8945	      if (insn_32bit)
8946		{
8947		  /* Load the rest of the insn (in manual-friendly order).  */
8948		  insn = (insn << 16) | bfd_get_16 (abfd, &contents[i + 2]);
8949		  is_ldm = is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn);
8950		  is_vldm = is_thumb2_vldm (insn);
8951
8952		  /* Veneers are created for (v)ldm depending on
8953		     option flags and memory accesses conditions; but
8954		     if the instruction is not the last instruction of
8955		     an IT block, we cannot create a jump there, so we
8956		     bail out.  */
8957		    if ((is_ldm || is_vldm)
8958			&& stm32l4xx_need_create_replacing_stub
8959			(insn, globals->stm32l4xx_fix))
8960		      {
8961			if (is_not_last_in_it_block)
8962			  {
8963			    _bfd_error_handler
8964			      /* xgettext:c-format */
8965			      (_("%pB(%pA+%#x): error: multiple load detected"
8966				 " in non-last IT block instruction:"
8967				 " STM32L4XX veneer cannot be generated; "
8968				 "use gcc option -mrestrict-it to generate"
8969				 " only one instruction per IT block"),
8970			       abfd, sec, i);
8971			  }
8972			else
8973			  {
8974			    elf32_stm32l4xx_erratum_list *newerr =
8975			      (elf32_stm32l4xx_erratum_list *)
8976			      bfd_zmalloc
8977			      (sizeof (elf32_stm32l4xx_erratum_list));
8978
8979			    elf32_arm_section_data (sec)
8980			      ->stm32l4xx_erratumcount += 1;
8981			    newerr->u.b.insn = insn;
8982			    /* We create only thumb branches.  */
8983			    newerr->type =
8984			      STM32L4XX_ERRATUM_BRANCH_TO_VENEER;
8985			    record_stm32l4xx_erratum_veneer
8986			      (link_info, newerr, abfd, sec,
8987			       i,
8988			       is_ldm ?
8989			       STM32L4XX_ERRATUM_LDM_VENEER_SIZE:
8990			       STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
8991			    newerr->vma = -1;
8992			    newerr->next = sec_data->stm32l4xx_erratumlist;
8993			    sec_data->stm32l4xx_erratumlist = newerr;
8994			  }
8995		      }
8996		}
8997	      else
8998		{
8999		  /* A7.7.37 IT p208
9000		     IT blocks are only encoded in T1
9001		     Encoding T1: IT{x{y{z}}} <firstcond>
9002		     1 0 1 1 - 1 1 1 1 - firstcond - mask
9003		     if mask = '0000' then see 'related encodings'
9004		     We don't deal with UNPREDICTABLE, just ignore these.
9005		     There can be no nested IT blocks so an IT block
9006		     is naturally a new one for which it is worth
9007		     computing its size.  */
9008		  bool is_newitblock = ((insn & 0xff00) == 0xbf00)
9009		    && ((insn & 0x000f) != 0x0000);
9010		  /* If we have a new IT block we compute its size.  */
9011		  if (is_newitblock)
9012		    {
9013		      /* Compute the number of instructions controlled
9014			 by the IT block, it will be used to decide
9015			 whether we are inside an IT block or not.  */
9016		      unsigned int mask = insn & 0x000f;
9017		      itblock_current_pos = 4 - ctz (mask);
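		      /* The lowest set bit of the mask marks the end of
			 the block: a plain IT has mask 0b1000 and thus
			 controls a single instruction, while a mask
			 ending in 1 (e.g. ITTT) controls four.  */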
9018		    }
9019		}
9020
9021	      i += insn_32bit ? 4 : 2;
9022	    }
9023	}
9024
9025      if (elf_section_data (sec)->this_hdr.contents != contents)
9026	free (contents);
9027      contents = NULL;
9028    }
9029
9030  return true;
9031
9032 error_return:
9033  if (elf_section_data (sec)->this_hdr.contents != contents)
9034    free (contents);
9035
9036  return false;
9037}
9038
9039/* Set target relocation values needed during linking.  */
9040
9041void
9042bfd_elf32_arm_set_target_params (struct bfd *output_bfd,
9043				 struct bfd_link_info *link_info,
9044				 struct elf32_arm_params *params)
9045{
9046  struct elf32_arm_link_hash_table *globals;
9047
9048  globals = elf32_arm_hash_table (link_info);
9049  if (globals == NULL)
9050    return;
9051
9052  globals->target1_is_rel = params->target1_is_rel;
9053  if (globals->fdpic_p)
9054    globals->target2_reloc = R_ARM_GOT32;
9055  else if (strcmp (params->target2_type, "rel") == 0)
9056    globals->target2_reloc = R_ARM_REL32;
9057  else if (strcmp (params->target2_type, "abs") == 0)
9058    globals->target2_reloc = R_ARM_ABS32;
9059  else if (strcmp (params->target2_type, "got-rel") == 0)
9060    globals->target2_reloc = R_ARM_GOT_PREL;
9061  else
9062    {
9063      _bfd_error_handler (_("invalid TARGET2 relocation type '%s'"),
9064			  params->target2_type);
9065    }
9066  globals->fix_v4bx = params->fix_v4bx;
9067  globals->use_blx |= params->use_blx;
9068  globals->vfp11_fix = params->vfp11_denorm_fix;
9069  globals->stm32l4xx_fix = params->stm32l4xx_fix;
9070  if (globals->fdpic_p)
9071    globals->pic_veneer = 1;
9072  else
9073    globals->pic_veneer = params->pic_veneer;
9074  globals->fix_cortex_a8 = params->fix_cortex_a8;
9075  globals->fix_arm1176 = params->fix_arm1176;
9076  globals->cmse_implib = params->cmse_implib;
9077  globals->in_implib_bfd = params->in_implib_bfd;
9078
9079  BFD_ASSERT (is_arm_elf (output_bfd));
9080  elf_arm_tdata (output_bfd)->no_enum_size_warning
9081    = params->no_enum_size_warning;
9082  elf_arm_tdata (output_bfd)->no_wchar_size_warning
9083    = params->no_wchar_size_warning;
9084}
9085
9086/* Replace the target offset of a Thumb bl or b.w instruction.  */
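/* The fields written below follow the Thumb-2 BL/B.W branch encoding:
   the upper halfword holds S (bit 10) and imm10, the lower halfword
   holds J1 (bit 13), J2 (bit 11) and imm11, where J1 = NOT(I1) EOR S
   and J2 = NOT(I2) EOR S, I1 and I2 being bits 23 and 22 of the byte
   offset.  */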
9087
9088static void
9089insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
9090{
9091  bfd_vma upper;
9092  bfd_vma lower;
9093  int reloc_sign;
9094
9095  BFD_ASSERT ((offset & 1) == 0);
9096
9097  upper = bfd_get_16 (abfd, insn);
9098  lower = bfd_get_16 (abfd, insn + 2);
9099  reloc_sign = (offset < 0) ? 1 : 0;
9100  upper = (upper & ~(bfd_vma) 0x7ff)
9101	  | ((offset >> 12) & 0x3ff)
9102	  | (reloc_sign << 10);
9103  lower = (lower & ~(bfd_vma) 0x2fff)
9104	  | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
9105	  | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
9106	  | ((offset >> 1) & 0x7ff);
9107  bfd_put_16 (abfd, upper, insn);
9108  bfd_put_16 (abfd, lower, insn + 2);
9109}
9110
9111/* Thumb code calling an ARM function.  */
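/* The Thumb-to-ARM glue generated below (see the t2a*_insn templates)
   is a Thumb "bx pc" plus a "nop" for alignment, followed by an ARM
   "b <destination>", so execution is in ARM state by the time the
   final branch is taken.  */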
9112
9113static int
9114elf32_thumb_to_arm_stub (struct bfd_link_info * info,
9115			 const char *		name,
9116			 bfd *			input_bfd,
9117			 bfd *			output_bfd,
9118			 asection *		input_section,
9119			 bfd_byte *		hit_data,
9120			 asection *		sym_sec,
9121			 bfd_vma		offset,
9122			 bfd_signed_vma		addend,
9123			 bfd_vma		val,
9124			 char **error_message)
9125{
9126  asection * s = 0;
9127  bfd_vma my_offset;
9128  long int ret_offset;
9129  struct elf_link_hash_entry * myh;
9130  struct elf32_arm_link_hash_table * globals;
9131
9132  myh = find_thumb_glue (info, name, error_message);
9133  if (myh == NULL)
9134    return false;
9135
9136  globals = elf32_arm_hash_table (info);
9137  BFD_ASSERT (globals != NULL);
9138  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9139
9140  my_offset = myh->root.u.def.value;
9141
9142  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9143			      THUMB2ARM_GLUE_SECTION_NAME);
9144
9145  BFD_ASSERT (s != NULL);
9146  BFD_ASSERT (s->contents != NULL);
9147  BFD_ASSERT (s->output_section != NULL);
9148
9149  if ((my_offset & 0x01) == 0x01)
9150    {
9151      if (sym_sec != NULL
9152	  && sym_sec->owner != NULL
9153	  && !INTERWORK_FLAG (sym_sec->owner))
9154	{
9155	  _bfd_error_handler
9156	    (_("%pB(%s): warning: interworking not enabled;"
9157	       " first occurrence: %pB: %s call to %s"),
9158	     sym_sec->owner, name, input_bfd, "Thumb", "ARM");
9159
9160	  return false;
9161	}
9162
9163      --my_offset;
9164      myh->root.u.def.value = my_offset;
9165
9166      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
9167		      s->contents + my_offset);
9168
9169      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
9170		      s->contents + my_offset + 2);
9171
9172      ret_offset =
9173	/* Address of destination of the stub.  */
9174	((bfd_signed_vma) val)
9175	- ((bfd_signed_vma)
9176	   /* Offset from the start of the current section
9177	      to the start of the stubs.  */
9178	   (s->output_offset
9179	    /* Offset of the start of this stub from the start of the stubs.  */
9180	    + my_offset
9181	    /* Address of the start of the current section.  */
9182	    + s->output_section->vma)
9183	   /* The branch instruction is 4 bytes into the stub.  */
9184	   + 4
9185	   /* ARM branches work from the pc of the instruction + 8.  */
9186	   + 8);
9187
9188      put_arm_insn (globals, output_bfd,
9189		    (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
9190		    s->contents + my_offset + 4);
9191    }
9192
9193  BFD_ASSERT (my_offset <= globals->thumb_glue_size);
9194
9195  /* Now go back and fix up the original BL insn to point to here.  */
9196  ret_offset =
9197    /* Address of where the stub is located.  */
9198    (s->output_section->vma + s->output_offset + my_offset)
9199     /* Address of where the BL is located.  */
9200    - (input_section->output_section->vma + input_section->output_offset
9201       + offset)
9202    /* Addend in the relocation.  */
9203    - addend
    /* Biasing for PC-relative addressing.  */
9205    - 8;
9206
9207  insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
9208
9209  return true;
9210}
9211
9212/* Populate an Arm to Thumb stub.  Returns the stub symbol.  */
9213
9214static struct elf_link_hash_entry *
9215elf32_arm_create_thumb_stub (struct bfd_link_info * info,
9216			     const char *	    name,
9217			     bfd *		    input_bfd,
9218			     bfd *		    output_bfd,
9219			     asection *		    sym_sec,
9220			     bfd_vma		    val,
9221			     asection *		    s,
9222			     char **		    error_message)
9223{
9224  bfd_vma my_offset;
9225  long int ret_offset;
9226  struct elf_link_hash_entry * myh;
9227  struct elf32_arm_link_hash_table * globals;
9228
9229  myh = find_arm_glue (info, name, error_message);
9230  if (myh == NULL)
9231    return NULL;
9232
9233  globals = elf32_arm_hash_table (info);
9234  BFD_ASSERT (globals != NULL);
9235  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9236
9237  my_offset = myh->root.u.def.value;
9238
9239  if ((my_offset & 0x01) == 0x01)
9240    {
9241      if (sym_sec != NULL
9242	  && sym_sec->owner != NULL
9243	  && !INTERWORK_FLAG (sym_sec->owner))
9244	{
9245	  _bfd_error_handler
9246	    (_("%pB(%s): warning: interworking not enabled;"
9247	       " first occurrence: %pB: %s call to %s"),
9248	     sym_sec->owner, name, input_bfd, "ARM", "Thumb");
9249	}
9250
9251      --my_offset;
9252      myh->root.u.def.value = my_offset;
9253
9254      if (bfd_link_pic (info)
9255	  || globals->root.is_relocatable_executable
9256	  || globals->pic_veneer)
9257	{
9258	  /* For relocatable objects we can't use absolute addresses,
9259	     so construct the address from a relative offset.  */
9260	  /* TODO: If the offset is small it's probably worth
9261	     constructing the address with adds.  */
9262	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
9263			s->contents + my_offset);
9264	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
9265			s->contents + my_offset + 4);
9266	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
9267			s->contents + my_offset + 8);
9268	  /* Adjust the offset by 4 for the position of the add,
9269	     and 8 for the pipeline offset.  */
9270	  ret_offset = (val - (s->output_offset
9271			       + s->output_section->vma
9272			       + my_offset + 12))
9273		       | 1;
9274	  bfd_put_32 (output_bfd, ret_offset,
9275		      s->contents + my_offset + 12);
9276	}
9277      else if (globals->use_blx)
9278	{
9279	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
9280			s->contents + my_offset);
9281
9282	  /* It's a thumb address.  Add the low order bit.  */
9283	  bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
9284		      s->contents + my_offset + 4);
9285	}
9286      else
9287	{
9288	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
9289			s->contents + my_offset);
9290
9291	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
9292			s->contents + my_offset + 4);
9293
9294	  /* It's a thumb address.  Add the low order bit.  */
9295	  bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
9296		      s->contents + my_offset + 8);
9297
9298	  my_offset += 12;
9299	}
9300    }
9301
9302  BFD_ASSERT (my_offset <= globals->arm_glue_size);
9303
9304  return myh;
9305}
9306
9307/* Arm code calling a Thumb function.  */
9308
9309static int
9310elf32_arm_to_thumb_stub (struct bfd_link_info * info,
9311			 const char *		name,
9312			 bfd *			input_bfd,
9313			 bfd *			output_bfd,
9314			 asection *		input_section,
9315			 bfd_byte *		hit_data,
9316			 asection *		sym_sec,
9317			 bfd_vma		offset,
9318			 bfd_signed_vma		addend,
9319			 bfd_vma		val,
9320			 char **error_message)
9321{
9322  unsigned long int tmp;
9323  bfd_vma my_offset;
9324  asection * s;
9325  long int ret_offset;
9326  struct elf_link_hash_entry * myh;
9327  struct elf32_arm_link_hash_table * globals;
9328
9329  globals = elf32_arm_hash_table (info);
9330  BFD_ASSERT (globals != NULL);
9331  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9332
9333  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9334			      ARM2THUMB_GLUE_SECTION_NAME);
9335  BFD_ASSERT (s != NULL);
9336  BFD_ASSERT (s->contents != NULL);
9337  BFD_ASSERT (s->output_section != NULL);
9338
9339  myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
9340				     sym_sec, val, s, error_message);
9341  if (!myh)
9342    return false;
9343
9344  my_offset = myh->root.u.def.value;
9345  tmp = bfd_get_32 (input_bfd, hit_data);
9346  tmp = tmp & 0xFF000000;
9347
9348  /* Somehow these are both 4 too far, so subtract 8.  */
9349  ret_offset = (s->output_offset
9350		+ my_offset
9351		+ s->output_section->vma
9352		- (input_section->output_offset
9353		   + input_section->output_section->vma
9354		   + offset + addend)
9355		- 8);
9356
9357  tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
9358
9359  bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
9360
9361  return true;
9362}
9363
9364/* Populate Arm stub for an exported Thumb function.  */
9365
9366static bool
9367elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
9368{
9369  struct bfd_link_info * info = (struct bfd_link_info *) inf;
9370  asection * s;
9371  struct elf_link_hash_entry * myh;
9372  struct elf32_arm_link_hash_entry *eh;
9373  struct elf32_arm_link_hash_table * globals;
9374  asection *sec;
9375  bfd_vma val;
9376  char *error_message;
9377
9378  eh = elf32_arm_hash_entry (h);
9379  /* Allocate stubs for exported Thumb functions on v4t.  */
9380  if (eh->export_glue == NULL)
9381    return true;
9382
9383  globals = elf32_arm_hash_table (info);
9384  BFD_ASSERT (globals != NULL);
9385  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9386
9387  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9388			      ARM2THUMB_GLUE_SECTION_NAME);
9389  BFD_ASSERT (s != NULL);
9390  BFD_ASSERT (s->contents != NULL);
9391  BFD_ASSERT (s->output_section != NULL);
9392
9393  sec = eh->export_glue->root.u.def.section;
9394
9395  BFD_ASSERT (sec->output_section != NULL);
9396
9397  val = eh->export_glue->root.u.def.value + sec->output_offset
9398	+ sec->output_section->vma;
9399
9400  myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
9401				     h->root.u.def.section->owner,
9402				     globals->obfd, sec, val, s,
9403				     &error_message);
9404  BFD_ASSERT (myh);
9405  return true;
9406}
9407
/* Populate ARMv4 BX veneers.  Returns the absolute address of the veneer.  */
9409
9410static bfd_vma
9411elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
9412{
9413  bfd_byte *p;
9414  bfd_vma glue_addr;
9415  asection *s;
9416  struct elf32_arm_link_hash_table *globals;
9417
9418  globals = elf32_arm_hash_table (info);
9419  BFD_ASSERT (globals != NULL);
9420  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9421
9422  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9423			      ARM_BX_GLUE_SECTION_NAME);
9424  BFD_ASSERT (s != NULL);
9425  BFD_ASSERT (s->contents != NULL);
9426  BFD_ASSERT (s->output_section != NULL);
9427
9428  BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
9429
9430  glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
9431
9432  if ((globals->bx_glue_offset[reg] & 1) == 0)
9433    {
9434      p = s->contents + glue_addr;
9435      bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
9436      bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
9437      bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
9438      globals->bx_glue_offset[reg] |= 1;
9439    }
9440
9441  return glue_addr + s->output_section->vma + s->output_offset;
9442}
9443
9444/* Generate Arm stubs for exported Thumb symbols.  */
9445static void
9446elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
9447				  struct bfd_link_info *link_info)
9448{
9449  struct elf32_arm_link_hash_table * globals;
9450
9451  if (link_info == NULL)
9452    /* Ignore this if we are not called by the ELF backend linker.  */
9453    return;
9454
9455  globals = elf32_arm_hash_table (link_info);
9456  if (globals == NULL)
9457    return;
9458
9459  /* If blx is available then exported Thumb symbols are OK and there is
9460     nothing to do.  */
9461  if (globals->use_blx)
9462    return;
9463
9464  elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
9465			  link_info);
9466}
9467
/* Reserve space for COUNT dynamic relocations in relocation section
   SRELOC.  */
9470
9471static void
9472elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
9473			      bfd_size_type count)
9474{
9475  struct elf32_arm_link_hash_table *htab;
9476
9477  htab = elf32_arm_hash_table (info);
9478  BFD_ASSERT (htab->root.dynamic_sections_created);
9479  if (sreloc == NULL)
9480    abort ();
9481  sreloc->size += RELOC_SIZE (htab) * count;
9482}
9483
9484/* Reserve space for COUNT R_ARM_IRELATIVE relocations.  If the link is
9485   dynamic, the relocations should go in SRELOC, otherwise they should
9486   go in the special .rel.iplt section.  */
9487
9488static void
9489elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
9490			    bfd_size_type count)
9491{
9492  struct elf32_arm_link_hash_table *htab;
9493
9494  htab = elf32_arm_hash_table (info);
9495  if (!htab->root.dynamic_sections_created)
9496    htab->root.irelplt->size += RELOC_SIZE (htab) * count;
9497  else
9498    {
9499      BFD_ASSERT (sreloc != NULL);
9500      sreloc->size += RELOC_SIZE (htab) * count;
9501    }
9502}
9503
9504/* Add relocation REL to the end of relocation section SRELOC.  */
9505
9506static void
9507elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
9508			asection *sreloc, Elf_Internal_Rela *rel)
9509{
9510  bfd_byte *loc;
9511  struct elf32_arm_link_hash_table *htab;
9512
9513  htab = elf32_arm_hash_table (info);
9514  if (!htab->root.dynamic_sections_created
9515      && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
9516    sreloc = htab->root.irelplt;
9517  if (sreloc == NULL)
9518    abort ();
9519  loc = sreloc->contents;
9520  loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
9521  if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
9522    abort ();
9523  SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
9524}
9525
9526/* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
9527   IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
9528   to .plt.  */
9529
9530static void
9531elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
9532			      bool is_iplt_entry,
9533			      union gotplt_union *root_plt,
9534			      struct arm_plt_info *arm_plt)
9535{
9536  struct elf32_arm_link_hash_table *htab;
9537  asection *splt;
9538  asection *sgotplt;
9539
9540  htab = elf32_arm_hash_table (info);
9541
9542  if (is_iplt_entry)
9543    {
9544      splt = htab->root.iplt;
9545      sgotplt = htab->root.igotplt;
9546
9547      /* NaCl uses a special first entry in .iplt too.  */
9548      if (htab->root.target_os == is_nacl && splt->size == 0)
9549	splt->size += htab->plt_header_size;
9550
9551      /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt.  */
9552      elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
9553    }
9554  else
9555    {
9556      splt = htab->root.splt;
9557      sgotplt = htab->root.sgotplt;
9558
      if (htab->fdpic_p)
	{
	  /* Allocate room for R_ARM_FUNCDESC_VALUE.  */
	  /* For lazy binding, relocations will be put into .rel.plt;
	     otherwise they go into .rel.got.  */
	  /* FIXME: today we don't support lazy binding, so put it in
	     .rel.got.  */
	  if (info->flags & DF_BIND_NOW)
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	  else
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
	}
      else
	{
	  /* Allocate room for an R_ARM_JUMP_SLOT relocation in .rel.plt.  */
	  elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
	}
9575
9576      /* If this is the first .plt entry, make room for the special
9577	 first entry.  */
9578      if (splt->size == 0)
9579	splt->size += htab->plt_header_size;
9580
9581      htab->next_tls_desc_index++;
9582    }
9583
9584  /* Allocate the PLT entry itself, including any leading Thumb stub.  */
9585  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
9586    splt->size += PLT_THUMB_STUB_SIZE;
9587  root_plt->offset = splt->size;
9588  splt->size += htab->plt_entry_size;
9589
9590  /* We also need to make an entry in the .got.plt section, which
9591     will be placed in the .got section by the linker script.  */
9592  if (is_iplt_entry)
9593    arm_plt->got_offset = sgotplt->size;
9594  else
9595    arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
9596  if (htab->fdpic_p)
9597    /* Function descriptor takes 64 bits in GOT.  */
9598    sgotplt->size += 8;
9599  else
9600    sgotplt->size += 4;
9601}
9602
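/* Split a 32-bit value into the immediate fields of an ARM MOVW or MOVT
   instruction: each encodes a 16-bit immediate as imm4 (opcode bits
   19:16) and imm12 (opcode bits 11:0).  For example,
   arm_movw_immediate (0x12345678) is 0x50678 and
   arm_movt_immediate (0x12345678) is 0x10234.  */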
9603static bfd_vma
9604arm_movw_immediate (bfd_vma value)
9605{
9606  return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
9607}
9608
9609static bfd_vma
9610arm_movt_immediate (bfd_vma value)
9611{
9612  return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
9613}
9614
9615/* Fill in a PLT entry and its associated GOT slot.  If DYNINDX == -1,
9616   the entry lives in .iplt and resolves to (*SYM_VALUE)().
9617   Otherwise, DYNINDX is the index of the symbol in the dynamic
9618   symbol table and SYM_VALUE is undefined.
9619
9620   ROOT_PLT points to the offset of the PLT entry from the start of its
9621   section (.iplt or .plt).  ARM_PLT points to the symbol's ARM-specific
9622   bookkeeping information.
9623
9624   Returns FALSE if there was a problem.  */
9625
9626static bool
9627elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
9628			      union gotplt_union *root_plt,
9629			      struct arm_plt_info *arm_plt,
9630			      int dynindx, bfd_vma sym_value)
9631{
9632  struct elf32_arm_link_hash_table *htab;
9633  asection *sgot;
9634  asection *splt;
9635  asection *srel;
9636  bfd_byte *loc;
9637  bfd_vma plt_index;
9638  Elf_Internal_Rela rel;
9639  bfd_vma got_header_size;
9640
9641  htab = elf32_arm_hash_table (info);
9642
9643  /* Pick the appropriate sections and sizes.  */
9644  if (dynindx == -1)
9645    {
9646      splt = htab->root.iplt;
9647      sgot = htab->root.igotplt;
9648      srel = htab->root.irelplt;
9649
9650      /* There are no reserved entries in .igot.plt, and no special
9651	 first entry in .iplt.  */
9652      got_header_size = 0;
9653    }
9654  else
9655    {
9656      splt = htab->root.splt;
9657      sgot = htab->root.sgotplt;
9658      srel = htab->root.srelplt;
9659
9660      got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
9661    }
9662  BFD_ASSERT (splt != NULL && srel != NULL);
9663
9664  bfd_vma got_offset, got_address, plt_address;
9665  bfd_vma got_displacement, initial_got_entry;
9666  bfd_byte * ptr;
9667
9668  BFD_ASSERT (sgot != NULL);
9669
9670  /* Get the offset into the .(i)got.plt table of the entry that
9671     corresponds to this function.  */
9672  got_offset = (arm_plt->got_offset & -2);
9673
9674  /* Get the index in the procedure linkage table which
9675     corresponds to this symbol.  This is the index of this symbol
9676     in all the symbols for which we are making plt entries.
9677     After the reserved .got.plt entries, all symbols appear in
9678     the same order as in .plt.  */
9679  if (htab->fdpic_p)
9680    /* Function descriptor takes 8 bytes.  */
9681    plt_index = (got_offset - got_header_size) / 8;
9682  else
9683    plt_index = (got_offset - got_header_size) / 4;
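  /* For example, with the usual three reserved .got.plt words
     (got_header_size == 12) and 4-byte entries, got_offset 20 maps to
     plt_index 2, i.e. the third PLT symbol.  */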
9684
9685  /* Calculate the address of the GOT entry.  */
9686  got_address = (sgot->output_section->vma
9687		 + sgot->output_offset
9688		 + got_offset);
9689
9690  /* ...and the address of the PLT entry.  */
9691  plt_address = (splt->output_section->vma
9692		 + splt->output_offset
9693		 + root_plt->offset);
9694
9695  ptr = splt->contents + root_plt->offset;
9696  if (htab->root.target_os == is_vxworks && bfd_link_pic (info))
9697    {
9698      unsigned int i;
9699      bfd_vma val;
9700
9701      for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
9702	{
9703	  val = elf32_arm_vxworks_shared_plt_entry[i];
9704	  if (i == 2)
9705	    val |= got_address - sgot->output_section->vma;
9706	  if (i == 5)
9707	    val |= plt_index * RELOC_SIZE (htab);
9708	  if (i == 2 || i == 5)
9709	    bfd_put_32 (output_bfd, val, ptr);
9710	  else
9711	    put_arm_insn (htab, output_bfd, val, ptr);
9712	}
9713    }
9714  else if (htab->root.target_os == is_vxworks)
9715    {
9716      unsigned int i;
9717      bfd_vma val;
9718
9719      for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
9720	{
9721	  val = elf32_arm_vxworks_exec_plt_entry[i];
9722	  if (i == 2)
9723	    val |= got_address;
9724	  if (i == 4)
9725	    val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
9726	  if (i == 5)
9727	    val |= plt_index * RELOC_SIZE (htab);
9728	  if (i == 2 || i == 5)
9729	    bfd_put_32 (output_bfd, val, ptr);
9730	  else
9731	    put_arm_insn (htab, output_bfd, val, ptr);
9732	}
9733
9734      loc = (htab->srelplt2->contents
9735	     + (plt_index * 2 + 1) * RELOC_SIZE (htab));
9736
9737      /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
9738	 referencing the GOT for this PLT entry.  */
9739      rel.r_offset = plt_address + 8;
9740      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
9741      rel.r_addend = got_offset;
9742      SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
9743      loc += RELOC_SIZE (htab);
9744
9745      /* Create the R_ARM_ABS32 relocation referencing the
9746	 beginning of the PLT for this GOT entry.  */
9747      rel.r_offset = got_address;
9748      rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
9749      rel.r_addend = 0;
9750      SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
9751    }
9752  else if (htab->root.target_os == is_nacl)
9753    {
9754      /* Calculate the displacement between the PLT slot and the
9755	 common tail that's part of the special initial PLT slot.  */
9756      int32_t tail_displacement
9757	= ((splt->output_section->vma + splt->output_offset
9758	    + ARM_NACL_PLT_TAIL_OFFSET)
9759	   - (plt_address + htab->plt_entry_size + 4));
9760      BFD_ASSERT ((tail_displacement & 3) == 0);
9761      tail_displacement >>= 2;
9762
9763      BFD_ASSERT ((tail_displacement & 0xff000000) == 0
9764		  || (-tail_displacement & 0xff000000) == 0);
9765
9766      /* Calculate the displacement between the PLT slot and the entry
9767	 in the GOT.  The offset accounts for the value produced by
9768	 adding to pc in the penultimate instruction of the PLT stub.  */
9769      got_displacement = (got_address
9770			  - (plt_address + htab->plt_entry_size));
9771
9772      /* NaCl does not support interworking at all.  */
9773      BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));
9774
9775      put_arm_insn (htab, output_bfd,
9776		    elf32_arm_nacl_plt_entry[0]
9777		    | arm_movw_immediate (got_displacement),
9778		    ptr + 0);
9779      put_arm_insn (htab, output_bfd,
9780		    elf32_arm_nacl_plt_entry[1]
9781		    | arm_movt_immediate (got_displacement),
9782		    ptr + 4);
9783      put_arm_insn (htab, output_bfd,
9784		    elf32_arm_nacl_plt_entry[2],
9785		    ptr + 8);
9786      put_arm_insn (htab, output_bfd,
9787		    elf32_arm_nacl_plt_entry[3]
9788		    | (tail_displacement & 0x00ffffff),
9789		    ptr + 12);
9790    }
9791  else if (htab->fdpic_p)
9792    {
9793      const bfd_vma *plt_entry = using_thumb_only (htab)
9794	? elf32_arm_fdpic_thumb_plt_entry
9795	: elf32_arm_fdpic_plt_entry;
9796
9797      /* Fill-up Thumb stub if needed.  */
9798      if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
9799	{
9800	  put_thumb_insn (htab, output_bfd,
9801			  elf32_arm_plt_thumb_stub[0], ptr - 4);
9802	  put_thumb_insn (htab, output_bfd,
9803			  elf32_arm_plt_thumb_stub[1], ptr - 2);
9804	}
9805      /* As we are using 32 bit instructions even for the Thumb
9806	 version, we have to use 'put_arm_insn' instead of
9807	 'put_thumb_insn'.  */
9808      put_arm_insn (htab, output_bfd, plt_entry[0], ptr + 0);
9809      put_arm_insn (htab, output_bfd, plt_entry[1], ptr + 4);
9810      put_arm_insn (htab, output_bfd, plt_entry[2], ptr + 8);
9811      put_arm_insn (htab, output_bfd, plt_entry[3], ptr + 12);
9812      bfd_put_32 (output_bfd, got_offset, ptr + 16);
9813
9814      if (!(info->flags & DF_BIND_NOW))
9815	{
9816	  /* funcdesc_value_reloc_offset.  */
9817	  bfd_put_32 (output_bfd,
9818		      htab->root.srelplt->reloc_count * RELOC_SIZE (htab),
9819		      ptr + 20);
9820	  put_arm_insn (htab, output_bfd, plt_entry[6], ptr + 24);
9821	  put_arm_insn (htab, output_bfd, plt_entry[7], ptr + 28);
9822	  put_arm_insn (htab, output_bfd, plt_entry[8], ptr + 32);
9823	  put_arm_insn (htab, output_bfd, plt_entry[9], ptr + 36);
9824	}
9825    }
9826  else if (using_thumb_only (htab))
9827    {
9828      /* PR ld/16017: Generate thumb only PLT entries.  */
9829      if (!using_thumb2 (htab))
9830	{
9831	  /* FIXME: We ought to be able to generate thumb-1 PLT
9832	     instructions...  */
9833	  _bfd_error_handler (_("%pB: warning: thumb-1 mode PLT generation not currently supported"),
9834			      output_bfd);
9835	  return false;
9836	}
9837
9838      /* Calculate the displacement between the PLT slot and the entry in
9839	 the GOT.  The 12-byte offset accounts for the value produced by
9840	 adding to pc in the 3rd instruction of the PLT stub.  */
9841      got_displacement = got_address - (plt_address + 12);
9842
9843      /* As we are using 32 bit instructions we have to use 'put_arm_insn'
9844	 instead of 'put_thumb_insn'.  */
9845      put_arm_insn (htab, output_bfd,
9846		    elf32_thumb2_plt_entry[0]
9847		    | ((got_displacement & 0x000000ff) << 16)
9848		    | ((got_displacement & 0x00000700) << 20)
9849		    | ((got_displacement & 0x00000800) >>  1)
9850		    | ((got_displacement & 0x0000f000) >> 12),
9851		    ptr + 0);
9852      put_arm_insn (htab, output_bfd,
9853		    elf32_thumb2_plt_entry[1]
9854		    | ((got_displacement & 0x00ff0000)      )
9855		    | ((got_displacement & 0x07000000) <<  4)
9856		    | ((got_displacement & 0x08000000) >> 17)
9857		    | ((got_displacement & 0xf0000000) >> 28),
9858		    ptr + 4);
9859      put_arm_insn (htab, output_bfd,
9860		    elf32_thumb2_plt_entry[2],
9861		    ptr + 8);
9862      put_arm_insn (htab, output_bfd,
9863		    elf32_thumb2_plt_entry[3],
9864		    ptr + 12);
9865    }
9866  else
9867    {
9868      /* Calculate the displacement between the PLT slot and the
9869	 entry in the GOT.  The eight-byte offset accounts for the
9870	 value produced by adding to pc in the first instruction
9871	 of the PLT stub.  */
9872      got_displacement = got_address - (plt_address + 8);
9873
9874      if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
9875	{
9876	  put_thumb_insn (htab, output_bfd,
9877			  elf32_arm_plt_thumb_stub[0], ptr - 4);
9878	  put_thumb_insn (htab, output_bfd,
9879			  elf32_arm_plt_thumb_stub[1], ptr - 2);
9880	}
9881
9882      if (!elf32_arm_use_long_plt_entry)
9883	{
9884	  BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
9885
9886	  put_arm_insn (htab, output_bfd,
9887			elf32_arm_plt_entry_short[0]
9888			| ((got_displacement & 0x0ff00000) >> 20),
9889			ptr + 0);
9890	  put_arm_insn (htab, output_bfd,
9891			elf32_arm_plt_entry_short[1]
9892			| ((got_displacement & 0x000ff000) >> 12),
			ptr + 4);
9894	  put_arm_insn (htab, output_bfd,
9895			elf32_arm_plt_entry_short[2]
9896			| (got_displacement & 0x00000fff),
9897			ptr + 8);
9898#ifdef FOUR_WORD_PLT
9899	  bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12);
9900#endif
9901	}
9902      else
9903	{
9904	  put_arm_insn (htab, output_bfd,
9905			elf32_arm_plt_entry_long[0]
9906			| ((got_displacement & 0xf0000000) >> 28),
9907			ptr + 0);
9908	  put_arm_insn (htab, output_bfd,
9909			elf32_arm_plt_entry_long[1]
9910			| ((got_displacement & 0x0ff00000) >> 20),
9911			ptr + 4);
9912	  put_arm_insn (htab, output_bfd,
9913			elf32_arm_plt_entry_long[2]
9914			| ((got_displacement & 0x000ff000) >> 12),
			ptr + 8);
9916	  put_arm_insn (htab, output_bfd,
9917			elf32_arm_plt_entry_long[3]
9918			| (got_displacement & 0x00000fff),
9919			ptr + 12);
9920	}
9921    }
9922
9923  /* Fill in the entry in the .rel(a).(i)plt section.  */
9924  rel.r_offset = got_address;
9925  rel.r_addend = 0;
9926  if (dynindx == -1)
9927    {
9928      /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
9929	 The dynamic linker or static executable then calls SYM_VALUE
9930	 to determine the correct run-time value of the .igot.plt entry.  */
9931      rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
9932      initial_got_entry = sym_value;
9933    }
9934  else
9935    {
9936      /* For FDPIC we will have to resolve a R_ARM_FUNCDESC_VALUE
9937	 used by PLT entry.  */
9938      if (htab->fdpic_p)
9939	{
9940	  rel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
9941	  initial_got_entry = 0;
9942	}
9943      else
9944	{
9945	  rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
9946	  initial_got_entry = (splt->output_section->vma
9947			       + splt->output_offset);
9948
9949	  /* PR ld/16017
9950	     When thumb only we need to set the LSB for any address that
9951	     will be used with an interworking branch instruction.  */
9952	  if (using_thumb_only (htab))
9953	    initial_got_entry |= 1;
9954	}
9955    }
9956
9957  /* Fill in the entry in the global offset table.  */
9958  bfd_put_32 (output_bfd, initial_got_entry,
9959	      sgot->contents + got_offset);
9960
9961  if (htab->fdpic_p && !(info->flags & DF_BIND_NOW))
9962    {
9963      /* Setup initial funcdesc value.  */
9964      /* FIXME: we don't support lazy binding because there is a
9965	 race condition between both words getting written and
9966	 some other thread attempting to read them. The ARM
9967	 architecture does not have an atomic 64 bit load/store
9968	 instruction that could be used to prevent it; it is
9969	 recommended that threaded FDPIC applications run with the
9970	 LD_BIND_NOW environment variable set.  */
9971      bfd_put_32 (output_bfd, plt_address + 0x18,
9972		  sgot->contents + got_offset);
9973      bfd_put_32 (output_bfd, -1 /*TODO*/,
9974		  sgot->contents + got_offset + 4);
9975    }
9976
9977  if (dynindx == -1)
9978    elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
9979  else
9980    {
9981      if (htab->fdpic_p)
9982	{
	  /* For FDPIC we put PLT relocations into .rel.got when not
	     using lazy binding, and into .rel.plt otherwise.  For now,
	     we don't support lazy binding, so put them in .rel.got.  */
9986	  if (info->flags & DF_BIND_NOW)
9987	    elf32_arm_add_dynreloc (output_bfd, info, htab->root.srelgot, &rel);
9988	  else
9989	    elf32_arm_add_dynreloc (output_bfd, info, htab->root.srelplt, &rel);
9990	}
9991      else
9992	{
9993	  loc = srel->contents + plt_index * RELOC_SIZE (htab);
9994	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
9995	}
9996    }
9997
9998  return true;
9999}
10000
10001/* Some relocations map to different relocations depending on the
10002   target.  Return the real relocation.  */
10003
10004static int
10005arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
10006		     int r_type)
10007{
10008  switch (r_type)
10009    {
10010    case R_ARM_TARGET1:
10011      if (globals->target1_is_rel)
10012	return R_ARM_REL32;
10013      else
10014	return R_ARM_ABS32;
10015
10016    case R_ARM_TARGET2:
10017      return globals->target2_reloc;
10018
10019    default:
10020      return r_type;
10021    }
10022}
10023
10024/* Return the base VMA address which should be subtracted from real addresses
10025   when resolving @dtpoff relocation.
10026   This is PT_TLS segment p_vaddr.  */
10027
10028static bfd_vma
10029dtpoff_base (struct bfd_link_info *info)
10030{
10031  /* If tls_sec is NULL, we should have signalled an error already.  */
10032  if (elf_hash_table (info)->tls_sec == NULL)
10033    return 0;
10034  return elf_hash_table (info)->tls_sec->vma;
10035}
10036
10037/* Return the relocation value for @tpoff relocation
10038   if STT_TLS virtual address is ADDRESS.  */
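/* The calculation below follows the ARM (variant 1) static TLS layout:
   the thread pointer addresses a TCB_SIZE-byte control block, padded up
   to the alignment of the TLS segment, with the TLS data placed
   immediately after it.  */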
10039
10040static bfd_vma
10041tpoff (struct bfd_link_info *info, bfd_vma address)
10042{
10043  struct elf_link_hash_table *htab = elf_hash_table (info);
10044  bfd_vma base;
10045
10046  /* If tls_sec is NULL, we should have signalled an error already.  */
10047  if (htab->tls_sec == NULL)
10048    return 0;
10049  base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
10050  return address - htab->tls_sec->vma + base;
10051}
10052
10053/* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
10054   VALUE is the relocation value.  */
10055
10056static bfd_reloc_status_type
10057elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
10058{
10059  if (value > 0xfff)
10060    return bfd_reloc_overflow;
10061
10062  value |= bfd_get_32 (abfd, data) & 0xfffff000;
10063  bfd_put_32 (abfd, value, data);
10064  return bfd_reloc_ok;
10065}
10066
10067/* Handle TLS relaxations.  Relaxing is possible for symbols that use
10068   R_ARM_TLS_GOTDESC, R_ARM_{,THM_}TLS_CALL or
10069   R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
10070
10071   Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
10072   is to then call final_link_relocate.  Return other values in the
10073   case of error.
10074
10075   FIXME: When --emit-relocs is in effect, we'll emit relocs describing
10076   the pre-relaxed code.  It would be nice if the relocs were updated
10077   to match the optimization.  */
10078
10079static bfd_reloc_status_type
10080elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
10081		     bfd *input_bfd, asection *input_sec, bfd_byte *contents,
10082		     Elf_Internal_Rela *rel, unsigned long is_local)
10083{
10084  unsigned long insn;
10085
10086  switch (ELF32_R_TYPE (rel->r_info))
10087    {
10088    default:
10089      return bfd_reloc_notsupported;
10090
10091    case R_ARM_TLS_GOTDESC:
10092      if (is_local)
10093	insn = 0;
10094      else
10095	{
10096	  insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
10097	  if (insn & 1)
10098	    insn -= 5; /* THUMB */
10099	  else
10100	    insn -= 8; /* ARM */
10101	}
10102      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
10103      return bfd_reloc_continue;
10104
10105    case R_ARM_THM_TLS_DESCSEQ:
10106      /* Thumb insn.  */
10107      insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
10108      if ((insn & 0xff78) == 0x4478)	  /* add rx, pc */
10109	{
10110	  if (is_local)
10111	    /* nop */
10112	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
10113	}
10114      else if ((insn & 0xffc0) == 0x6840)  /* ldr rx,[ry,#4] */
10115	{
10116	  if (is_local)
10117	    /* nop */
10118	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
10119	  else
10120	    /* ldr rx,[ry] */
10121	    bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
10122	}
10123      else if ((insn & 0xff87) == 0x4780)  /* blx rx */
10124	{
10125	  if (is_local)
10126	    /* nop */
10127	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
10128	  else
10129	    /* mov r0, rx */
10130	    bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
10131			contents + rel->r_offset);
10132	}
10133      else
10134	{
10135	  if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
10136	    /* It's a 32 bit instruction, fetch the rest of it for
10137	       error generation.  */
10138	    insn = (insn << 16)
10139	      | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
10140	  _bfd_error_handler
10141	    /* xgettext:c-format */
10142	    (_("%pB(%pA+%#" PRIx64 "): "
10143	       "unexpected %s instruction '%#lx' in TLS trampoline"),
10144	     input_bfd, input_sec, (uint64_t) rel->r_offset,
10145	     "Thumb", insn);
10146	  return bfd_reloc_notsupported;
10147	}
10148      break;
10149
10150    case R_ARM_TLS_DESCSEQ:
10151      /* ARM insn.  */
10152      insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
10153      if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
10154	{
10155	  if (is_local)
10156	    /* mov rx, ry */
10157	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
10158			contents + rel->r_offset);
10159	}
10160      else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
10161	{
10162	  if (is_local)
10163	    /* nop */
10164	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
10165	  else
10166	    /* ldr rx,[ry] */
10167	    bfd_put_32 (input_bfd, insn & 0xfffff000,
10168			contents + rel->r_offset);
10169	}
10170      else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
10171	{
10172	  if (is_local)
10173	    /* nop */
10174	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
10175	  else
10176	    /* mov r0, rx */
10177	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
10178			contents + rel->r_offset);
10179	}
10180      else
10181	{
10182	  _bfd_error_handler
10183	    /* xgettext:c-format */
10184	    (_("%pB(%pA+%#" PRIx64 "): "
10185	       "unexpected %s instruction '%#lx' in TLS trampoline"),
10186	     input_bfd, input_sec, (uint64_t) rel->r_offset,
10187	     "ARM", insn);
10188	  return bfd_reloc_notsupported;
10189	}
10190      break;
10191
10192    case R_ARM_TLS_CALL:
10193      /* GD->IE relaxation, turn the instruction into 'nop' or
10194	 'ldr r0, [pc,r0]'  */
10195      insn = is_local ? 0xe1a00000 : 0xe79f0000;
10196      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
10197      break;
10198
10199    case R_ARM_THM_TLS_CALL:
10200      /* GD->IE relaxation.  */
10201      if (!is_local)
10202	/* add r0,pc; ldr r0, [r0]  */
10203	insn = 0x44786800;
10204      else if (using_thumb2 (globals))
10205	/* nop.w */
10206	insn = 0xf3af8000;
10207      else
10208	/* nop; nop */
10209	insn = 0xbf00bf00;
10210
10211      bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
10212      bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
10213      break;
10214    }
10215  return bfd_reloc_ok;
10216}
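
/* For illustration only, a summary of the rewrites performed by
   elf32_arm_tls_relax above (IS_LOCAL selects the GD->LE form,
   otherwise GD->IE):

     R_ARM_TLS_CALL:      the call insn becomes  nop                (LE)
                                             or  ldr r0, [pc, r0]   (IE)
     R_ARM_THM_TLS_CALL:  the call insn becomes  nop.w or nop; nop  (LE)
                                             or  add r0, pc; ldr r0, [r0]  (IE)

   The {,THM_}TLS_DESCSEQ cases rewrite the individual instructions of
   the descriptor sequence in the same spirit: each one is turned into
   a nop or register move for LE, or into the matching load/move form
   (or left unchanged) for IE.  */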
10217
10218/* For a given value of n, calculate the value of G_n as required to
10219   deal with group relocations.  We return it in the form of an
10220   encoded constant-and-rotation, together with the final residual.  If n is
10221   specified as less than zero, then final_residual is filled with the
10222   input value and no further action is performed.  */
10223
10224static bfd_vma
10225calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
10226{
10227  int current_n;
10228  bfd_vma g_n;
10229  bfd_vma encoded_g_n = 0;
10230  bfd_vma residual = value; /* Also known as Y_n.  */
10231
10232  for (current_n = 0; current_n <= n; current_n++)
10233    {
10234      int shift;
10235
10236      /* Calculate which part of the value to mask.  */
10237      if (residual == 0)
10238	shift = 0;
10239      else
10240	{
10241	  int msb;
10242
10243	  /* Determine the most significant bit in the residual and
10244	     align the resulting value to a 2-bit boundary.  */
10245	  for (msb = 30; msb >= 0; msb -= 2)
10246	    if (residual & (3u << msb))
10247	      break;
10248
10249	  /* The desired shift is now (msb - 6), or zero, whichever
10250	     is the greater.  */
10251	  shift = msb - 6;
10252	  if (shift < 0)
10253	    shift = 0;
10254	}
10255
10256      /* Calculate g_n in 32-bit as well as encoded constant+rotation form.  */
10257      g_n = residual & (0xff << shift);
10258      encoded_g_n = (g_n >> shift)
10259		    | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
10260
10261      /* Calculate the residual for the next time around.  */
10262      residual &= ~g_n;
10263    }
10264
10265  *final_residual = residual;
10266
10267  return encoded_g_n;
10268}
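
/* Worked example for the function above (illustrative values only):
   for VALUE == 0x12345 and N == 1, the first pass peels off
   G_0 == 0x12000, encoded as imm8 0x48 with rotation field 11
   (0xb48), leaving a residual of 0x345; the second pass peels off
   G_1 == 0x344, encoded as 0xfd1, so the function returns 0xfd1 and
   sets *FINAL_RESIDUAL to 1.  */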
10269
10270/* Given an ARM instruction, determine whether it is an ADD or a SUB.
10271   Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise.  */
10272
10273static int
10274identify_add_or_sub (bfd_vma insn)
10275{
10276  int opcode = insn & 0x1e00000;
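  /* The mask 0x1e00000 selects the data-processing opcode field
     (bits 24:21): ADD is opcode 0b0100, i.e. 1 << 23 once in place,
     and SUB is opcode 0b0010, i.e. 1 << 22.  */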
10277
10278  if (opcode == 1 << 23) /* ADD */
10279    return 1;
10280
10281  if (opcode == 1 << 22) /* SUB */
10282    return -1;
10283
10284  return 0;
10285}
10286
10287/* Perform a relocation as part of a final link.  */
10288
10289static bfd_reloc_status_type
10290elf32_arm_final_link_relocate (reloc_howto_type *	    howto,
10291			       bfd *			    input_bfd,
10292			       bfd *			    output_bfd,
10293			       asection *		    input_section,
10294			       bfd_byte *		    contents,
10295			       Elf_Internal_Rela *	    rel,
10296			       bfd_vma			    value,
10297			       struct bfd_link_info *	    info,
10298			       asection *		    sym_sec,
10299			       const char *		    sym_name,
10300			       unsigned char		    st_type,
10301			       enum arm_st_branch_type	    branch_type,
10302			       struct elf_link_hash_entry * h,
10303			       bool *			    unresolved_reloc_p,
10304			       char **			    error_message)
10305{
10306  unsigned long			r_type = howto->type;
10307  unsigned long			r_symndx;
10308  bfd_byte *			hit_data = contents + rel->r_offset;
10309  bfd_vma *			local_got_offsets;
10310  bfd_vma *			local_tlsdesc_gotents;
10311  asection *			sgot;
10312  asection *			splt;
10313  asection *			sreloc = NULL;
10314  asection *			srelgot;
10315  bfd_vma			addend;
10316  bfd_signed_vma		signed_addend;
10317  unsigned char			dynreloc_st_type;
10318  bfd_vma			dynreloc_value;
10319  struct elf32_arm_link_hash_table * globals;
10320  struct elf32_arm_link_hash_entry *eh;
10321  union gotplt_union	       *root_plt;
10322  struct arm_plt_info	       *arm_plt;
10323  bfd_vma			plt_offset;
10324  bfd_vma			gotplt_offset;
10325  bool				has_iplt_entry;
10326  bool				resolved_to_zero;
10327
10328  globals = elf32_arm_hash_table (info);
10329  if (globals == NULL)
10330    return bfd_reloc_notsupported;
10331
10332  BFD_ASSERT (is_arm_elf (input_bfd));
10333  BFD_ASSERT (howto != NULL);
10334
10335  /* Some relocation types map to different relocations depending on the
10336     target.  We pick the right one here.  */
10337  r_type = arm_real_reloc_type (globals, r_type);
10338
10339  /* It is possible to have linker relaxations on some TLS access
10340     models.  Update our information here.  */
10341  r_type = elf32_arm_tls_transition (info, r_type, h);
10342
10343  if (r_type != howto->type)
10344    howto = elf32_arm_howto_from_type (r_type);
10345
10346  eh = (struct elf32_arm_link_hash_entry *) h;
10347  sgot = globals->root.sgot;
10348  local_got_offsets = elf_local_got_offsets (input_bfd);
10349  local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
10350
10351  if (globals->root.dynamic_sections_created)
10352    srelgot = globals->root.srelgot;
10353  else
10354    srelgot = NULL;
10355
10356  r_symndx = ELF32_R_SYM (rel->r_info);
10357
10358  if (globals->use_rel)
10359    {
10360      bfd_vma sign;
10361
10362      switch (bfd_get_reloc_size (howto))
10363	{
10364	case 1: addend = bfd_get_8 (input_bfd, hit_data); break;
10365	case 2: addend = bfd_get_16 (input_bfd, hit_data); break;
10366	case 4: addend = bfd_get_32 (input_bfd, hit_data); break;
10367	default: addend = 0; break;
10368	}
10369      /* Note: the addend and signed_addend calculated here are
10370	 incorrect for any split field.  */
10371      addend &= howto->src_mask;
10372      sign = howto->src_mask & ~(howto->src_mask >> 1);
10373      signed_addend = (addend ^ sign) - sign;
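      /* Example of the sign extension above: with a 24-bit src_mask,
	 SIGN is 0x800000, so a masked field of 0xfffffe becomes
	 (0xfffffe ^ 0x800000) - 0x800000 == -2; the shifts below then
	 scale it to a byte quantity (-8 for R_ARM_PC24).  */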
10374      signed_addend = (bfd_vma) signed_addend << howto->rightshift;
10375      addend <<= howto->rightshift;
10376    }
10377  else
10378    addend = signed_addend = rel->r_addend;
10379
10380  /* ST_BRANCH_TO_ARM is nonsense on Thumb-only targets when we
10381     are resolving a function call relocation.  */
10382  if (using_thumb_only (globals)
10383      && (r_type == R_ARM_THM_CALL
10384	  || r_type == R_ARM_THM_JUMP24)
10385      && branch_type == ST_BRANCH_TO_ARM)
10386    branch_type = ST_BRANCH_TO_THUMB;
10387
10388  /* Record the symbol information that should be used in dynamic
10389     relocations.  */
10390  dynreloc_st_type = st_type;
10391  dynreloc_value = value;
10392  if (branch_type == ST_BRANCH_TO_THUMB)
10393    dynreloc_value |= 1;
10394
10395  /* Find out whether the symbol has a PLT.  Set ST_VALUE, BRANCH_TYPE and
10396     VALUE appropriately for relocations that we resolve at link time.  */
10397  has_iplt_entry = false;
10398  if (elf32_arm_get_plt_info (input_bfd, globals, eh, r_symndx, &root_plt,
10399			      &arm_plt)
10400      && root_plt->offset != (bfd_vma) -1)
10401    {
10402      plt_offset = root_plt->offset;
10403      gotplt_offset = arm_plt->got_offset;
10404
10405      if (h == NULL || eh->is_iplt)
10406	{
10407	  has_iplt_entry = true;
10408	  splt = globals->root.iplt;
10409
10410	  /* Populate .iplt entries here, because not all of them will
10411	     be seen by finish_dynamic_symbol.  The lower bit is set if
10412	     we have already populated the entry.  */
10413	  if (plt_offset & 1)
10414	    plt_offset--;
10415	  else
10416	    {
10417	      if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
10418						-1, dynreloc_value))
10419		root_plt->offset |= 1;
10420	      else
10421		return bfd_reloc_notsupported;
10422	    }
10423
10424	  /* Static relocations always resolve to the .iplt entry.  */
10425	  st_type = STT_FUNC;
10426	  value = (splt->output_section->vma
10427		   + splt->output_offset
10428		   + plt_offset);
10429	  branch_type = ST_BRANCH_TO_ARM;
10430
10431	  /* If there are non-call relocations that resolve to the .iplt
10432	     entry, then all dynamic ones must too.  */
10433	  if (arm_plt->noncall_refcount != 0)
10434	    {
10435	      dynreloc_st_type = st_type;
10436	      dynreloc_value = value;
10437	    }
10438	}
10439      else
10440	/* We populate the .plt entry in finish_dynamic_symbol.  */
10441	splt = globals->root.splt;
10442    }
10443  else
10444    {
10445      splt = NULL;
10446      plt_offset = (bfd_vma) -1;
10447      gotplt_offset = (bfd_vma) -1;
10448    }
10449
10450  resolved_to_zero = (h != NULL
10451		      && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h));
10452
10453  switch (r_type)
10454    {
10455    case R_ARM_NONE:
10456      /* We don't need to find a value for this symbol.  It's just a
10457	 marker.  */
10458      *unresolved_reloc_p = false;
10459      return bfd_reloc_ok;
10460
10461    case R_ARM_ABS12:
10462      if (globals->root.target_os != is_vxworks)
10463	return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10464      /* Fall through.  */
10465
10466    case R_ARM_PC24:
10467    case R_ARM_ABS32:
10468    case R_ARM_ABS32_NOI:
10469    case R_ARM_REL32:
10470    case R_ARM_REL32_NOI:
10471    case R_ARM_CALL:
10472    case R_ARM_JUMP24:
10473    case R_ARM_XPC25:
10474    case R_ARM_PREL31:
10475    case R_ARM_PLT32:
10476      /* Handle relocations which should use the PLT entry.  ABS32/REL32
10477	 will use the symbol's value, which may point to a PLT entry, but we
10478	 don't need to handle that here.  If we created a PLT entry, all
10479	 branches in this object should go to it, except if the PLT is too
10480	 far away, in which case a long branch stub should be inserted.  */
10481      if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
10482	   && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
10483	   && r_type != R_ARM_CALL
10484	   && r_type != R_ARM_JUMP24
10485	   && r_type != R_ARM_PLT32)
10486	  && plt_offset != (bfd_vma) -1)
10487	{
10488	  /* If we've created a .plt section, and assigned a PLT entry
10489	     to this function, it must either be a STT_GNU_IFUNC reference
10490	     or not be known to bind locally.  In other cases, we should
10491	     have cleared the PLT entry by now.  */
10492	  BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
10493
10494	  value = (splt->output_section->vma
10495		   + splt->output_offset
10496		   + plt_offset);
10497	  *unresolved_reloc_p = false;
10498	  return _bfd_final_link_relocate (howto, input_bfd, input_section,
10499					   contents, rel->r_offset, value,
10500					   rel->r_addend);
10501	}
10502
10503      /* When generating a shared object or relocatable executable, these
10504	 relocations are copied into the output file to be resolved at
10505	 run time.  */
10506      if ((bfd_link_pic (info)
10507	   || globals->root.is_relocatable_executable
10508	   || globals->fdpic_p)
10509	  && (input_section->flags & SEC_ALLOC)
10510	  && !(globals->root.target_os == is_vxworks
10511	       && strcmp (input_section->output_section->name,
10512			  ".tls_vars") == 0)
10513	  && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
10514	      || !SYMBOL_CALLS_LOCAL (info, h))
10515	  && !(input_bfd == globals->stub_bfd
10516	       && strstr (input_section->name, STUB_SUFFIX))
10517	  && (h == NULL
10518	      || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
10519		  && !resolved_to_zero)
10520	      || h->root.type != bfd_link_hash_undefweak)
10521	  && r_type != R_ARM_PC24
10522	  && r_type != R_ARM_CALL
10523	  && r_type != R_ARM_JUMP24
10524	  && r_type != R_ARM_PREL31
10525	  && r_type != R_ARM_PLT32)
10526	{
10527	  Elf_Internal_Rela outrel;
10528	  bool skip, relocate;
10529	  int isrofixup = 0;
10530
10531	  if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
10532	      && !h->def_regular)
10533	    {
10534	      char *v = _("shared object");
10535
10536	      if (bfd_link_executable (info))
10537		v = _("PIE executable");
10538
10539	      _bfd_error_handler
10540		(_("%pB: relocation %s against external or undefined symbol `%s'"
10541		   " can not be used when making a %s; recompile with -fPIC"), input_bfd,
10542		 elf32_arm_howto_table_1[r_type].name, h->root.root.string, v);
10543	      return bfd_reloc_notsupported;
10544	    }
10545
10546	  *unresolved_reloc_p = false;
10547
10548	  if (sreloc == NULL && globals->root.dynamic_sections_created)
10549	    {
10550	      sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
10551							   ! globals->use_rel);
10552
10553	      if (sreloc == NULL)
10554		return bfd_reloc_notsupported;
10555	    }
10556
10557	  skip = false;
10558	  relocate = false;
10559
10560	  outrel.r_addend = addend;
10561	  outrel.r_offset =
10562	    _bfd_elf_section_offset (output_bfd, info, input_section,
10563				     rel->r_offset);
10564	  if (outrel.r_offset == (bfd_vma) -1)
10565	    skip = true;
10566	  else if (outrel.r_offset == (bfd_vma) -2)
10567	    skip = true, relocate = true;
10568	  outrel.r_offset += (input_section->output_section->vma
10569			      + input_section->output_offset);
10570
10571	  if (skip)
10572	    memset (&outrel, 0, sizeof outrel);
10573	  else if (h != NULL
10574		   && h->dynindx != -1
10575		   && (!bfd_link_pic (info)
10576		       || !(bfd_link_pie (info)
10577			    || SYMBOLIC_BIND (info, h))
10578		       || !h->def_regular))
10579	    outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
10580	  else
10581	    {
10582	      int symbol;
10583
10584	      /* This symbol is local, or marked to become local.  */
10585	      BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI
10586			  || (globals->fdpic_p && !bfd_link_pic (info)));
10587	      /* On SVR4-ish systems, the dynamic loader cannot
10588		 relocate the text and data segments independently,
10589		 so the symbol does not matter.  */
10590	      symbol = 0;
10591	      if (dynreloc_st_type == STT_GNU_IFUNC)
10592		/* We have an STT_GNU_IFUNC symbol that doesn't resolve
10593		   to the .iplt entry.  Instead, every non-call reference
10594		   must use an R_ARM_IRELATIVE relocation to obtain the
10595		   correct run-time address.  */
10596		outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
10597	      else if (globals->fdpic_p && !bfd_link_pic (info))
10598		isrofixup = 1;
10599	      else
10600		outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
10601	      if (globals->use_rel)
10602		relocate = true;
10603	      else
10604		outrel.r_addend += dynreloc_value;
10605	    }
10606
10607	  if (isrofixup)
10608	    arm_elf_add_rofixup (output_bfd, globals->srofixup, outrel.r_offset);
10609	  else
10610	    elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
10611
10612	  /* If this reloc is against an external symbol, we do not want to
10613	     fiddle with the addend.  Otherwise, we need to include the symbol
10614	     value so that it becomes an addend for the dynamic reloc.  */
10615	  if (! relocate)
10616	    return bfd_reloc_ok;
10617
10618	  return _bfd_final_link_relocate (howto, input_bfd, input_section,
10619					   contents, rel->r_offset,
10620					   dynreloc_value, (bfd_vma) 0);
10621	}
10622      else switch (r_type)
10623	{
10624	case R_ARM_ABS12:
10625	  return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10626
10627	case R_ARM_XPC25:	  /* Arm BLX instruction.  */
10628	case R_ARM_CALL:
10629	case R_ARM_JUMP24:
10630	case R_ARM_PC24:	  /* Arm B/BL instruction.  */
10631	case R_ARM_PLT32:
10632	  {
10633	  struct elf32_arm_stub_hash_entry *stub_entry = NULL;
10634
10635	  if (r_type == R_ARM_XPC25)
10636	    {
10637	      /* Check for Arm calling Arm function.  */
10638	      /* FIXME: Should we translate the instruction into a BL
10639		 instruction instead?  */
10640	      if (branch_type != ST_BRANCH_TO_THUMB)
10641		_bfd_error_handler
10642		  (_("%pB: warning: %s BLX instruction targets"
10643		     " %s function '%s'"),
10644		   input_bfd, "ARM",
10645		   "ARM", h ? h->root.root.string : "(local)");
10646	    }
10647	  else if (r_type == R_ARM_PC24)
10648	    {
10649	      /* Check for Arm calling Thumb function.  */
10650	      if (branch_type == ST_BRANCH_TO_THUMB)
10651		{
10652		  if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
10653					       output_bfd, input_section,
10654					       hit_data, sym_sec, rel->r_offset,
10655					       signed_addend, value,
10656					       error_message))
10657		    return bfd_reloc_ok;
10658		  else
10659		    return bfd_reloc_dangerous;
10660		}
10661	    }
10662
10663	  /* Check if a stub has to be inserted because the
10664	     destination is too far or we are changing mode.  */
10665	  if (   r_type == R_ARM_CALL
10666	      || r_type == R_ARM_JUMP24
10667	      || r_type == R_ARM_PLT32)
10668	    {
10669	      enum elf32_arm_stub_type stub_type = arm_stub_none;
10670	      struct elf32_arm_link_hash_entry *hash;
10671
10672	      hash = (struct elf32_arm_link_hash_entry *) h;
10673	      stub_type = arm_type_of_stub (info, input_section, rel,
10674					    st_type, &branch_type,
10675					    hash, value, sym_sec,
10676					    input_bfd, sym_name);
10677
10678	      if (stub_type != arm_stub_none)
10679		{
10680		  /* The target is out of reach, so redirect the
10681		     branch to the local stub for this function.  */
10682		  stub_entry = elf32_arm_get_stub_entry (input_section,
10683							 sym_sec, h,
10684							 rel, globals,
10685							 stub_type);
10687		  if (stub_entry != NULL)
10688		    value = (stub_entry->stub_offset
10689			     + stub_entry->stub_sec->output_offset
10690			     + stub_entry->stub_sec->output_section->vma);
10691
10692		  if (plt_offset != (bfd_vma) -1)
10693		    *unresolved_reloc_p = false;
10695		}
10696	      else
10697		{
10698		  /* If the call goes through a PLT entry, make sure to
10699		     check distance to the right destination address.  */
10700		  if (plt_offset != (bfd_vma) -1)
10701		    {
10702		      value = (splt->output_section->vma
10703			       + splt->output_offset
10704			       + plt_offset);
10705		      *unresolved_reloc_p = false;
10706		      /* The PLT entry is in ARM mode, regardless of the
10707			 target function.  */
10708		      branch_type = ST_BRANCH_TO_ARM;
10709		    }
10710		}
10711	    }
10712
10713	  /* The ARM ELF ABI says that this reloc is computed as: S - P + A
10714	     where:
10715	      S is the address of the symbol in the relocation.
10716	      P is the address of the instruction being relocated.
10717	      A is the addend (extracted from the instruction) in bytes.
10718
10719	     S is held in 'value'.
10720	     P is the base address of the section containing the
10721	       instruction plus the offset of the reloc into that
10722	       section, ie:
10723		 (input_section->output_section->vma +
10724		  input_section->output_offset +
10725		  rel->r_offset).
10726	     A is the addend, converted into bytes, ie:
10727		 (signed_addend * 4)
10728
10729	     Note: None of these operations have knowledge of the pipeline
10730	     size of the processor, thus it is up to the assembler to
10731	     encode this information into the addend.  */
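	  /* Worked example with made-up numbers: for a branch at
	     P == 0x8000 with an addend of -8 bytes (a field value of
	     -2) targeting S == 0x9000, the statements below compute
	     0x9000 - 0x8000 - 8 == 0xff8, which is then shifted back
	     down to 0x3fe for re-insertion into the instruction.  */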
10732	  value -= (input_section->output_section->vma
10733		    + input_section->output_offset);
10734	  value -= rel->r_offset;
10735	  value += signed_addend;
10736
10737	  signed_addend = value;
10738	  signed_addend >>= howto->rightshift;
10739
10740	  /* A branch to an undefined weak symbol is turned into a jump to
10741	     the next instruction unless a PLT entry will be created.
10742	     Do the same for local undefined symbols (but not for STN_UNDEF).
10743	     The jump to the next instruction is optimized as a NOP depending
10744	     on the architecture.  */
10745	  if (h ? (h->root.type == bfd_link_hash_undefweak
10746		   && plt_offset == (bfd_vma) -1)
10747	      : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
10748	    {
10749	      value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
10750
10751	      if (arch_has_arm_nop (globals))
10752		value |= 0x0320f000;
10753	      else
10754		value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0.  */
10755	    }
10756	  else
10757	    {
10758	      /* Perform a signed range check.  */
10759	      if (   signed_addend >   ((bfd_signed_vma)  (howto->dst_mask >> 1))
10760		  || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
10761		return bfd_reloc_overflow;
10762
10763	      addend = (value & 2);
10764
10765	      value = (signed_addend & howto->dst_mask)
10766		| (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
10767
10768	      if (r_type == R_ARM_CALL)
10769		{
10770		  /* Set the H bit in the BLX instruction.  */
10771		  if (branch_type == ST_BRANCH_TO_THUMB)
10772		    {
10773		      if (addend)
10774			value |= (1 << 24);
10775		      else
10776			value &= ~(bfd_vma)(1 << 24);
10777		    }
10778
10779		  /* Select the correct instruction (BL or BLX).  */
10780		  /* Only if we are not handling a BL to a stub. In this
10781		     case, mode switching is performed by the stub.  */
10782		  if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
10783		    value |= (1 << 28);
10784		  else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
10785		    {
10786		      value &= ~(bfd_vma)(1 << 28);
10787		      value |= (1 << 24);
10788		    }
10789		}
10790	    }
10791	  }
10792	  break;
10793
10794	case R_ARM_ABS32:
10795	  value += addend;
10796	  if (branch_type == ST_BRANCH_TO_THUMB)
10797	    value |= 1;
10798	  break;
10799
10800	case R_ARM_ABS32_NOI:
10801	  value += addend;
10802	  break;
10803
10804	case R_ARM_REL32:
10805	  value += addend;
10806	  if (branch_type == ST_BRANCH_TO_THUMB)
10807	    value |= 1;
10808	  value -= (input_section->output_section->vma
10809		    + input_section->output_offset + rel->r_offset);
10810	  break;
10811
10812	case R_ARM_REL32_NOI:
10813	  value += addend;
10814	  value -= (input_section->output_section->vma
10815		    + input_section->output_offset + rel->r_offset);
10816	  break;
10817
10818	case R_ARM_PREL31:
10819	  value -= (input_section->output_section->vma
10820		    + input_section->output_offset + rel->r_offset);
10821	  value += signed_addend;
10822	  if (! h || h->root.type != bfd_link_hash_undefweak)
10823	    {
10824	      /* Check for overflow.  */
10825	      if ((value ^ (value >> 1)) & (1 << 30))
10826		return bfd_reloc_overflow;
10827	    }
10828	  value &= 0x7fffffff;
10829	  value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
10830	  if (branch_type == ST_BRANCH_TO_THUMB)
10831	    value |= 1;
10832	  break;
10833	}
10834
10835      bfd_put_32 (input_bfd, value, hit_data);
10836      return bfd_reloc_ok;
10837
10838    case R_ARM_ABS8:
10839      value += addend;
10840
10841      /* There is no way to tell whether the user intended to use a signed or
10842	 unsigned addend.  When checking for overflow we accept either,
10843	 as specified by the AAELF.  */
10844      if ((long) value > 0xff || (long) value < -0x80)
10845	return bfd_reloc_overflow;
10846
10847      bfd_put_8 (input_bfd, value, hit_data);
10848      return bfd_reloc_ok;
10849
10850    case R_ARM_ABS16:
10851      value += addend;
10852
10853      /* See comment for R_ARM_ABS8.  */
10854      if ((long) value > 0xffff || (long) value < -0x8000)
10855	return bfd_reloc_overflow;
10856
10857      bfd_put_16 (input_bfd, value, hit_data);
10858      return bfd_reloc_ok;
10859
10860    case R_ARM_THM_ABS5:
10861      /* Support ldr and str instructions for the thumb.  */
10862      if (globals->use_rel)
10863	{
10864	  /* Need to refetch addend.  */
10865	  addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
10866	  /* ??? Need to determine shift amount from operand size.  */
10867	  addend >>= howto->rightshift;
10868	}
10869      value += addend;
10870
10871      /* ??? Isn't value unsigned?  */
10872      if ((long) value > 0x1f || (long) value < -0x10)
10873	return bfd_reloc_overflow;
10874
10875      /* ??? Value needs to be properly shifted into place first.  */
10876      value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
10877      bfd_put_16 (input_bfd, value, hit_data);
10878      return bfd_reloc_ok;
10879
10880    case R_ARM_THM_ALU_PREL_11_0:
10881      /* Corresponds to: addw reg, pc, #offset (and similarly for subw).  */
10882      {
10883	bfd_vma insn;
10884	bfd_signed_vma relocation;
10885
10886	insn = (bfd_get_16 (input_bfd, hit_data) << 16)
10887	     | bfd_get_16 (input_bfd, hit_data + 2);
10888
10889	if (globals->use_rel)
10890	  {
10891	    signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
10892			  | ((insn & (1 << 26)) >> 15);
10893	    if (insn & 0xf00000)
10894	      signed_addend = -signed_addend;
10895	  }
10896
10897	relocation = value + signed_addend;
10898	relocation -= Pa (input_section->output_section->vma
10899			  + input_section->output_offset
10900			  + rel->r_offset);
10901
10902	/* PR 21523: Use an absolute value.  The user of this reloc will
10903	   have already selected an ADD or SUB insn appropriately.  */
10904	value = llabs (relocation);
10905
10906	if (value >= 0x1000)
10907	  return bfd_reloc_overflow;
10908
10909	/* Destination is Thumb.  Force bit 0 to 1 to reflect this.  */
10910	if (branch_type == ST_BRANCH_TO_THUMB)
10911	  value |= 1;
10912
10913	insn = (insn & 0xfb0f8f00) | (value & 0xff)
10914	     | ((value & 0x700) << 4)
10915	     | ((value & 0x800) << 15);
10916	if (relocation < 0)
10917	  insn |= 0xa00000;
10918
10919	bfd_put_16 (input_bfd, insn >> 16, hit_data);
10920	bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
10921
10922	return bfd_reloc_ok;
10923      }
10924
10925    case R_ARM_THM_PC8:
10926      /* PR 10073:  This reloc is not generated by the GNU toolchain,
10927	 but it is supported for compatibility with third-party libraries
10928	 generated by other compilers, specifically those from ARM and IAR.  */
10929      {
10930	bfd_vma insn;
10931	bfd_signed_vma relocation;
10932
10933	insn = bfd_get_16 (input_bfd, hit_data);
10934
10935	if (globals->use_rel)
10936	  addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) - 4;
10937
10938	relocation = value + addend;
10939	relocation -= Pa (input_section->output_section->vma
10940			  + input_section->output_offset
10941			  + rel->r_offset);
10942
10943	value = relocation;
10944
10945	/* We do not check for overflow of this reloc.  Although strictly
10946	   speaking this is incorrect, it appears to be necessary in order
10947	   to work with IAR generated relocs.  Since GCC and GAS do not
10948	   generate R_ARM_THM_PC8 relocs, the lack of a check should not be
10949	   a problem for them.  */
10950	value &= 0x3fc;
10951
10952	insn = (insn & 0xff00) | (value >> 2);
10953
10954	bfd_put_16 (input_bfd, insn, hit_data);
10955
10956	return bfd_reloc_ok;
10957      }
10958
10959    case R_ARM_THM_PC12:
10960      /* Corresponds to: ldr.w reg, [pc, #offset].  */
10961      {
10962	bfd_vma insn;
10963	bfd_signed_vma relocation;
10964
10965	insn = (bfd_get_16 (input_bfd, hit_data) << 16)
10966	     | bfd_get_16 (input_bfd, hit_data + 2);
10967
10968	if (globals->use_rel)
10969	  {
10970	    signed_addend = insn & 0xfff;
10971	    if (!(insn & (1 << 23)))
10972	      signed_addend = -signed_addend;
10973	  }
10974
10975	relocation = value + signed_addend;
10976	relocation -= Pa (input_section->output_section->vma
10977			  + input_section->output_offset
10978			  + rel->r_offset);
10979
10980	value = relocation;
10981
10982	if (value >= 0x1000)
10983	  return bfd_reloc_overflow;
10984
10985	insn = (insn & 0xff7ff000) | value;
10986	if (relocation >= 0)
10987	  insn |= (1 << 23);
10988
10989	bfd_put_16 (input_bfd, insn >> 16, hit_data);
10990	bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
10991
10992	return bfd_reloc_ok;
10993      }
10994
10995    case R_ARM_THM_XPC22:
10996    case R_ARM_THM_CALL:
10997    case R_ARM_THM_JUMP24:
10998      /* Thumb BL (branch long instruction).  */
10999      {
11000	bfd_vma relocation;
11001	bfd_vma reloc_sign;
11002	bool overflow = false;
11003	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
11004	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
11005	bfd_signed_vma reloc_signed_max;
11006	bfd_signed_vma reloc_signed_min;
11007	bfd_vma check;
11008	bfd_signed_vma signed_check;
11009	int bitsize;
11010	const int thumb2 = using_thumb2 (globals);
11011	const int thumb2_bl = using_thumb2_bl (globals);
11012
11013	/* A branch to an undefined weak symbol is turned into a jump to
11014	   the next instruction unless a PLT entry will be created.
11015	   The jump to the next instruction is optimized as a NOP.W for
11016	   Thumb-2 enabled architectures.  */
11017	if (h && h->root.type == bfd_link_hash_undefweak
11018	    && plt_offset == (bfd_vma) -1)
11019	  {
11020	    if (thumb2)
11021	      {
11022		bfd_put_16 (input_bfd, 0xf3af, hit_data);
11023		bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
11024	      }
11025	    else
11026	      {
11027		bfd_put_16 (input_bfd, 0xe000, hit_data);
11028		bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
11029	      }
11030	    return bfd_reloc_ok;
11031	  }
11032
11033	/* Fetch the addend.  We use the Thumb-2 encoding (backwards compatible
11034	   with Thumb-1) involving the J1 and J2 bits.  */
11035	if (globals->use_rel)
11036	  {
11037	    bfd_vma s = (upper_insn & (1 << 10)) >> 10;
11038	    bfd_vma upper = upper_insn & 0x3ff;
11039	    bfd_vma lower = lower_insn & 0x7ff;
11040	    bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
11041	    bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
11042	    bfd_vma i1 = j1 ^ s ? 0 : 1;
11043	    bfd_vma i2 = j2 ^ s ? 0 : 1;
11044
11045	    addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
11046	    /* Sign extend.  */
11047	    addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
11048
11049	    signed_addend = addend;
11050	  }
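	/* The decoding above follows the Thumb-2 BL/BLX encoding:
	   offset == SignExtend (S:I1:I2:imm10:imm11:0), where
	   I1 == NOT (J1 EOR S) and I2 == NOT (J2 EOR S); imm10 sits in
	   the upper halfword and imm11 in the lower one.  */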
11051
11052	if (r_type == R_ARM_THM_XPC22)
11053	  {
11054	    /* Check for Thumb to Thumb call.  */
11055	    /* FIXME: Should we translate the instruction into a BL
11056	       instruction instead?  */
11057	    if (branch_type == ST_BRANCH_TO_THUMB)
11058	      _bfd_error_handler
11059		(_("%pB: warning: %s BLX instruction targets"
11060		   " %s function '%s'"),
11061		 input_bfd, "Thumb",
11062		 "Thumb", h ? h->root.root.string : "(local)");
11063	  }
11064	else
11065	  {
11066	    /* If it is not a call to Thumb, assume call to Arm.
11067	       If it is a call relative to a section name, then it is not a
11068	       function call at all, but rather a long jump.  Calls through
11069	       the PLT do not require stubs.  */
11070	    if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
11071	      {
11072		if (globals->use_blx && r_type == R_ARM_THM_CALL)
11073		  {
11074		    /* Convert BL to BLX.  */
11075		    lower_insn = (lower_insn & ~0x1000) | 0x0800;
11076		  }
11077		else if ((   r_type != R_ARM_THM_CALL)
11078			 && (r_type != R_ARM_THM_JUMP24))
11079		  {
11080		    if (elf32_thumb_to_arm_stub
11081			(info, sym_name, input_bfd, output_bfd, input_section,
11082			 hit_data, sym_sec, rel->r_offset, signed_addend, value,
11083			 error_message))
11084		      return bfd_reloc_ok;
11085		    else
11086		      return bfd_reloc_dangerous;
11087		  }
11088	      }
11089	    else if (branch_type == ST_BRANCH_TO_THUMB
11090		     && globals->use_blx
11091		     && r_type == R_ARM_THM_CALL)
11092	      {
11093		/* Make sure this is a BL.  */
11094		lower_insn |= 0x1800;
11095	      }
11096	  }
11097
11098	enum elf32_arm_stub_type stub_type = arm_stub_none;
11099	if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
11100	  {
11101	    /* Check if a stub has to be inserted because the destination
11102	       is too far.  */
11103	    struct elf32_arm_stub_hash_entry *stub_entry;
11104	    struct elf32_arm_link_hash_entry *hash;
11105
11106	    hash = (struct elf32_arm_link_hash_entry *) h;
11107
11108	    stub_type = arm_type_of_stub (info, input_section, rel,
11109					  st_type, &branch_type,
11110					  hash, value, sym_sec,
11111					  input_bfd, sym_name);
11112
11113	    if (stub_type != arm_stub_none)
11114	      {
11115		/* The target is out of reach or we are changing modes, so
11116		   redirect the branch to the local stub for this
11117		   function.  */
11118		stub_entry = elf32_arm_get_stub_entry (input_section,
11119						       sym_sec, h,
11120						       rel, globals,
11121						       stub_type);
11122		if (stub_entry != NULL)
11123		  {
11124		    value = (stub_entry->stub_offset
11125			     + stub_entry->stub_sec->output_offset
11126			     + stub_entry->stub_sec->output_section->vma);
11127
11128		    if (plt_offset != (bfd_vma) -1)
11129		      *unresolved_reloc_p = false;
11130		  }
11131
11132		/* If this call becomes a call to Arm, force BLX.  */
11133		if (globals->use_blx && (r_type == R_ARM_THM_CALL))
11134		  {
11135		    if ((stub_entry
11136			 && !arm_stub_is_thumb (stub_entry->stub_type))
11137			|| branch_type != ST_BRANCH_TO_THUMB)
11138		      lower_insn = (lower_insn & ~0x1000) | 0x0800;
11139		  }
11140	      }
11141	  }
11142
11143	/* Handle calls via the PLT.  */
11144	if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
11145	  {
11146	    value = (splt->output_section->vma
11147		     + splt->output_offset
11148		     + plt_offset);
11149
11150	    if (globals->use_blx
11151		&& r_type == R_ARM_THM_CALL
11152		&& ! using_thumb_only (globals))
11153	      {
11154		/* If the Thumb BLX instruction is available, convert
11155		   the BL to a BLX instruction to call the ARM-mode
11156		   PLT entry.  */
11157		lower_insn = (lower_insn & ~0x1000) | 0x0800;
11158		branch_type = ST_BRANCH_TO_ARM;
11159	      }
11160	    else
11161	      {
11162		if (! using_thumb_only (globals))
11163		  /* Target the Thumb stub before the ARM PLT entry.  */
11164		  value -= PLT_THUMB_STUB_SIZE;
11165		branch_type = ST_BRANCH_TO_THUMB;
11166	      }
11167	    *unresolved_reloc_p = false;
11168	  }
11169
11170	relocation = value + signed_addend;
11171
11172	relocation -= (input_section->output_section->vma
11173		       + input_section->output_offset
11174		       + rel->r_offset);
11175
11176	check = relocation >> howto->rightshift;
11177
11178	/* If this is a signed value, the rightshift just dropped
11179	   leading 1 bits (assuming twos complement).  */
11180	if ((bfd_signed_vma) relocation >= 0)
11181	  signed_check = check;
11182	else
11183	  signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
11184
11185	/* Calculate the permissible maximum and minimum values for
11186	   this relocation according to whether we're relocating for
11187	   Thumb-2 or not.  */
11188	bitsize = howto->bitsize;
11189	if (!thumb2_bl)
11190	  bitsize -= 2;
11191	reloc_signed_max = (1 << (bitsize - 1)) - 1;
11192	reloc_signed_min = ~reloc_signed_max;
11193
11194	/* Assumes two's complement.  */
11195	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
11196	  overflow = true;
11197
11198	if ((lower_insn & 0x5000) == 0x4000)
11199	  /* For a BLX instruction, make sure that the relocation is rounded up
11200	     to a word boundary.  This follows the semantics of the instruction
11201	     which specifies that bit 1 of the target address will come from bit
11202	     1 of the base address.  */
11203	  relocation = (relocation + 2) & ~ 3;
11204
11205	/* Put RELOCATION back into the insn.  Assumes two's complement.
11206	   We use the Thumb-2 encoding, which is safe even if dealing with
11207	   a Thumb-1 instruction by virtue of our overflow check above.  */
11208	reloc_sign = (signed_check < 0) ? 1 : 0;
11209	upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
11210		     | ((relocation >> 12) & 0x3ff)
11211		     | (reloc_sign << 10);
11212	lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
11213		     | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
11214		     | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
11215		     | ((relocation >> 1) & 0x7ff);
11216
11217	/* Put the relocated value back in the object file:  */
11218	bfd_put_16 (input_bfd, upper_insn, hit_data);
11219	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
11220
11221	return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
11222      }
11223      break;
11224
11225    case R_ARM_THM_JUMP19:
11226      /* Thumb32 conditional branch instruction.  */
11227      {
11228	bfd_vma relocation;
11229	bool overflow = false;
11230	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
11231	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
11232	bfd_signed_vma reloc_signed_max = 0xffffe;
11233	bfd_signed_vma reloc_signed_min = -0x100000;
11234	bfd_signed_vma signed_check;
11235	enum elf32_arm_stub_type stub_type = arm_stub_none;
11236	struct elf32_arm_stub_hash_entry *stub_entry;
11237	struct elf32_arm_link_hash_entry *hash;
11238
11239	/* Need to refetch the addend, reconstruct the top three bits,
11240	   and splice the upper and lower immediate fields together.  */
11241	if (globals->use_rel)
11242	  {
11243	    bfd_vma S     = (upper_insn & 0x0400) >> 10;
11244	    bfd_vma upper = (upper_insn & 0x003f);
11245	    bfd_vma J1    = (lower_insn & 0x2000) >> 13;
11246	    bfd_vma J2    = (lower_insn & 0x0800) >> 11;
11247	    bfd_vma lower = (lower_insn & 0x07ff);
11248
11249	    upper |= J1 << 6;
11250	    upper |= J2 << 7;
11251	    upper |= (!S) << 8;
11252	    upper -= 0x0100; /* Sign extend.  */
11253
11254	    addend = (upper << 12) | (lower << 1);
11255	    signed_addend = addend;
11256	  }
11257
11258	/* Handle calls via the PLT.  */
11259	if (plt_offset != (bfd_vma) -1)
11260	  {
11261	    value = (splt->output_section->vma
11262		     + splt->output_offset
11263		     + plt_offset);
11264	    /* Target the Thumb stub before the ARM PLT entry.  */
11265	    value -= PLT_THUMB_STUB_SIZE;
11266	    *unresolved_reloc_p = false;
11267	  }
11268
11269	hash = (struct elf32_arm_link_hash_entry *)h;
11270
11271	stub_type = arm_type_of_stub (info, input_section, rel,
11272				      st_type, &branch_type,
11273				      hash, value, sym_sec,
11274				      input_bfd, sym_name);
11275	if (stub_type != arm_stub_none)
11276	  {
11277	    stub_entry = elf32_arm_get_stub_entry (input_section,
11278						   sym_sec, h,
11279						   rel, globals,
11280						   stub_type);
11281	    if (stub_entry != NULL)
11282	      {
11283		value = (stub_entry->stub_offset
11284			+ stub_entry->stub_sec->output_offset
11285			+ stub_entry->stub_sec->output_section->vma);
11286	      }
11287	  }
11288
11289	relocation = value + signed_addend;
11290	relocation -= (input_section->output_section->vma
11291		       + input_section->output_offset
11292		       + rel->r_offset);
11293	signed_check = (bfd_signed_vma) relocation;
11294
11295	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
11296	  overflow = true;
11297
11298	/* Put RELOCATION back into the insn.  */
11299	{
11300	  bfd_vma S  = (relocation & 0x00100000) >> 20;
11301	  bfd_vma J2 = (relocation & 0x00080000) >> 19;
11302	  bfd_vma J1 = (relocation & 0x00040000) >> 18;
11303	  bfd_vma hi = (relocation & 0x0003f000) >> 12;
11304	  bfd_vma lo = (relocation & 0x00000ffe) >>  1;
11305
11306	  upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
11307	  lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
11308	}
11309
11310	/* Put the relocated value back in the object file:  */
11311	bfd_put_16 (input_bfd, upper_insn, hit_data);
11312	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
11313
11314	return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
11315      }
11316
11317    case R_ARM_THM_JUMP11:
11318    case R_ARM_THM_JUMP8:
11319    case R_ARM_THM_JUMP6:
11320      /* Thumb B (branch) instruction.  */
11321      {
11322	bfd_signed_vma relocation;
11323	bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
11324	bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
11325	bfd_signed_vma signed_check;
11326
11327	/* CBZ cannot jump backward.  */
11328	if (r_type == R_ARM_THM_JUMP6)
11329	  {
11330	    reloc_signed_min = 0;
11331	    if (globals->use_rel)
11332	      signed_addend = ((addend & 0x200) >> 3) | ((addend & 0xf8) >> 2);
11333	  }
11334
11335	relocation = value + signed_addend;
11336
11337	relocation -= (input_section->output_section->vma
11338		       + input_section->output_offset
11339		       + rel->r_offset);
11340
11341	relocation >>= howto->rightshift;
11342	signed_check = relocation;
11343
11344	if (r_type == R_ARM_THM_JUMP6)
11345	  relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
11346	else
11347	  relocation &= howto->dst_mask;
11348	relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
11349
11350	bfd_put_16 (input_bfd, relocation, hit_data);
11351
11352	/* Assumes two's complement.  */
11353	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
11354	  return bfd_reloc_overflow;
11355
11356	return bfd_reloc_ok;
11357      }
11358
11359    case R_ARM_ALU_PCREL7_0:
11360    case R_ARM_ALU_PCREL15_8:
11361    case R_ARM_ALU_PCREL23_15:
11362      {
11363	bfd_vma insn;
11364	bfd_vma relocation;
11365
11366	insn = bfd_get_32 (input_bfd, hit_data);
11367	if (globals->use_rel)
11368	  {
11369	    /* Extract the addend.  */
11370	    addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
11371	    signed_addend = addend;
11372	  }
11373	relocation = value + signed_addend;
11374
11375	relocation -= (input_section->output_section->vma
11376		       + input_section->output_offset
11377		       + rel->r_offset);
11378	insn = (insn & ~0xfff)
11379	       | ((howto->bitpos << 7) & 0xf00)
11380	       | ((relocation >> howto->bitpos) & 0xff);
11381	bfd_put_32 (input_bfd, insn, hit_data);
11382      }
11383      return bfd_reloc_ok;
11384
11385    case R_ARM_GNU_VTINHERIT:
11386    case R_ARM_GNU_VTENTRY:
11387      return bfd_reloc_ok;
11388
11389    case R_ARM_GOTOFF32:
11390      /* Relocation is relative to the start of the
11391	 global offset table.  */
11392
11393      BFD_ASSERT (sgot != NULL);
11394      if (sgot == NULL)
11395	return bfd_reloc_notsupported;
11396
11397      /* If we are addressing a Thumb function, we need to adjust the
11398	 address by one, so that attempts to call the function pointer will
11399	 correctly interpret it as Thumb code.  */
11400      if (branch_type == ST_BRANCH_TO_THUMB)
11401	value += 1;
11402
11403      /* Note that sgot->output_offset is not involved in this
11404	 calculation.  We always want the start of .got.  If we
11405	 define _GLOBAL_OFFSET_TABLE_ in a different way, as is
11406	 permitted by the ABI, we might have to change this
11407	 calculation.  */
11408      value -= sgot->output_section->vma;
11409      return _bfd_final_link_relocate (howto, input_bfd, input_section,
11410				       contents, rel->r_offset, value,
11411				       rel->r_addend);
11412
11413    case R_ARM_GOTPC:
11414      /* Use global offset table as symbol value.  */
11415      BFD_ASSERT (sgot != NULL);
11416
11417      if (sgot == NULL)
11418	return bfd_reloc_notsupported;
11419
11420      *unresolved_reloc_p = false;
11421      value = sgot->output_section->vma;
11422      return _bfd_final_link_relocate (howto, input_bfd, input_section,
11423				       contents, rel->r_offset, value,
11424				       rel->r_addend);
11425
11426    case R_ARM_GOT32:
11427    case R_ARM_GOT_PREL:
11428      /* Relocation is to the entry for this symbol in the
11429	 global offset table.  */
11430      if (sgot == NULL)
11431	return bfd_reloc_notsupported;
11432
11433      if (dynreloc_st_type == STT_GNU_IFUNC
11434	  && plt_offset != (bfd_vma) -1
11435	  && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
11436	{
11437	  /* We have a relocation against a locally-binding STT_GNU_IFUNC
11438	     symbol, and the relocation resolves directly to the runtime
11439	     target rather than to the .iplt entry.  This means that any
11440	     .got entry would be the same value as the .igot.plt entry,
11441	     so there's no point creating both.  */
11442	  sgot = globals->root.igotplt;
11443	  value = sgot->output_offset + gotplt_offset;
11444	}
11445      else if (h != NULL)
11446	{
11447	  bfd_vma off;
11448
11449	  off = h->got.offset;
11450	  BFD_ASSERT (off != (bfd_vma) -1);
11451	  if ((off & 1) != 0)
11452	    {
11453	      /* We have already processed one GOT relocation against
11454		 this symbol.  */
11455	      off &= ~1;
11456	      if (globals->root.dynamic_sections_created
11457		  && !SYMBOL_REFERENCES_LOCAL (info, h))
11458		*unresolved_reloc_p = false;
11459	    }
11460	  else
11461	    {
11462	      Elf_Internal_Rela outrel;
11463	      int isrofixup = 0;
11464
11465	      if (((h->dynindx != -1) || globals->fdpic_p)
11466		  && !SYMBOL_REFERENCES_LOCAL (info, h))
11467		{
11468		  /* If the symbol doesn't resolve locally in a static
11469		     object, we have an undefined reference.  If the
11470		     symbol doesn't resolve locally in a dynamic object,
11471		     it should be resolved by the dynamic linker.  */
11472		  if (globals->root.dynamic_sections_created)
11473		    {
11474		      outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
11475		      *unresolved_reloc_p = false;
11476		    }
11477		  else
11478		    outrel.r_info = 0;
11479		  outrel.r_addend = 0;
11480		}
11481	      else
11482		{
11483		  if (dynreloc_st_type == STT_GNU_IFUNC)
11484		    outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
11485		  else if (bfd_link_pic (info)
11486			   && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
11487		    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
11488		  else
11489		    {
11490		      outrel.r_info = 0;
11491		      if (globals->fdpic_p)
11492			isrofixup = 1;
11493		    }
11494		  outrel.r_addend = dynreloc_value;
11495		}
11496
11497	      /* The GOT entry is initialized to zero by default.
11498		 See if we should install a different value.  */
11499	      if (outrel.r_addend != 0
11500		  && (globals->use_rel || outrel.r_info == 0))
11501		{
11502		  bfd_put_32 (output_bfd, outrel.r_addend,
11503			      sgot->contents + off);
11504		  outrel.r_addend = 0;
11505		}
11506
11507	      if (isrofixup)
11508		arm_elf_add_rofixup (output_bfd,
11509				     elf32_arm_hash_table (info)->srofixup,
11510				     sgot->output_section->vma
11511				     + sgot->output_offset + off);
11512
11513	      else if (outrel.r_info != 0)
11514		{
11515		  outrel.r_offset = (sgot->output_section->vma
11516				     + sgot->output_offset
11517				     + off);
11518		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11519		}
11520
11521	      h->got.offset |= 1;
11522	    }
11523	  value = sgot->output_offset + off;
11524	}
11525      else
11526	{
11527	  bfd_vma off;
11528
11529	  BFD_ASSERT (local_got_offsets != NULL
11530		      && local_got_offsets[r_symndx] != (bfd_vma) -1);
11531
11532	  off = local_got_offsets[r_symndx];
11533
11534	  /* The offset must always be a multiple of 4.  We use the
11535	     least significant bit to record whether we have already
11536	     generated the necessary reloc.  */
11537	  if ((off & 1) != 0)
11538	    off &= ~1;
11539	  else
11540	    {
11541	      Elf_Internal_Rela outrel;
11542	      int isrofixup = 0;
11543
11544	      if (dynreloc_st_type == STT_GNU_IFUNC)
11545		outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
11546	      else if (bfd_link_pic (info))
11547		outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
11548	      else
11549		{
11550		  outrel.r_info = 0;
11551		  if (globals->fdpic_p)
11552		    isrofixup = 1;
11553		}
11554
11555	      /* The GOT entry is initialized to zero by default.
11556		 See if we should install a different value.  */
11557	      if (globals->use_rel || outrel.r_info == 0)
11558		bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);
11559
11560	      if (isrofixup)
11561		arm_elf_add_rofixup (output_bfd,
11562				     globals->srofixup,
11563				     sgot->output_section->vma
11564				     + sgot->output_offset + off);
11565
11566	      else if (outrel.r_info != 0)
11567		{
11568		  outrel.r_addend = addend + dynreloc_value;
11569		  outrel.r_offset = (sgot->output_section->vma
11570				     + sgot->output_offset
11571				     + off);
11572		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11573		}
11574
11575	      local_got_offsets[r_symndx] |= 1;
11576	    }
11577
11578	  value = sgot->output_offset + off;
11579	}
11580      if (r_type != R_ARM_GOT32)
11581	value += sgot->output_section->vma;
11582
11583      return _bfd_final_link_relocate (howto, input_bfd, input_section,
11584				       contents, rel->r_offset, value,
11585				       rel->r_addend);
11586
11587    case R_ARM_TLS_LDO32:
11588      value = value - dtpoff_base (info);
11589
11590      return _bfd_final_link_relocate (howto, input_bfd, input_section,
11591				       contents, rel->r_offset, value,
11592				       rel->r_addend);
11593
11594    case R_ARM_TLS_LDM32:
11595    case R_ARM_TLS_LDM32_FDPIC:
11596      {
11597	bfd_vma off;
11598
11599	if (sgot == NULL)
11600	  abort ();
11601
11602	off = globals->tls_ldm_got.offset;
11603
11604	if ((off & 1) != 0)
11605	  off &= ~1;
11606	else
11607	  {
11608	    /* If we don't know the module number, create a relocation
11609	       for it.  */
11610	    if (bfd_link_dll (info))
11611	      {
11612		Elf_Internal_Rela outrel;
11613
11614		if (srelgot == NULL)
11615		  abort ();
11616
11617		outrel.r_addend = 0;
11618		outrel.r_offset = (sgot->output_section->vma
11619				   + sgot->output_offset + off);
11620		outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
11621
11622		if (globals->use_rel)
11623		  bfd_put_32 (output_bfd, outrel.r_addend,
11624			      sgot->contents + off);
11625
11626		elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11627	      }
11628	    else
11629	      bfd_put_32 (output_bfd, 1, sgot->contents + off);
11630
11631	    globals->tls_ldm_got.offset |= 1;
11632	  }
11633
11634	if (r_type == R_ARM_TLS_LDM32_FDPIC)
11635	  {
11636	    bfd_put_32 (output_bfd,
11637			globals->root.sgot->output_offset + off,
11638			contents + rel->r_offset);
11639
11640	    return bfd_reloc_ok;
11641	  }
11642	else
11643	  {
11644	    value = sgot->output_section->vma + sgot->output_offset + off
11645	      - (input_section->output_section->vma
11646		 + input_section->output_offset + rel->r_offset);
11647
11648	    return _bfd_final_link_relocate (howto, input_bfd, input_section,
11649					     contents, rel->r_offset, value,
11650					     rel->r_addend);
11651	  }
11652      }
11653
11654    case R_ARM_TLS_CALL:
11655    case R_ARM_THM_TLS_CALL:
11656    case R_ARM_TLS_GD32:
11657    case R_ARM_TLS_GD32_FDPIC:
11658    case R_ARM_TLS_IE32:
11659    case R_ARM_TLS_IE32_FDPIC:
11660    case R_ARM_TLS_GOTDESC:
11661    case R_ARM_TLS_DESCSEQ:
11662    case R_ARM_THM_TLS_DESCSEQ:
11663      {
11664	bfd_vma off, offplt;
11665	int indx = 0;
11666	char tls_type;
11667
11668	BFD_ASSERT (sgot != NULL);
11669
11670	if (h != NULL)
11671	  {
11672	    bool dyn;
11673	    dyn = globals->root.dynamic_sections_created;
11674	    if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
11675						 bfd_link_pic (info),
11676						 h)
11677		&& (!bfd_link_pic (info)
11678		    || !SYMBOL_REFERENCES_LOCAL (info, h)))
11679	      {
11680		*unresolved_reloc_p = false;
11681		indx = h->dynindx;
11682	      }
11683	    off = h->got.offset;
11684	    offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
11685	    tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
11686	  }
11687	else
11688	  {
11689	    BFD_ASSERT (local_got_offsets != NULL);
11690
11691	    if (r_symndx >= elf32_arm_num_entries (input_bfd))
11692	      {
11693		_bfd_error_handler (_("\
11694%pB: expected symbol index in range 0..%lu but found local symbol with index %lu"),
11695				    input_bfd,
11696				    (unsigned long) elf32_arm_num_entries (input_bfd),
11697				    r_symndx);
11698		return bfd_reloc_notsupported;
11699	      }
11700	    off = local_got_offsets[r_symndx];
11701	    offplt = local_tlsdesc_gotents[r_symndx];
11702	    tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
11703	  }
11704
	/* Linker relaxation happens from one of the
	   R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE.  */
11707	if (ELF32_R_TYPE (rel->r_info) != r_type)
11708	  tls_type = GOT_TLS_IE;
11709
11710	BFD_ASSERT (tls_type != GOT_UNKNOWN);
11711
11712	if ((off & 1) != 0)
11713	  off &= ~1;
11714	else
11715	  {
11716	    bool need_relocs = false;
11717	    Elf_Internal_Rela outrel;
11718	    int cur_off = off;
11719
11720	    /* The GOT entries have not been initialized yet.  Do it
11721	       now, and emit any relocations.  If both an IE GOT and a
11722	       GD GOT are necessary, we emit the GD first.  */
11723
11724	    if ((bfd_link_dll (info) || indx != 0)
11725		&& (h == NULL
11726		    || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11727			&& !resolved_to_zero)
11728		    || h->root.type != bfd_link_hash_undefweak))
11729	      {
11730		need_relocs = true;
11731		BFD_ASSERT (srelgot != NULL);
11732	      }
11733
11734	    if (tls_type & GOT_TLS_GDESC)
11735	      {
11736		bfd_byte *loc;
11737
11738		/* We should have relaxed, unless this is an undefined
11739		   weak symbol.  */
11740		BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
11741			    || bfd_link_dll (info));
11742		BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
11743			    <= globals->root.sgotplt->size);
11744
11745		outrel.r_addend = 0;
11746		outrel.r_offset = (globals->root.sgotplt->output_section->vma
11747				   + globals->root.sgotplt->output_offset
11748				   + offplt
11749				   + globals->sgotplt_jump_table_size);
11750
11751		outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
11752		sreloc = globals->root.srelplt;
11753		loc = sreloc->contents;
11754		loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
11755		BFD_ASSERT (loc + RELOC_SIZE (globals)
11756			   <= sreloc->contents + sreloc->size);
11757
11758		SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
11759
11760		/* For globals, the first word in the relocation gets
11761		   the relocation index and the top bit set, or zero,
11762		   if we're binding now.  For locals, it gets the
11763		   symbol's offset in the tls section.  */
11764		bfd_put_32 (output_bfd,
11765			    !h ? value - elf_hash_table (info)->tls_sec->vma
11766			    : info->flags & DF_BIND_NOW ? 0
11767			    : 0x80000000 | ELF32_R_SYM (outrel.r_info),
11768			    globals->root.sgotplt->contents + offplt
11769			    + globals->sgotplt_jump_table_size);
11770
11771		/* Second word in the relocation is always zero.  */
11772		bfd_put_32 (output_bfd, 0,
11773			    globals->root.sgotplt->contents + offplt
11774			    + globals->sgotplt_jump_table_size + 4);
11775	      }
11776	    if (tls_type & GOT_TLS_GD)
11777	      {
11778		if (need_relocs)
11779		  {
11780		    outrel.r_addend = 0;
11781		    outrel.r_offset = (sgot->output_section->vma
11782				       + sgot->output_offset
11783				       + cur_off);
11784		    outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
11785
11786		    if (globals->use_rel)
11787		      bfd_put_32 (output_bfd, outrel.r_addend,
11788				  sgot->contents + cur_off);
11789
11790		    elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11791
11792		    if (indx == 0)
11793		      bfd_put_32 (output_bfd, value - dtpoff_base (info),
11794				  sgot->contents + cur_off + 4);
11795		    else
11796		      {
11797			outrel.r_addend = 0;
11798			outrel.r_info = ELF32_R_INFO (indx,
11799						      R_ARM_TLS_DTPOFF32);
11800			outrel.r_offset += 4;
11801
11802			if (globals->use_rel)
11803			  bfd_put_32 (output_bfd, outrel.r_addend,
11804				      sgot->contents + cur_off + 4);
11805
11806			elf32_arm_add_dynreloc (output_bfd, info,
11807						srelgot, &outrel);
11808		      }
11809		  }
11810		else
11811		  {
11812		    /* If we are not emitting relocations for a
11813		       general dynamic reference, then we must be in a
11814		       static link or an executable link with the
11815		       symbol binding locally.  Mark it as belonging
11816		       to module 1, the executable.  */
11817		    bfd_put_32 (output_bfd, 1,
11818				sgot->contents + cur_off);
11819		    bfd_put_32 (output_bfd, value - dtpoff_base (info),
11820				sgot->contents + cur_off + 4);
11821		  }
11822
11823		cur_off += 8;
11824	      }
11825
11826	    if (tls_type & GOT_TLS_IE)
11827	      {
11828		if (need_relocs)
11829		  {
11830		    if (indx == 0)
11831		      outrel.r_addend = value - dtpoff_base (info);
11832		    else
11833		      outrel.r_addend = 0;
11834		    outrel.r_offset = (sgot->output_section->vma
11835				       + sgot->output_offset
11836				       + cur_off);
11837		    outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
11838
11839		    if (globals->use_rel)
11840		      bfd_put_32 (output_bfd, outrel.r_addend,
11841				  sgot->contents + cur_off);
11842
11843		    elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11844		  }
11845		else
11846		  bfd_put_32 (output_bfd, tpoff (info, value),
11847			      sgot->contents + cur_off);
11848		cur_off += 4;
11849	      }
11850
11851	    if (h != NULL)
11852	      h->got.offset |= 1;
11853	    else
11854	      local_got_offsets[r_symndx] |= 1;
11855	  }
11856
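	/* An IE-style reference to a symbol that also has a GD entry
	   uses the slot that follows the 8-byte GD entry; a GDESC
	   reference resolves via its .got.plt slot (OFFPLT) instead.  */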
11857	if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32 && r_type != R_ARM_TLS_GD32_FDPIC)
11858	  off += 8;
11859	else if (tls_type & GOT_TLS_GDESC)
11860	  off = offplt;
11861
11862	if (ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
11863	    || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL)
11864	  {
11865	    bfd_signed_vma offset;
	    /* TLS stubs are ARM mode.  The original symbol is a
	       data object, so branch_type is bogus.  */
11868	    branch_type = ST_BRANCH_TO_ARM;
11869	    enum elf32_arm_stub_type stub_type
11870	      = arm_type_of_stub (info, input_section, rel,
11871				  st_type, &branch_type,
11872				  (struct elf32_arm_link_hash_entry *)h,
11873				  globals->tls_trampoline, globals->root.splt,
11874				  input_bfd, sym_name);
11875
11876	    if (stub_type != arm_stub_none)
11877	      {
11878		struct elf32_arm_stub_hash_entry *stub_entry
11879		  = elf32_arm_get_stub_entry
11880		  (input_section, globals->root.splt, 0, rel,
11881		   globals, stub_type);
11882		offset = (stub_entry->stub_offset
11883			  + stub_entry->stub_sec->output_offset
11884			  + stub_entry->stub_sec->output_section->vma);
11885	      }
11886	    else
11887	      offset = (globals->root.splt->output_section->vma
11888			+ globals->root.splt->output_offset
11889			+ globals->tls_trampoline);
11890
11891	    if (ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL)
11892	      {
11893		unsigned long inst;
11894
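		/* The ARM branch offset is relative to the PC, which is
		   8 bytes past the instruction being relocated.  */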
11895		offset -= (input_section->output_section->vma
11896			   + input_section->output_offset
11897			   + rel->r_offset + 8);
11898
11899		inst = offset >> 2;
11900		inst &= 0x00ffffff;
11901		value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
11902	      }
11903	    else
11904	      {
11905		/* Thumb blx encodes the offset in a complicated
11906		   fashion.  */
11907		unsigned upper_insn, lower_insn;
11908		unsigned neg;
11909
11910		offset -= (input_section->output_section->vma
11911			   + input_section->output_offset
11912			   + rel->r_offset + 4);
11913
11914		if (stub_type != arm_stub_none
11915		    && arm_stub_is_thumb (stub_type))
11916		  {
11917		    lower_insn = 0xd000;
11918		  }
11919		else
11920		  {
11921		    lower_insn = 0xc000;
11922		    /* Round up the offset to a word boundary.  */
11923		    offset = (offset + 2) & ~2;
11924		  }
11925
11926		neg = offset < 0;
11927		upper_insn = (0xf000
11928			      | ((offset >> 12) & 0x3ff)
11929			      | (neg << 10));
11930		lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
11931			      | (((!((offset >> 22) & 1)) ^ neg) << 11)
11932			      | ((offset >> 1) & 0x7ff);
11933		bfd_put_16 (input_bfd, upper_insn, hit_data);
11934		bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
11935		return bfd_reloc_ok;
11936	      }
11937	  }
	/* This relocation needs special care: besides pointing
	   somewhere in .gotplt, its addend must be adjusted according
	   to the type of instruction that refers to it.  */
11942	else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
11943	  {
11944	    unsigned long data, insn;
11945	    unsigned thumb;
11946
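	    /* The section contents hold the distance from this
	       relocation back to the instruction that references it;
	       bit 0 is set if that instruction is Thumb.  */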
11947	    data = bfd_get_signed_32 (input_bfd, hit_data);
11948	    thumb = data & 1;
11949	    data &= ~1ul;
11950
11951	    if (thumb)
11952	      {
11953		insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
11954		if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
11955		  insn = (insn << 16)
11956		    | bfd_get_16 (input_bfd,
11957				  contents + rel->r_offset - data + 2);
11958		if ((insn & 0xf800c000) == 0xf000c000)
11959		  /* bl/blx */
11960		  value = -6;
11961		else if ((insn & 0xffffff00) == 0x4400)
11962		  /* add */
11963		  value = -5;
11964		else
11965		  {
11966		    _bfd_error_handler
11967		      /* xgettext:c-format */
11968		      (_("%pB(%pA+%#" PRIx64 "): "
11969			 "unexpected %s instruction '%#lx' "
11970			 "referenced by TLS_GOTDESC"),
11971		       input_bfd, input_section, (uint64_t) rel->r_offset,
11972		       "Thumb", insn);
11973		    return bfd_reloc_notsupported;
11974		  }
11975	      }
11976	    else
11977	      {
11978		insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);
11979
11980		switch (insn >> 24)
11981		  {
11982		  case 0xeb:  /* bl */
11983		  case 0xfa:  /* blx */
11984		    value = -4;
11985		    break;
11986
11987		  case 0xe0:	/* add */
11988		    value = -8;
11989		    break;
11990
11991		  default:
11992		    _bfd_error_handler
11993		      /* xgettext:c-format */
11994		      (_("%pB(%pA+%#" PRIx64 "): "
11995			 "unexpected %s instruction '%#lx' "
11996			 "referenced by TLS_GOTDESC"),
11997		       input_bfd, input_section, (uint64_t) rel->r_offset,
11998		       "ARM", insn);
11999		    return bfd_reloc_notsupported;
12000		  }
12001	      }
12002
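	    /* Resolve to the descriptor's .got.plt slot, relative to
	       the place, on top of the instruction-specific bias
	       computed above.  */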
12003	    value += ((globals->root.sgotplt->output_section->vma
12004		       + globals->root.sgotplt->output_offset + off)
12005		      - (input_section->output_section->vma
12006			 + input_section->output_offset
12007			 + rel->r_offset)
12008		      + globals->sgotplt_jump_table_size);
12009	  }
12010	else
12011	  value = ((globals->root.sgot->output_section->vma
12012		    + globals->root.sgot->output_offset + off)
12013		   - (input_section->output_section->vma
12014		      + input_section->output_offset + rel->r_offset));
12015
12016	if (globals->fdpic_p && (r_type == R_ARM_TLS_GD32_FDPIC ||
12017				 r_type == R_ARM_TLS_IE32_FDPIC))
12018	  {
	    /* For FDPIC relocations, resolve to the offset of the GOT
	       entry from the start of the GOT.  */
12021	    bfd_put_32 (output_bfd,
12022			globals->root.sgot->output_offset + off,
12023			contents + rel->r_offset);
12024
12025	    return bfd_reloc_ok;
12026	  }
12027	else
12028	  {
12029	    return _bfd_final_link_relocate (howto, input_bfd, input_section,
12030					     contents, rel->r_offset, value,
12031					     rel->r_addend);
12032	  }
12033      }
12034
12035    case R_ARM_TLS_LE32:
12036      if (bfd_link_dll (info))
12037	{
12038	  _bfd_error_handler
12039	    /* xgettext:c-format */
12040	    (_("%pB(%pA+%#" PRIx64 "): %s relocation not permitted "
12041	       "in shared object"),
12042	     input_bfd, input_section, (uint64_t) rel->r_offset, howto->name);
12043	  return bfd_reloc_notsupported;
12044	}
12045      else
12046	value = tpoff (info, value);
12047
12048      return _bfd_final_link_relocate (howto, input_bfd, input_section,
12049				       contents, rel->r_offset, value,
12050				       rel->r_addend);
12051
12052    case R_ARM_V4BX:
12053      if (globals->fix_v4bx)
12054	{
12055	  bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12056
12057	  /* Ensure that we have a BX instruction.  */
12058	  BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
12059
12060	  if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
12061	    {
12062	      /* Branch to veneer.  */
12063	      bfd_vma glue_addr;
12064	      glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
12065	      glue_addr -= input_section->output_section->vma
12066			   + input_section->output_offset
12067			   + rel->r_offset + 8;
12068	      insn = (insn & 0xf0000000) | 0x0a000000
12069		     | ((glue_addr >> 2) & 0x00ffffff);
12070	    }
12071	  else
12072	    {
12073	      /* Preserve Rm (lowest four bits) and the condition code
12074		 (highest four bits). Other bits encode MOV PC,Rm.  */
12075	      insn = (insn & 0xf000000f) | 0x01a0f000;
12076	    }
12077
12078	  bfd_put_32 (input_bfd, insn, hit_data);
12079	}
12080      return bfd_reloc_ok;
12081
12082    case R_ARM_MOVW_ABS_NC:
12083    case R_ARM_MOVT_ABS:
12084    case R_ARM_MOVW_PREL_NC:
12085    case R_ARM_MOVT_PREL:
    /* Until we properly support segment-base-relative addressing, we
       assume the segment base to be zero, as for the group relocations.
       Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
       and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS.  */
12090    case R_ARM_MOVW_BREL_NC:
12091    case R_ARM_MOVW_BREL:
12092    case R_ARM_MOVT_BREL:
12093      {
12094	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12095
12096	if (globals->use_rel)
12097	  {
12098	    addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
12099	    signed_addend = (addend ^ 0x8000) - 0x8000;
12100	  }
12101
12102	value += signed_addend;
12103
12104	if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
12105	  value -= (input_section->output_section->vma
12106		    + input_section->output_offset + rel->r_offset);
12107
12108	if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
12109	  return bfd_reloc_overflow;
12110
12111	if (branch_type == ST_BRANCH_TO_THUMB)
12112	  value |= 1;
12113
12114	if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
12115	    || r_type == R_ARM_MOVT_BREL)
12116	  value >>= 16;
12117
12118	insn &= 0xfff0f000;
12119	insn |= value & 0xfff;
12120	insn |= (value & 0xf000) << 4;
12121	bfd_put_32 (input_bfd, insn, hit_data);
12122      }
12123      return bfd_reloc_ok;
12124
12125    case R_ARM_THM_MOVW_ABS_NC:
12126    case R_ARM_THM_MOVT_ABS:
12127    case R_ARM_THM_MOVW_PREL_NC:
12128    case R_ARM_THM_MOVT_PREL:
    /* Until we properly support segment-base-relative addressing, we
       assume the segment base to be zero, as for the above relocations.
       Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
       R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
       as R_ARM_THM_MOVT_ABS.  */
12134    case R_ARM_THM_MOVW_BREL_NC:
12135    case R_ARM_THM_MOVW_BREL:
12136    case R_ARM_THM_MOVT_BREL:
12137      {
12138	bfd_vma insn;
12139
12140	insn = bfd_get_16 (input_bfd, hit_data) << 16;
12141	insn |= bfd_get_16 (input_bfd, hit_data + 2);
12142
12143	if (globals->use_rel)
12144	  {
12145	    addend = ((insn >> 4)  & 0xf000)
12146		   | ((insn >> 15) & 0x0800)
12147		   | ((insn >> 4)  & 0x0700)
12148		   | (insn	   & 0x00ff);
12149	    signed_addend = (addend ^ 0x8000) - 0x8000;
12150	  }
12151
12152	value += signed_addend;
12153
12154	if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
12155	  value -= (input_section->output_section->vma
12156		    + input_section->output_offset + rel->r_offset);
12157
12158	if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
12159	  return bfd_reloc_overflow;
12160
12161	if (branch_type == ST_BRANCH_TO_THUMB)
12162	  value |= 1;
12163
12164	if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
12165	    || r_type == R_ARM_THM_MOVT_BREL)
12166	  value >>= 16;
12167
12168	insn &= 0xfbf08f00;
12169	insn |= (value & 0xf000) << 4;
12170	insn |= (value & 0x0800) << 15;
12171	insn |= (value & 0x0700) << 4;
12172	insn |= (value & 0x00ff);
12173
12174	bfd_put_16 (input_bfd, insn >> 16, hit_data);
12175	bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
12176      }
12177      return bfd_reloc_ok;
12178
12179    case R_ARM_ALU_PC_G0_NC:
12180    case R_ARM_ALU_PC_G1_NC:
12181    case R_ARM_ALU_PC_G0:
12182    case R_ARM_ALU_PC_G1:
12183    case R_ARM_ALU_PC_G2:
12184    case R_ARM_ALU_SB_G0_NC:
12185    case R_ARM_ALU_SB_G1_NC:
12186    case R_ARM_ALU_SB_G0:
12187    case R_ARM_ALU_SB_G1:
12188    case R_ARM_ALU_SB_G2:
12189      {
12190	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12191	bfd_vma pc = input_section->output_section->vma
12192		     + input_section->output_offset + rel->r_offset;
12193	/* sb is the origin of the *segment* containing the symbol.  */
12194	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12195	bfd_vma residual;
12196	bfd_vma g_n;
12197	bfd_signed_vma signed_value;
12198	int group = 0;
12199
12200	/* Determine which group of bits to select.  */
12201	switch (r_type)
12202	  {
12203	  case R_ARM_ALU_PC_G0_NC:
12204	  case R_ARM_ALU_PC_G0:
12205	  case R_ARM_ALU_SB_G0_NC:
12206	  case R_ARM_ALU_SB_G0:
12207	    group = 0;
12208	    break;
12209
12210	  case R_ARM_ALU_PC_G1_NC:
12211	  case R_ARM_ALU_PC_G1:
12212	  case R_ARM_ALU_SB_G1_NC:
12213	  case R_ARM_ALU_SB_G1:
12214	    group = 1;
12215	    break;
12216
12217	  case R_ARM_ALU_PC_G2:
12218	  case R_ARM_ALU_SB_G2:
12219	    group = 2;
12220	    break;
12221
12222	  default:
12223	    abort ();
12224	  }
12225
12226	/* If REL, extract the addend from the insn.  If RELA, it will
12227	   have already been fetched for us.  */
12228	if (globals->use_rel)
12229	  {
12230	    int negative;
12231	    bfd_vma constant = insn & 0xff;
12232	    bfd_vma rotation = (insn & 0xf00) >> 8;
12233
12234	    if (rotation == 0)
12235	      signed_addend = constant;
12236	    else
12237	      {
12238		/* Compensate for the fact that in the instruction, the
12239		   rotation is stored in multiples of 2 bits.  */
12240		rotation *= 2;
12241
12242		/* Rotate "constant" right by "rotation" bits.  */
12243		signed_addend = (constant >> rotation) |
12244				(constant << (8 * sizeof (bfd_vma) - rotation));
12245	      }
12246
12247	    /* Determine if the instruction is an ADD or a SUB.
12248	       (For REL, this determines the sign of the addend.)  */
12249	    negative = identify_add_or_sub (insn);
12250	    if (negative == 0)
12251	      {
12252		_bfd_error_handler
12253		  /* xgettext:c-format */
12254		  (_("%pB(%pA+%#" PRIx64 "): only ADD or SUB instructions "
12255		     "are allowed for ALU group relocations"),
12256		  input_bfd, input_section, (uint64_t) rel->r_offset);
12257		return bfd_reloc_overflow;
12258	      }
12259
12260	    signed_addend *= negative;
12261	  }
12262
12263	/* Compute the value (X) to go in the place.  */
12264	if (r_type == R_ARM_ALU_PC_G0_NC
12265	    || r_type == R_ARM_ALU_PC_G1_NC
12266	    || r_type == R_ARM_ALU_PC_G0
12267	    || r_type == R_ARM_ALU_PC_G1
12268	    || r_type == R_ARM_ALU_PC_G2)
12269	  /* PC relative.  */
12270	  signed_value = value - pc + signed_addend;
12271	else
12272	  /* Section base relative.  */
12273	  signed_value = value - sb + signed_addend;
12274
12275	/* If the target symbol is a Thumb function, then set the
12276	   Thumb bit in the address.  */
12277	if (branch_type == ST_BRANCH_TO_THUMB)
12278	  signed_value |= 1;
12279
12280	/* Calculate the value of the relevant G_n, in encoded
12281	   constant-with-rotation format.  */
12282	g_n = calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12283					  group, &residual);
12284
12285	/* Check for overflow if required.  */
12286	if ((r_type == R_ARM_ALU_PC_G0
12287	     || r_type == R_ARM_ALU_PC_G1
12288	     || r_type == R_ARM_ALU_PC_G2
12289	     || r_type == R_ARM_ALU_SB_G0
12290	     || r_type == R_ARM_ALU_SB_G1
12291	     || r_type == R_ARM_ALU_SB_G2) && residual != 0)
12292	  {
12293	    _bfd_error_handler
12294	      /* xgettext:c-format */
12295	      (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12296		 "splitting %#" PRIx64 " for group relocation %s"),
12297	       input_bfd, input_section, (uint64_t) rel->r_offset,
12298	       (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12299	       howto->name);
12300	    return bfd_reloc_overflow;
12301	  }
12302
12303	/* Mask out the value and the ADD/SUB part of the opcode; take care
12304	   not to destroy the S bit.  */
12305	insn &= 0xff1ff000;
12306
12307	/* Set the opcode according to whether the value to go in the
12308	   place is negative.  */
12309	if (signed_value < 0)
12310	  insn |= 1 << 22;
12311	else
12312	  insn |= 1 << 23;
12313
12314	/* Encode the offset.  */
12315	insn |= g_n;
12316
12317	bfd_put_32 (input_bfd, insn, hit_data);
12318      }
12319      return bfd_reloc_ok;
12320
12321    case R_ARM_LDR_PC_G0:
12322    case R_ARM_LDR_PC_G1:
12323    case R_ARM_LDR_PC_G2:
12324    case R_ARM_LDR_SB_G0:
12325    case R_ARM_LDR_SB_G1:
12326    case R_ARM_LDR_SB_G2:
12327      {
12328	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12329	bfd_vma pc = input_section->output_section->vma
12330		     + input_section->output_offset + rel->r_offset;
12331	/* sb is the origin of the *segment* containing the symbol.  */
12332	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12333	bfd_vma residual;
12334	bfd_signed_vma signed_value;
12335	int group = 0;
12336
12337	/* Determine which groups of bits to calculate.  */
12338	switch (r_type)
12339	  {
12340	  case R_ARM_LDR_PC_G0:
12341	  case R_ARM_LDR_SB_G0:
12342	    group = 0;
12343	    break;
12344
12345	  case R_ARM_LDR_PC_G1:
12346	  case R_ARM_LDR_SB_G1:
12347	    group = 1;
12348	    break;
12349
12350	  case R_ARM_LDR_PC_G2:
12351	  case R_ARM_LDR_SB_G2:
12352	    group = 2;
12353	    break;
12354
12355	  default:
12356	    abort ();
12357	  }
12358
12359	/* If REL, extract the addend from the insn.  If RELA, it will
12360	   have already been fetched for us.  */
12361	if (globals->use_rel)
12362	  {
12363	    int negative = (insn & (1 << 23)) ? 1 : -1;
12364	    signed_addend = negative * (insn & 0xfff);
12365	  }
12366
12367	/* Compute the value (X) to go in the place.  */
12368	if (r_type == R_ARM_LDR_PC_G0
12369	    || r_type == R_ARM_LDR_PC_G1
12370	    || r_type == R_ARM_LDR_PC_G2)
12371	  /* PC relative.  */
12372	  signed_value = value - pc + signed_addend;
12373	else
12374	  /* Section base relative.  */
12375	  signed_value = value - sb + signed_addend;
12376
12377	/* Calculate the value of the relevant G_{n-1} to obtain
12378	   the residual at that stage.  */
12379	calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12380				    group - 1, &residual);
12381
12382	/* Check for overflow.  */
12383	if (residual >= 0x1000)
12384	  {
12385	    _bfd_error_handler
12386	      /* xgettext:c-format */
12387	      (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12388		 "splitting %#" PRIx64 " for group relocation %s"),
12389	       input_bfd, input_section, (uint64_t) rel->r_offset,
12390	       (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12391	       howto->name);
12392	    return bfd_reloc_overflow;
12393	  }
12394
12395	/* Mask out the value and U bit.  */
12396	insn &= 0xff7ff000;
12397
12398	/* Set the U bit if the value to go in the place is non-negative.  */
12399	if (signed_value >= 0)
12400	  insn |= 1 << 23;
12401
12402	/* Encode the offset.  */
12403	insn |= residual;
12404
12405	bfd_put_32 (input_bfd, insn, hit_data);
12406      }
12407      return bfd_reloc_ok;
12408
12409    case R_ARM_LDRS_PC_G0:
12410    case R_ARM_LDRS_PC_G1:
12411    case R_ARM_LDRS_PC_G2:
12412    case R_ARM_LDRS_SB_G0:
12413    case R_ARM_LDRS_SB_G1:
12414    case R_ARM_LDRS_SB_G2:
12415      {
12416	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12417	bfd_vma pc = input_section->output_section->vma
12418		     + input_section->output_offset + rel->r_offset;
12419	/* sb is the origin of the *segment* containing the symbol.  */
12420	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12421	bfd_vma residual;
12422	bfd_signed_vma signed_value;
12423	int group = 0;
12424
12425	/* Determine which groups of bits to calculate.  */
12426	switch (r_type)
12427	  {
12428	  case R_ARM_LDRS_PC_G0:
12429	  case R_ARM_LDRS_SB_G0:
12430	    group = 0;
12431	    break;
12432
12433	  case R_ARM_LDRS_PC_G1:
12434	  case R_ARM_LDRS_SB_G1:
12435	    group = 1;
12436	    break;
12437
12438	  case R_ARM_LDRS_PC_G2:
12439	  case R_ARM_LDRS_SB_G2:
12440	    group = 2;
12441	    break;
12442
12443	  default:
12444	    abort ();
12445	  }
12446
12447	/* If REL, extract the addend from the insn.  If RELA, it will
12448	   have already been fetched for us.  */
12449	if (globals->use_rel)
12450	  {
12451	    int negative = (insn & (1 << 23)) ? 1 : -1;
12452	    signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
12453	  }
12454
12455	/* Compute the value (X) to go in the place.  */
12456	if (r_type == R_ARM_LDRS_PC_G0
12457	    || r_type == R_ARM_LDRS_PC_G1
12458	    || r_type == R_ARM_LDRS_PC_G2)
12459	  /* PC relative.  */
12460	  signed_value = value - pc + signed_addend;
12461	else
12462	  /* Section base relative.  */
12463	  signed_value = value - sb + signed_addend;
12464
12465	/* Calculate the value of the relevant G_{n-1} to obtain
12466	   the residual at that stage.  */
12467	calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12468				    group - 1, &residual);
12469
12470	/* Check for overflow.  */
12471	if (residual >= 0x100)
12472	  {
12473	    _bfd_error_handler
12474	      /* xgettext:c-format */
12475	      (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12476		 "splitting %#" PRIx64 " for group relocation %s"),
12477	       input_bfd, input_section, (uint64_t) rel->r_offset,
12478	       (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12479	       howto->name);
12480	    return bfd_reloc_overflow;
12481	  }
12482
12483	/* Mask out the value and U bit.  */
12484	insn &= 0xff7ff0f0;
12485
12486	/* Set the U bit if the value to go in the place is non-negative.  */
12487	if (signed_value >= 0)
12488	  insn |= 1 << 23;
12489
12490	/* Encode the offset.  */
12491	insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
12492
12493	bfd_put_32 (input_bfd, insn, hit_data);
12494      }
12495      return bfd_reloc_ok;
12496
12497    case R_ARM_LDC_PC_G0:
12498    case R_ARM_LDC_PC_G1:
12499    case R_ARM_LDC_PC_G2:
12500    case R_ARM_LDC_SB_G0:
12501    case R_ARM_LDC_SB_G1:
12502    case R_ARM_LDC_SB_G2:
12503      {
12504	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12505	bfd_vma pc = input_section->output_section->vma
12506		     + input_section->output_offset + rel->r_offset;
12507	/* sb is the origin of the *segment* containing the symbol.  */
12508	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12509	bfd_vma residual;
12510	bfd_signed_vma signed_value;
12511	int group = 0;
12512
12513	/* Determine which groups of bits to calculate.  */
12514	switch (r_type)
12515	  {
12516	  case R_ARM_LDC_PC_G0:
12517	  case R_ARM_LDC_SB_G0:
12518	    group = 0;
12519	    break;
12520
12521	  case R_ARM_LDC_PC_G1:
12522	  case R_ARM_LDC_SB_G1:
12523	    group = 1;
12524	    break;
12525
12526	  case R_ARM_LDC_PC_G2:
12527	  case R_ARM_LDC_SB_G2:
12528	    group = 2;
12529	    break;
12530
12531	  default:
12532	    abort ();
12533	  }
12534
12535	/* If REL, extract the addend from the insn.  If RELA, it will
12536	   have already been fetched for us.  */
12537	if (globals->use_rel)
12538	  {
12539	    int negative = (insn & (1 << 23)) ? 1 : -1;
12540	    signed_addend = negative * ((insn & 0xff) << 2);
12541	  }
12542
12543	/* Compute the value (X) to go in the place.  */
12544	if (r_type == R_ARM_LDC_PC_G0
12545	    || r_type == R_ARM_LDC_PC_G1
12546	    || r_type == R_ARM_LDC_PC_G2)
12547	  /* PC relative.  */
12548	  signed_value = value - pc + signed_addend;
12549	else
12550	  /* Section base relative.  */
12551	  signed_value = value - sb + signed_addend;
12552
12553	/* Calculate the value of the relevant G_{n-1} to obtain
12554	   the residual at that stage.  */
12555	calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12556				    group - 1, &residual);
12557
12558	/* Check for overflow.  (The absolute value to go in the place must be
12559	   divisible by four and, after having been divided by four, must
12560	   fit in eight bits.)  */
12561	if ((residual & 0x3) != 0 || residual >= 0x400)
12562	  {
12563	    _bfd_error_handler
12564	      /* xgettext:c-format */
12565	      (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12566		 "splitting %#" PRIx64 " for group relocation %s"),
12567	       input_bfd, input_section, (uint64_t) rel->r_offset,
12568	       (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12569	       howto->name);
12570	    return bfd_reloc_overflow;
12571	  }
12572
12573	/* Mask out the value and U bit.  */
12574	insn &= 0xff7fff00;
12575
12576	/* Set the U bit if the value to go in the place is non-negative.  */
12577	if (signed_value >= 0)
12578	  insn |= 1 << 23;
12579
12580	/* Encode the offset.  */
12581	insn |= residual >> 2;
12582
12583	bfd_put_32 (input_bfd, insn, hit_data);
12584      }
12585      return bfd_reloc_ok;
12586
12587    case R_ARM_THM_ALU_ABS_G0_NC:
12588    case R_ARM_THM_ALU_ABS_G1_NC:
12589    case R_ARM_THM_ALU_ABS_G2_NC:
12590    case R_ARM_THM_ALU_ABS_G3_NC:
12591	{
12592	    const int shift_array[4] = {0, 8, 16, 24};
12593	    bfd_vma insn = bfd_get_16 (input_bfd, hit_data);
12594	    bfd_vma addr = value;
12595	    int shift = shift_array[r_type - R_ARM_THM_ALU_ABS_G0_NC];
12596
12597	    /* Compute address.  */
12598	    if (globals->use_rel)
12599		signed_addend = insn & 0xff;
12600	    addr += signed_addend;
12601	    if (branch_type == ST_BRANCH_TO_THUMB)
12602		addr |= 1;
12603	    /* Clean imm8 insn.  */
12604	    insn &= 0xff00;
12605	    /* And update with correct part of address.  */
12606	    insn |= (addr >> shift) & 0xff;
12607	    /* Update insn.  */
12608	    bfd_put_16 (input_bfd, insn, hit_data);
12609	}
12610
12611	*unresolved_reloc_p = false;
12612	return bfd_reloc_ok;
12613
12614    case R_ARM_GOTOFFFUNCDESC:
12615      {
12616	if (h == NULL)
12617	  {
12618	    struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts (input_bfd);
12619	    int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12620
12621	    if (r_symndx >= elf32_arm_num_entries (input_bfd))
12622	      {
12623		* error_message = _("local symbol index too big");
12624		return bfd_reloc_dangerous;
12625	      }
12626
12627	    int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
12628	    bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
12629	    bfd_vma seg = -1;
12630
12631	    if (bfd_link_pic (info) && dynindx == 0)
12632	      {
12633		* error_message = _("no dynamic index information available");
12634		return bfd_reloc_dangerous;
12635	      }
12636
12637	    /* Resolve relocation.  */
	    bfd_put_32 (output_bfd, (offset + sgot->output_offset),
			contents + rel->r_offset);
12640	    /* Emit R_ARM_FUNCDESC_VALUE or two fixups on funcdesc if
12641	       not done yet.  */
12642	    arm_elf_fill_funcdesc (output_bfd, info,
12643				   &local_fdpic_cnts[r_symndx].funcdesc_offset,
12644				   dynindx, offset, addr, dynreloc_value, seg);
12645	  }
12646	else
12647	  {
12648	    int dynindx;
12649	    int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12650	    bfd_vma addr;
12651	    bfd_vma seg = -1;
12652
12653	    /* For static binaries, sym_sec can be null.  */
12654	    if (sym_sec)
12655	      {
12656		dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12657		addr = dynreloc_value - sym_sec->output_section->vma;
12658	      }
12659	    else
12660	      {
12661		dynindx = 0;
12662		addr = 0;
12663	      }
12664
12665	    if (bfd_link_pic (info) && dynindx == 0)
12666	      {
12667		* error_message = _("no dynamic index information available");
12668		return bfd_reloc_dangerous;
12669	      }
12670
	    /* This case cannot occur: the function descriptor is
	       allocated by the dynamic loader, so we cannot resolve
	       the relocation here.  */
12673	    if (h->dynindx != -1)
12674	      {
12675		* error_message = _("invalid dynamic index");
12676		return bfd_reloc_dangerous;
12677	      }
12678
12679	    /* Resolve relocation.  */
12680	    bfd_put_32 (output_bfd, (offset + sgot->output_offset),
12681		        contents + rel->r_offset);
12682	    /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet.  */
12683	    arm_elf_fill_funcdesc (output_bfd, info,
12684				   &eh->fdpic_cnts.funcdesc_offset,
12685				   dynindx, offset, addr, dynreloc_value, seg);
12686	  }
12687      }
12688      *unresolved_reloc_p = false;
12689      return bfd_reloc_ok;
12690
12691    case R_ARM_GOTFUNCDESC:
12692      {
12693	if (h != NULL)
12694	  {
12695	    Elf_Internal_Rela outrel;
12696
12697	    /* Resolve relocation.  */
12698	    bfd_put_32 (output_bfd, ((eh->fdpic_cnts.gotfuncdesc_offset & ~1)
12699				     + sgot->output_offset),
12700			contents + rel->r_offset);
12701	    /* Add funcdesc and associated R_ARM_FUNCDESC_VALUE.  */
12702	    if (h->dynindx == -1)
12703	      {
12704		int dynindx;
12705		int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12706		bfd_vma addr;
12707		bfd_vma seg = -1;
12708
12709		/* For static binaries sym_sec can be null.  */
12710		if (sym_sec)
12711		  {
12712		    dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12713		    addr = dynreloc_value - sym_sec->output_section->vma;
12714		  }
12715		else
12716		  {
12717		    dynindx = 0;
12718		    addr = 0;
12719		  }
12720
12721		/* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet.  */
12722		arm_elf_fill_funcdesc (output_bfd, info,
12723				       &eh->fdpic_cnts.funcdesc_offset,
12724				       dynindx, offset, addr, dynreloc_value, seg);
12725	      }
12726
12727	    /* Add a dynamic relocation on GOT entry if not already done.  */
12728	    if ((eh->fdpic_cnts.gotfuncdesc_offset & 1) == 0)
12729	      {
12730		if (h->dynindx == -1)
12731		  {
12732		    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12733		    if (h->root.type == bfd_link_hash_undefweak)
12734		      bfd_put_32 (output_bfd, 0, sgot->contents
12735				  + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
12736		    else
12737		      bfd_put_32 (output_bfd, sgot->output_section->vma
12738				  + sgot->output_offset
12739				  + (eh->fdpic_cnts.funcdesc_offset & ~1),
12740				  sgot->contents
12741				  + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
12742		  }
12743		else
12744		  {
12745		    outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
12746		  }
12747		outrel.r_offset = sgot->output_section->vma
12748		  + sgot->output_offset
12749		  + (eh->fdpic_cnts.gotfuncdesc_offset & ~1);
12750		outrel.r_addend = 0;
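		/* In an executable with a locally-bound symbol, emit a
		   rofixup (a dummy one for an undefined weak symbol);
		   otherwise emit a dynamic relocation.  */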
12751		if (h->dynindx == -1 && !bfd_link_pic (info))
12752		  if (h->root.type == bfd_link_hash_undefweak)
12753		    arm_elf_add_rofixup (output_bfd, globals->srofixup, -1);
12754		  else
12755		    arm_elf_add_rofixup (output_bfd, globals->srofixup,
12756					 outrel.r_offset);
12757		else
12758		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12759		eh->fdpic_cnts.gotfuncdesc_offset |= 1;
12760	      }
12761	  }
12762	else
12763	  {
	    /* Such a relocation against a static function should not
	       have been emitted by the compiler.  */
12766	    return bfd_reloc_notsupported;
12767	  }
12768      }
12769      *unresolved_reloc_p = false;
12770      return bfd_reloc_ok;
12771
12772    case R_ARM_FUNCDESC:
12773      {
12774	if (h == NULL)
12775	  {
12776	    struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts (input_bfd);
12777	    Elf_Internal_Rela outrel;
12778	    int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12779
12780	    if (r_symndx >= elf32_arm_num_entries (input_bfd))
12781	      {
12782		* error_message = _("local symbol index too big");
12783		return bfd_reloc_dangerous;
12784	      }
12785
12786	    int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
12787	    bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
12788	    bfd_vma seg = -1;
12789
12790	    if (bfd_link_pic (info) && dynindx == 0)
12791	      {
12792		* error_message = _("dynamic index information not available");
12793		return bfd_reloc_dangerous;
12794	      }
12795
	    /* Replace the static FUNCDESC relocation with an
	       R_ARM_RELATIVE dynamic relocation, or with a rofixup for
	       an executable.  */
12799	    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12800	    outrel.r_offset = input_section->output_section->vma
12801	      + input_section->output_offset + rel->r_offset;
12802	    outrel.r_addend = 0;
12803	    if (bfd_link_pic (info))
12804	      elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12805	    else
12806	      arm_elf_add_rofixup (output_bfd, globals->srofixup, outrel.r_offset);
12807
12808	    bfd_put_32 (input_bfd, sgot->output_section->vma
12809			+ sgot->output_offset + offset, hit_data);
12810
12811	    /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet.  */
12812	    arm_elf_fill_funcdesc (output_bfd, info,
12813				   &local_fdpic_cnts[r_symndx].funcdesc_offset,
12814				   dynindx, offset, addr, dynreloc_value, seg);
12815	  }
12816	else
12817	  {
12818	    if (h->dynindx == -1)
12819	      {
12820		int dynindx;
12821		int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12822		bfd_vma addr;
12823		bfd_vma seg = -1;
12824		Elf_Internal_Rela outrel;
12825
12826		/* For static binaries sym_sec can be null.  */
12827		if (sym_sec)
12828		  {
12829		    dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12830		    addr = dynreloc_value - sym_sec->output_section->vma;
12831		  }
12832		else
12833		  {
12834		    dynindx = 0;
12835		    addr = 0;
12836		  }
12837
12838		if (bfd_link_pic (info) && dynindx == 0)
12839		  abort ();
12840
		/* Replace the static FUNCDESC relocation with an
		   R_ARM_RELATIVE dynamic relocation.  */
12843		outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12844		outrel.r_offset = input_section->output_section->vma
12845		  + input_section->output_offset + rel->r_offset;
12846		outrel.r_addend = 0;
12847		if (bfd_link_pic (info))
12848		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12849		else
12850		  arm_elf_add_rofixup (output_bfd, globals->srofixup, outrel.r_offset);
12851
12852		bfd_put_32 (input_bfd, sgot->output_section->vma
12853			    + sgot->output_offset + offset, hit_data);
12854
12855		/* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet.  */
12856		arm_elf_fill_funcdesc (output_bfd, info,
12857				       &eh->fdpic_cnts.funcdesc_offset,
12858				       dynindx, offset, addr, dynreloc_value, seg);
12859	      }
12860	    else
12861	      {
12862		Elf_Internal_Rela outrel;
12863
12864		/* Add a dynamic relocation.  */
12865		outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
12866		outrel.r_offset = input_section->output_section->vma
12867		  + input_section->output_offset + rel->r_offset;
12868		outrel.r_addend = 0;
12869		elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12870	      }
12871	  }
12872      }
12873      *unresolved_reloc_p = false;
12874      return bfd_reloc_ok;
12875
12876    case R_ARM_THM_BF16:
12877      {
12878	bfd_vma relocation;
12879	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
12880	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
12881
12882	if (globals->use_rel)
12883	  {
12884	    bfd_vma immA  = (upper_insn & 0x001f);
12885	    bfd_vma immB  = (lower_insn & 0x07fe) >> 1;
12886	    bfd_vma immC  = (lower_insn & 0x0800) >> 11;
12887	    addend  = (immA << 12);
12888	    addend |= (immB << 2);
12889	    addend |= (immC << 1);
12890	    addend |= 1;
12891	    /* Sign extend.  */
12892	    signed_addend = (addend & 0x10000) ? addend - (1 << 17) : addend;
12893	  }
12894
12895	relocation  = value + signed_addend;
12896	relocation -= (input_section->output_section->vma
12897		       + input_section->output_offset
12898		       + rel->r_offset);
12899
12900	/* Put RELOCATION back into the insn.  */
12901	{
12902	  bfd_vma immA = (relocation & 0x0001f000) >> 12;
12903	  bfd_vma immB = (relocation & 0x00000ffc) >> 2;
12904	  bfd_vma immC = (relocation & 0x00000002) >> 1;
12905
12906	  upper_insn = (upper_insn & 0xffe0) | immA;
12907	  lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
12908	}
12909
12910	/* Put the relocated value back in the object file:  */
12911	bfd_put_16 (input_bfd, upper_insn, hit_data);
12912	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
12913
12914	return bfd_reloc_ok;
12915      }
12916
12917    case R_ARM_THM_BF12:
12918      {
12919	bfd_vma relocation;
12920	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
12921	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
12922
12923	if (globals->use_rel)
12924	  {
12925	    bfd_vma immA  = (upper_insn & 0x0001);
12926	    bfd_vma immB  = (lower_insn & 0x07fe) >> 1;
12927	    bfd_vma immC  = (lower_insn & 0x0800) >> 11;
12928	    addend  = (immA << 12);
12929	    addend |= (immB << 2);
12930	    addend |= (immC << 1);
12931	    addend |= 1;
12932	    /* Sign extend.  */
12933	    addend = (addend & 0x1000) ? addend - (1 << 13) : addend;
12934	    signed_addend = addend;
12935	  }
12936
12937	relocation  = value + signed_addend;
12938	relocation -= (input_section->output_section->vma
12939		       + input_section->output_offset
12940		       + rel->r_offset);
12941
12942	/* Put RELOCATION back into the insn.  */
12943	{
12944	  bfd_vma immA = (relocation & 0x00001000) >> 12;
12945	  bfd_vma immB = (relocation & 0x00000ffc) >> 2;
12946	  bfd_vma immC = (relocation & 0x00000002) >> 1;
12947
12948	  upper_insn = (upper_insn & 0xfffe) | immA;
12949	  lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
12950	}
12951
12952	/* Put the relocated value back in the object file:  */
12953	bfd_put_16 (input_bfd, upper_insn, hit_data);
12954	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
12955
12956	return bfd_reloc_ok;
12957      }
12958
12959    case R_ARM_THM_BF18:
12960      {
12961	bfd_vma relocation;
12962	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
12963	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
12964
12965	if (globals->use_rel)
12966	  {
12967	    bfd_vma immA  = (upper_insn & 0x007f);
12968	    bfd_vma immB  = (lower_insn & 0x07fe) >> 1;
12969	    bfd_vma immC  = (lower_insn & 0x0800) >> 11;
12970	    addend  = (immA << 12);
12971	    addend |= (immB << 2);
12972	    addend |= (immC << 1);
12973	    addend |= 1;
12974	    /* Sign extend.  */
12975	    addend = (addend & 0x40000) ? addend - (1 << 19) : addend;
12976	    signed_addend = addend;
12977	  }
12978
12979	relocation  = value + signed_addend;
12980	relocation -= (input_section->output_section->vma
12981		       + input_section->output_offset
12982		       + rel->r_offset);
12983
12984	/* Put RELOCATION back into the insn.  */
12985	{
12986	  bfd_vma immA = (relocation & 0x0007f000) >> 12;
12987	  bfd_vma immB = (relocation & 0x00000ffc) >> 2;
12988	  bfd_vma immC = (relocation & 0x00000002) >> 1;
12989
12990	  upper_insn = (upper_insn & 0xff80) | immA;
12991	  lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
12992	}
12993
12994	/* Put the relocated value back in the object file:  */
12995	bfd_put_16 (input_bfd, upper_insn, hit_data);
12996	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
12997
12998	return bfd_reloc_ok;
12999      }
13000
13001    default:
13002      return bfd_reloc_notsupported;
13003    }
13004}
13005
13006/* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS.  */
13007static void
13008arm_add_to_rel (bfd *		   abfd,
13009		bfd_byte *	   address,
13010		reloc_howto_type * howto,
13011		bfd_signed_vma	   increment)
13012{
13013  bfd_signed_vma addend;
13014
13015  if (howto->type == R_ARM_THM_CALL
13016      || howto->type == R_ARM_THM_JUMP24)
13017    {
13018      int upper_insn, lower_insn;
13019      int upper, lower;
13020
13021      upper_insn = bfd_get_16 (abfd, address);
13022      lower_insn = bfd_get_16 (abfd, address + 2);
13023      upper = upper_insn & 0x7ff;
13024      lower = lower_insn & 0x7ff;
13025
13026      addend = (upper << 12) | (lower << 1);
13027      addend += increment;
13028      addend >>= 1;
13029
13030      upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
13031      lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
13032
13033      bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
13034      bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
13035    }
13036  else
13037    {
13038      bfd_vma	     contents;
13039
13040      contents = bfd_get_32 (abfd, address);
13041
13042      /* Get the (signed) value from the instruction.  */
13043      addend = contents & howto->src_mask;
13044      if (addend & ((howto->src_mask + 1) >> 1))
13045	{
13046	  bfd_signed_vma mask;
13047
13048	  mask = -1;
13049	  mask &= ~ howto->src_mask;
13050	  addend |= mask;
13051	}
13052
      /* Add in the increment (which is a byte value).  */
13054      switch (howto->type)
13055	{
13056	default:
13057	  addend += increment;
13058	  break;
13059
13060	case R_ARM_PC24:
13061	case R_ARM_PLT32:
13062	case R_ARM_CALL:
13063	case R_ARM_JUMP24:
13064	  addend *= bfd_get_reloc_size (howto);
13065	  addend += increment;
13066
	  /* Should we check for overflow here?  */
13068
13069	  /* Drop any undesired bits.  */
13070	  addend >>= howto->rightshift;
13071	  break;
13072	}
13073
13074      contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
13075
13076      bfd_put_32 (abfd, contents, address);
13077    }
13078}
13079
13080#define IS_ARM_TLS_RELOC(R_TYPE)	\
13081  ((R_TYPE) == R_ARM_TLS_GD32		\
13082   || (R_TYPE) == R_ARM_TLS_GD32_FDPIC  \
13083   || (R_TYPE) == R_ARM_TLS_LDO32	\
13084   || (R_TYPE) == R_ARM_TLS_LDM32	\
13085   || (R_TYPE) == R_ARM_TLS_LDM32_FDPIC	\
13086   || (R_TYPE) == R_ARM_TLS_DTPOFF32	\
13087   || (R_TYPE) == R_ARM_TLS_DTPMOD32	\
13088   || (R_TYPE) == R_ARM_TLS_TPOFF32	\
13089   || (R_TYPE) == R_ARM_TLS_LE32	\
13090   || (R_TYPE) == R_ARM_TLS_IE32	\
13091   || (R_TYPE) == R_ARM_TLS_IE32_FDPIC	\
13092   || IS_ARM_TLS_GNU_RELOC (R_TYPE))
13093
/* Specific set of relocations for the GNU TLS dialect.  */
13095#define IS_ARM_TLS_GNU_RELOC(R_TYPE)	\
13096  ((R_TYPE) == R_ARM_TLS_GOTDESC	\
13097   || (R_TYPE) == R_ARM_TLS_CALL	\
13098   || (R_TYPE) == R_ARM_THM_TLS_CALL	\
13099   || (R_TYPE) == R_ARM_TLS_DESCSEQ	\
13100   || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
13101
13102/* Relocate an ARM ELF section.  */
13103
13104static int
13105elf32_arm_relocate_section (bfd *		   output_bfd,
13106			    struct bfd_link_info * info,
13107			    bfd *		   input_bfd,
13108			    asection *		   input_section,
13109			    bfd_byte *		   contents,
13110			    Elf_Internal_Rela *	   relocs,
13111			    Elf_Internal_Sym *	   local_syms,
13112			    asection **		   local_sections)
13113{
13114  Elf_Internal_Shdr *symtab_hdr;
13115  struct elf_link_hash_entry **sym_hashes;
13116  Elf_Internal_Rela *rel;
13117  Elf_Internal_Rela *relend;
13118  const char *name;
13119  struct elf32_arm_link_hash_table * globals;
13120
13121  globals = elf32_arm_hash_table (info);
13122  if (globals == NULL)
13123    return false;
13124
13125  symtab_hdr = & elf_symtab_hdr (input_bfd);
13126  sym_hashes = elf_sym_hashes (input_bfd);
13127
13128  rel = relocs;
13129  relend = relocs + input_section->reloc_count;
13130  for (; rel < relend; rel++)
13131    {
13132      int			   r_type;
13133      reloc_howto_type *	   howto;
13134      unsigned long		   r_symndx;
13135      Elf_Internal_Sym *	   sym;
13136      asection *		   sec;
13137      struct elf_link_hash_entry * h;
13138      bfd_vma			   relocation;
13139      bfd_reloc_status_type	   r;
13140      arelent			   bfd_reloc;
13141      char			   sym_type;
13142      bool			   unresolved_reloc = false;
13143      char *error_message = NULL;
13144
13145      r_symndx = ELF32_R_SYM (rel->r_info);
13146      r_type   = ELF32_R_TYPE (rel->r_info);
13147      r_type   = arm_real_reloc_type (globals, r_type);
13148
13149      if (   r_type == R_ARM_GNU_VTENTRY
13150	  || r_type == R_ARM_GNU_VTINHERIT)
13151	continue;
13152
13153      howto = bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
13154
13155      if (howto == NULL)
13156	return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
13157
13158      h = NULL;
13159      sym = NULL;
13160      sec = NULL;
13161
13162      if (r_symndx < symtab_hdr->sh_info)
13163	{
13164	  sym = local_syms + r_symndx;
13165	  sym_type = ELF32_ST_TYPE (sym->st_info);
13166	  sec = local_sections[r_symndx];
13167
13168	  /* An object file might have a reference to a local
13169	     undefined symbol.  This is a daft object file, but we
13170	     should at least do something about it.  V4BX & NONE
13171	     relocations do not use the symbol and are explicitly
13172	     allowed to use the undefined symbol, so allow those.
13173	     Likewise for relocations against STN_UNDEF.  */
13174	  if (r_type != R_ARM_V4BX
13175	      && r_type != R_ARM_NONE
13176	      && r_symndx != STN_UNDEF
13177	      && bfd_is_und_section (sec)
13178	      && ELF_ST_BIND (sym->st_info) != STB_WEAK)
13179	    (*info->callbacks->undefined_symbol)
13180	      (info, bfd_elf_string_from_elf_section
13181	       (input_bfd, symtab_hdr->sh_link, sym->st_name),
13182	       input_bfd, input_section,
13183	       rel->r_offset, true);
13184
13185	  if (globals->use_rel)
13186	    {
13187	      relocation = (sec->output_section->vma
13188			    + sec->output_offset
13189			    + sym->st_value);
13190	      if (!bfd_link_relocatable (info)
13191		  && (sec->flags & SEC_MERGE)
13192		  && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
13193		{
13194		  asection *msec;
13195		  bfd_vma addend, value;
13196
13197		  switch (r_type)
13198		    {
13199		    case R_ARM_MOVW_ABS_NC:
13200		    case R_ARM_MOVT_ABS:
13201		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);
13202		      addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
13203		      addend = (addend ^ 0x8000) - 0x8000;
13204		      break;
13205
13206		    case R_ARM_THM_MOVW_ABS_NC:
13207		    case R_ARM_THM_MOVT_ABS:
13208		      value = bfd_get_16 (input_bfd, contents + rel->r_offset)
13209			      << 16;
13210		      value |= bfd_get_16 (input_bfd,
13211					   contents + rel->r_offset + 2);
13212		      addend = ((value & 0xf7000) >> 4) | (value & 0xff)
13213			       | ((value & 0x04000000) >> 15);
13214		      addend = (addend ^ 0x8000) - 0x8000;
13215		      break;
13216
13217		    default:
13218		      if (howto->rightshift
13219			  || (howto->src_mask & (howto->src_mask + 1)))
13220			{
13221			  _bfd_error_handler
13222			    /* xgettext:c-format */
13223			    (_("%pB(%pA+%#" PRIx64 "): "
13224			       "%s relocation against SEC_MERGE section"),
13225			     input_bfd, input_section,
13226			     (uint64_t) rel->r_offset, howto->name);
13227			  return false;
13228			}
13229
13230		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);
13231
13232		      /* Get the (signed) value from the instruction.  */
13233		      addend = value & howto->src_mask;
13234		      if (addend & ((howto->src_mask + 1) >> 1))
13235			{
13236			  bfd_signed_vma mask;
13237
13238			  mask = -1;
13239			  mask &= ~ howto->src_mask;
13240			  addend |= mask;
13241			}
13242		      break;
13243		    }
13244
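		  /* Let the generic code map SYM+ADDEND into the
		     merged output section, then turn the result back
		     into an addend relative to the section symbol's
		     final value.  */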
13245		  msec = sec;
13246		  addend =
13247		    _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
13248		    - relocation;
13249		  addend += msec->output_section->vma + msec->output_offset;
13250
13251		  /* Cases here must match those in the preceding
13252		     switch statement.  */
13253		  switch (r_type)
13254		    {
13255		    case R_ARM_MOVW_ABS_NC:
13256		    case R_ARM_MOVT_ABS:
13257		      value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
13258			      | (addend & 0xfff);
13259		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
13260		      break;
13261
13262		    case R_ARM_THM_MOVW_ABS_NC:
13263		    case R_ARM_THM_MOVT_ABS:
13264		      value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
13265			      | (addend & 0xff) | ((addend & 0x0800) << 15);
13266		      bfd_put_16 (input_bfd, value >> 16,
13267				  contents + rel->r_offset);
13268		      bfd_put_16 (input_bfd, value,
13269				  contents + rel->r_offset + 2);
13270		      break;
13271
13272		    default:
13273		      value = (value & ~ howto->dst_mask)
13274			      | (addend & howto->dst_mask);
13275		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
13276		      break;
13277		    }
13278		}
13279	    }
13280	  else
13281	    relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
13282	}
13283      else
13284	{
13285	  bool warned, ignored;
13286
13287	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
13288				   r_symndx, symtab_hdr, sym_hashes,
13289				   h, sec, relocation,
13290				   unresolved_reloc, warned, ignored);
13291
13292	  sym_type = h->type;
13293	}
13294
13295      if (sec != NULL && discarded_section (sec))
13296	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
13297					 rel, 1, relend, howto, 0, contents);
13298
13299      if (bfd_link_relocatable (info))
13300	{
13301	  /* This is a relocatable link.  We don't have to change
13302	     anything, unless the reloc is against a section symbol,
13303	     in which case we have to adjust according to where the
13304	     section symbol winds up in the output section.  */
13305	  if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
13306	    {
13307	      if (globals->use_rel)
13308		arm_add_to_rel (input_bfd, contents + rel->r_offset,
13309				howto, (bfd_signed_vma) sec->output_offset);
13310	      else
13311		rel->r_addend += sec->output_offset;
13312	    }
13313	  continue;
13314	}
13315
13316      if (h != NULL)
13317	name = h->root.root.string;
13318      else
13319	{
13320	  name = (bfd_elf_string_from_elf_section
13321		  (input_bfd, symtab_hdr->sh_link, sym->st_name));
13322	  if (name == NULL || *name == '\0')
13323	    name = bfd_section_name (sec);
13324	}
13325
13326      if (r_symndx != STN_UNDEF
13327	  && r_type != R_ARM_NONE
13328	  && (h == NULL
13329	      || h->root.type == bfd_link_hash_defined
13330	      || h->root.type == bfd_link_hash_defweak)
13331	  && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
13332	{
13333	  _bfd_error_handler
13334	    ((sym_type == STT_TLS
13335	      /* xgettext:c-format */
13336	      ? _("%pB(%pA+%#" PRIx64 "): %s used with TLS symbol %s")
13337	      /* xgettext:c-format */
13338	      : _("%pB(%pA+%#" PRIx64 "): %s used with non-TLS symbol %s")),
13339	     input_bfd,
13340	     input_section,
13341	     (uint64_t) rel->r_offset,
13342	     howto->name,
13343	     name);
13344	}
13345
      /* We call elf32_arm_final_link_relocate unless we're completely
	 done, i.e., the relaxation produced the final output we want,
	 and we won't let anybody mess with it.  Also, we have to do
	 addend adjustments for an R_ARM_TLS_GOTDESC relocation in both
	 the relaxed and non-relaxed cases.  */
13351      if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
13352	  || (IS_ARM_TLS_GNU_RELOC (r_type)
13353	      && !((h ? elf32_arm_hash_entry (h)->tls_type :
13354		    elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
13355		   & GOT_TLS_GDESC)))
13356	{
13357	  r = elf32_arm_tls_relax (globals, input_bfd, input_section,
13358				   contents, rel, h == NULL);
13359	  /* This may have been marked unresolved because it came from
13360	     a shared library.  But we've just dealt with that.  */
13361	  unresolved_reloc = 0;
13362	}
13363      else
13364	r = bfd_reloc_continue;
13365
13366      if (r == bfd_reloc_continue)
13367	{
13368	  unsigned char branch_type =
13369	    h ? ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
13370	      : ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
13371
13372	  r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
13373					     input_section, contents, rel,
13374					     relocation, info, sec, name,
13375					     sym_type, branch_type, h,
13376					     &unresolved_reloc,
13377					     &error_message);
13378	}
13379
13380      /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
13381	 because such sections are not SEC_ALLOC and thus ld.so will
13382	 not process them.  */
13383      if (unresolved_reloc
13384	  && !((input_section->flags & SEC_DEBUGGING) != 0
13385	       && h->def_dynamic)
13386	  && _bfd_elf_section_offset (output_bfd, info, input_section,
13387				      rel->r_offset) != (bfd_vma) -1)
13388	{
13389	  _bfd_error_handler
13390	    /* xgettext:c-format */
13391	    (_("%pB(%pA+%#" PRIx64 "): "
13392	       "unresolvable %s relocation against symbol `%s'"),
13393	     input_bfd,
13394	     input_section,
13395	     (uint64_t) rel->r_offset,
13396	     howto->name,
13397	     h->root.root.string);
13398	  return false;
13399	}
13400
13401      if (r != bfd_reloc_ok)
13402	{
13403	  switch (r)
13404	    {
13405	    case bfd_reloc_overflow:
13406	      /* If the overflowing reloc was to an undefined symbol,
13407		 we have already printed one error message and there
13408		 is no point complaining again.  */
13409	      if (!h || h->root.type != bfd_link_hash_undefined)
13410		(*info->callbacks->reloc_overflow)
13411		  (info, (h ? &h->root : NULL), name, howto->name,
13412		   (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
13413	      break;
13414
13415	    case bfd_reloc_undefined:
13416	      (*info->callbacks->undefined_symbol)
13417		(info, name, input_bfd, input_section, rel->r_offset, true);
13418	      break;
13419
13420	    case bfd_reloc_outofrange:
13421	      error_message = _("out of range");
13422	      goto common_error;
13423
13424	    case bfd_reloc_notsupported:
13425	      error_message = _("unsupported relocation");
13426	      goto common_error;
13427
13428	    case bfd_reloc_dangerous:
13429	      /* error_message should already be set.  */
13430	      goto common_error;
13431
13432	    default:
13433	      error_message = _("unknown error");
13434	      /* Fall through.  */
13435
13436	    common_error:
13437	      BFD_ASSERT (error_message != NULL);
13438	      (*info->callbacks->reloc_dangerous)
13439		(info, error_message, input_bfd, input_section, rel->r_offset);
13440	      break;
13441	    }
13442	}
13443    }
13444
13445  return true;
13446}
13447
13448/* Add a new unwind edit to the list described by HEAD, TAIL.  If TINDEX is zero,
13449   adds the edit to the start of the list.  (The list must be built in order of
13450   ascending TINDEX: the function's callers are primarily responsible for
13451   maintaining that condition).  */
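
/* For reference, the two call sites in this file (insert_cantunwind_after
   and elf32_arm_fix_exidx_coverage below) use this function as follows:

     add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
			    DELETE_EXIDX_ENTRY, NULL, j / 8);

     add_unwind_table_edit (&exidx_arm_data->u.exidx.unwind_edit_list,
			    &exidx_arm_data->u.exidx.unwind_edit_tail,
			    INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);  */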
13452
13453static void
13454add_unwind_table_edit (arm_unwind_table_edit **head,
13455		       arm_unwind_table_edit **tail,
13456		       arm_unwind_edit_type type,
13457		       asection *linked_section,
13458		       unsigned int tindex)
13459{
13460  arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
13461      xmalloc (sizeof (arm_unwind_table_edit));
13462
13463  new_edit->type = type;
13464  new_edit->linked_section = linked_section;
13465  new_edit->index = tindex;
13466
13467  if (tindex > 0)
13468    {
13469      new_edit->next = NULL;
13470
13471      if (*tail)
13472	(*tail)->next = new_edit;
13473
13474      (*tail) = new_edit;
13475
13476      if (!*head)
13477	(*head) = new_edit;
13478    }
13479  else
13480    {
13481      new_edit->next = *head;
13482
13483      if (!*tail)
13484	*tail = new_edit;
13485
13486      *head = new_edit;
13487    }
13488}
13489
13490static _arm_elf_section_data *get_arm_elf_section_data (asection *);
13491
/* Increase the size of EXIDX_SEC by ADJUST bytes.  ADJUST may be negative.  */
13493
13494static void
13495adjust_exidx_size (asection *exidx_sec, int adjust)
13496{
13497  asection *out_sec;
13498
13499  if (!exidx_sec->rawsize)
13500    exidx_sec->rawsize = exidx_sec->size;
13501
13502  bfd_set_section_size (exidx_sec, exidx_sec->size + adjust);
13503  out_sec = exidx_sec->output_section;
13504  /* Adjust size of output section.  */
13505  bfd_set_section_size (out_sec, out_sec->size + adjust);
13506}
13507
13508/* Insert an EXIDX_CANTUNWIND marker at the end of a section.  */
13509
13510static void
13511insert_cantunwind_after (asection *text_sec, asection *exidx_sec)
13512{
13513  struct _arm_elf_section_data *exidx_arm_data;
13514
13515  exidx_arm_data = get_arm_elf_section_data (exidx_sec);
13516  add_unwind_table_edit
13517    (&exidx_arm_data->u.exidx.unwind_edit_list,
13518     &exidx_arm_data->u.exidx.unwind_edit_tail,
13519     INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
13520
13521  exidx_arm_data->additional_reloc_count++;
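  /* Each EXIDX_CANTUNWIND marker occupies one eight-byte index table entry,
     hence the size adjustment of 8 below.  */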
13522
13523  adjust_exidx_size (exidx_sec, 8);
13524}
13525
13526/* Scan .ARM.exidx tables, and create a list describing edits which should be
13527   made to those tables, such that:
13528
13529     1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
13530     2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
13531	codes which have been inlined into the index).
13532
13533   If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
13534
13535   The edits are applied when the tables are written
13536   (in elf32_arm_write_section).  */
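
/* For reference, each .ARM.exidx entry is a pair of 32-bit words: the first
   identifies the start of the function the entry covers, and the second is
   either the value 1 (EXIDX_CANTUNWIND), an unwind opcode sequence inlined
   into the table (top bit set), or a reference to an entry in .ARM.extab.
   The scan below classifies entries on that basis.  */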
13537
13538bool
13539elf32_arm_fix_exidx_coverage (asection **text_section_order,
13540			      unsigned int num_text_sections,
13541			      struct bfd_link_info *info,
13542			      bool merge_exidx_entries)
13543{
13544  bfd *inp;
13545  unsigned int last_second_word = 0, i;
13546  asection *last_exidx_sec = NULL;
13547  asection *last_text_sec = NULL;
13548  int last_unwind_type = -1;
13549
  /* Walk over all EXIDX sections, and create backlinks from the corresponding
13551     text sections.  */
13552  for (inp = info->input_bfds; inp != NULL; inp = inp->link.next)
13553    {
13554      asection *sec;
13555
13556      for (sec = inp->sections; sec != NULL; sec = sec->next)
13557	{
13558	  struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
13559	  Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
13560
13561	  if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
13562	    continue;
13563
13564	  if (elf_sec->linked_to)
13565	    {
13566	      Elf_Internal_Shdr *linked_hdr
13567		= &elf_section_data (elf_sec->linked_to)->this_hdr;
13568	      struct _arm_elf_section_data *linked_sec_arm_data
13569		= get_arm_elf_section_data (linked_hdr->bfd_section);
13570
13571	      if (linked_sec_arm_data == NULL)
13572		continue;
13573
13574	      /* Link this .ARM.exidx section back from the text section it
13575		 describes.  */
13576	      linked_sec_arm_data->u.text.arm_exidx_sec = sec;
13577	    }
13578	}
13579    }
13580
  /* Walk all text sections in order of increasing VMA.  Eliminate duplicate
13582     index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
13583     and add EXIDX_CANTUNWIND entries for sections with no unwind table data.  */
13584
13585  for (i = 0; i < num_text_sections; i++)
13586    {
13587      asection *sec = text_section_order[i];
13588      asection *exidx_sec;
13589      struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
13590      struct _arm_elf_section_data *exidx_arm_data;
13591      bfd_byte *contents = NULL;
13592      int deleted_exidx_bytes = 0;
13593      bfd_vma j;
13594      arm_unwind_table_edit *unwind_edit_head = NULL;
13595      arm_unwind_table_edit *unwind_edit_tail = NULL;
13596      Elf_Internal_Shdr *hdr;
13597      bfd *ibfd;
13598
13599      if (arm_data == NULL)
13600	continue;
13601
13602      exidx_sec = arm_data->u.text.arm_exidx_sec;
13603      if (exidx_sec == NULL)
13604	{
13605	  /* Section has no unwind data.  */
13606	  if (last_unwind_type == 0 || !last_exidx_sec)
13607	    continue;
13608
13609	  /* Ignore zero sized sections.  */
13610	  if (sec->size == 0)
13611	    continue;
13612
13613	  insert_cantunwind_after (last_text_sec, last_exidx_sec);
13614	  last_unwind_type = 0;
13615	  continue;
13616	}
13617
13618      /* Skip /DISCARD/ sections.  */
13619      if (bfd_is_abs_section (exidx_sec->output_section))
13620	continue;
13621
13622      hdr = &elf_section_data (exidx_sec)->this_hdr;
13623      if (hdr->sh_type != SHT_ARM_EXIDX)
13624	continue;
13625
13626      exidx_arm_data = get_arm_elf_section_data (exidx_sec);
13627      if (exidx_arm_data == NULL)
13628	continue;
13629
13630      ibfd = exidx_sec->owner;
13631
13632      if (hdr->contents != NULL)
13633	contents = hdr->contents;
13634      else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
	/* Failed to read the section contents; skip this section.  */
13636	continue;
13637
13638      if (last_unwind_type > 0)
13639	{
13640	  unsigned int first_word = bfd_get_32 (ibfd, contents);
13641	  /* Add cantunwind if first unwind item does not match section
13642	     start.  */
13643	  if (first_word != sec->vma)
13644	    {
13645	      insert_cantunwind_after (last_text_sec, last_exidx_sec);
13646	      last_unwind_type = 0;
13647	    }
13648	}
13649
13650      for (j = 0; j < hdr->sh_size; j += 8)
13651	{
13652	  unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
13653	  int unwind_type;
13654	  int elide = 0;
13655
13656	  /* An EXIDX_CANTUNWIND entry.  */
13657	  if (second_word == 1)
13658	    {
13659	      if (last_unwind_type == 0)
13660		elide = 1;
13661	      unwind_type = 0;
13662	    }
13663	  /* Inlined unwinding data.  Merge if equal to previous.  */
13664	  else if ((second_word & 0x80000000) != 0)
13665	    {
13666	      if (merge_exidx_entries
13667		   && last_second_word == second_word && last_unwind_type == 1)
13668		elide = 1;
13669	      unwind_type = 1;
13670	      last_second_word = second_word;
13671	    }
13672	  /* Normal table entry.  In theory we could merge these too,
13673	     but duplicate entries are likely to be much less common.  */
13674	  else
13675	    unwind_type = 2;
13676
13677	  if (elide && !bfd_link_relocatable (info))
13678	    {
13679	      add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
13680				     DELETE_EXIDX_ENTRY, NULL, j / 8);
13681
13682	      deleted_exidx_bytes += 8;
13683	    }
13684
13685	  last_unwind_type = unwind_type;
13686	}
13687
13688      /* Free contents if we allocated it ourselves.  */
13689      if (contents != hdr->contents)
13690	free (contents);
13691
13692      /* Record edits to be applied later (in elf32_arm_write_section).  */
13693      exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
13694      exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
13695
13696      if (deleted_exidx_bytes > 0)
13697	adjust_exidx_size (exidx_sec, - deleted_exidx_bytes);
13698
13699      last_exidx_sec = exidx_sec;
13700      last_text_sec = sec;
13701    }
13702
13703  /* Add terminating CANTUNWIND entry.  */
13704  if (!bfd_link_relocatable (info) && last_exidx_sec
13705      && last_unwind_type != 0)
13706    insert_cantunwind_after (last_text_sec, last_exidx_sec);
13707
13708  return true;
13709}
13710
13711static bool
13712elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
13713			       bfd *ibfd, const char *name)
13714{
13715  asection *sec, *osec;
13716
13717  sec = bfd_get_linker_section (ibfd, name);
13718  if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
13719    return true;
13720
13721  osec = sec->output_section;
13722  if (elf32_arm_write_section (obfd, info, sec, sec->contents))
13723    return true;
13724
13725  if (! bfd_set_section_contents (obfd, osec, sec->contents,
13726				  sec->output_offset, sec->size))
13727    return false;
13728
13729  return true;
13730}
13731
13732static bool
13733elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
13734{
13735  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
13736  asection *sec, *osec;
13737
13738  if (globals == NULL)
13739    return false;
13740
13741  /* Invoke the regular ELF backend linker to do all the work.  */
13742  if (!bfd_elf_final_link (abfd, info))
13743    return false;
13744
  /* Process stub sections (e.g. BE8 encoding, ...).  */
13746  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
13747  unsigned int i;
  for (i = 0; i < htab->top_id; i++)
13749    {
13750      sec = htab->stub_group[i].stub_sec;
13751      /* Only process it once, in its link_sec slot.  */
13752      if (sec && i == htab->stub_group[i].link_sec->id)
13753	{
13754	  osec = sec->output_section;
13755	  elf32_arm_write_section (abfd, info, sec, sec->contents);
13756	  if (! bfd_set_section_contents (abfd, osec, sec->contents,
13757					  sec->output_offset, sec->size))
13758	    return false;
13759	}
13760    }
13761
13762  /* Write out any glue sections now that we have created all the
13763     stubs.  */
13764  if (globals->bfd_of_glue_owner != NULL)
13765    {
13766      if (! elf32_arm_output_glue_section (info, abfd,
13767					   globals->bfd_of_glue_owner,
13768					   ARM2THUMB_GLUE_SECTION_NAME))
13769	return false;
13770
13771      if (! elf32_arm_output_glue_section (info, abfd,
13772					   globals->bfd_of_glue_owner,
13773					   THUMB2ARM_GLUE_SECTION_NAME))
13774	return false;
13775
13776      if (! elf32_arm_output_glue_section (info, abfd,
13777					   globals->bfd_of_glue_owner,
13778					   VFP11_ERRATUM_VENEER_SECTION_NAME))
13779	return false;
13780
13781      if (! elf32_arm_output_glue_section (info, abfd,
13782					   globals->bfd_of_glue_owner,
13783					   STM32L4XX_ERRATUM_VENEER_SECTION_NAME))
13784	return false;
13785
13786      if (! elf32_arm_output_glue_section (info, abfd,
13787					   globals->bfd_of_glue_owner,
13788					   ARM_BX_GLUE_SECTION_NAME))
13789	return false;
13790    }
13791
13792  return true;
13793}
13794
13795/* Return a best guess for the machine number based on the attributes.  */
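
/* For example, a Tag_CPU_arch of TAG_CPU_ARCH_V5TE combined with a
   Tag_CPU_name of "XSCALE" and a Tag_WMMX_arch of 2 maps to
   bfd_mach_arm_iWMMXt2, while an unrecognised Tag_CPU_arch value yields
   bfd_mach_arm_unknown.  */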
13796
13797static unsigned int
13798bfd_arm_get_mach_from_attributes (bfd * abfd)
13799{
13800  int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);
13801
13802  switch (arch)
13803    {
13804    case TAG_CPU_ARCH_PRE_V4: return bfd_mach_arm_3M;
13805    case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
13806    case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
13807    case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;
13808
13809    case TAG_CPU_ARCH_V5TE:
13810      {
13811	char * name;
13812
13813	BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
13814	name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;
13815
13816	if (name)
13817	  {
13818	    if (strcmp (name, "IWMMXT2") == 0)
13819	      return bfd_mach_arm_iWMMXt2;
13820
13821	    if (strcmp (name, "IWMMXT") == 0)
13822	      return bfd_mach_arm_iWMMXt;
13823
13824	    if (strcmp (name, "XSCALE") == 0)
13825	      {
13826		int wmmx;
13827
13828		BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES);
13829		wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i;
13830		switch (wmmx)
13831		  {
13832		  case 1: return bfd_mach_arm_iWMMXt;
13833		  case 2: return bfd_mach_arm_iWMMXt2;
13834		  default: return bfd_mach_arm_XScale;
13835		  }
13836	      }
13837	  }
13838
13839	return bfd_mach_arm_5TE;
13840      }
13841
13842    case TAG_CPU_ARCH_V5TEJ:
13843	return bfd_mach_arm_5TEJ;
13844    case TAG_CPU_ARCH_V6:
13845	return bfd_mach_arm_6;
13846    case TAG_CPU_ARCH_V6KZ:
13847	return bfd_mach_arm_6KZ;
13848    case TAG_CPU_ARCH_V6T2:
13849	return bfd_mach_arm_6T2;
13850    case TAG_CPU_ARCH_V6K:
13851	return bfd_mach_arm_6K;
13852    case TAG_CPU_ARCH_V7:
13853	return bfd_mach_arm_7;
13854    case TAG_CPU_ARCH_V6_M:
13855	return bfd_mach_arm_6M;
13856    case TAG_CPU_ARCH_V6S_M:
13857	return bfd_mach_arm_6SM;
13858    case TAG_CPU_ARCH_V7E_M:
13859	return bfd_mach_arm_7EM;
13860    case TAG_CPU_ARCH_V8:
13861	return bfd_mach_arm_8;
13862    case TAG_CPU_ARCH_V8R:
13863	return bfd_mach_arm_8R;
13864    case TAG_CPU_ARCH_V8M_BASE:
13865	return bfd_mach_arm_8M_BASE;
13866    case TAG_CPU_ARCH_V8M_MAIN:
13867	return bfd_mach_arm_8M_MAIN;
13868    case TAG_CPU_ARCH_V8_1M_MAIN:
13869	return bfd_mach_arm_8_1M_MAIN;
13870    case TAG_CPU_ARCH_V9:
13871	return bfd_mach_arm_9;
13872
13873    default:
13874      /* Force entry to be added for any new known Tag_CPU_arch value.  */
13875      BFD_ASSERT (arch > MAX_TAG_CPU_ARCH);
13876
13877      /* Unknown Tag_CPU_arch value.  */
13878      return bfd_mach_arm_unknown;
13879    }
13880}
13881
13882/* Set the right machine number.  */
13883
13884static bool
13885elf32_arm_object_p (bfd *abfd)
13886{
13887  unsigned int mach;
13888
13889  mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
13890
13891  if (mach == bfd_mach_arm_unknown)
13892    {
13893      if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
13894	mach = bfd_mach_arm_ep9312;
13895      else
13896	mach = bfd_arm_get_mach_from_attributes (abfd);
13897    }
13898
13899  bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
13900  return true;
13901}
13902
13903/* Function to keep ARM specific flags in the ELF header.  */
13904
13905static bool
13906elf32_arm_set_private_flags (bfd *abfd, flagword flags)
13907{
13908  if (elf_flags_init (abfd)
13909      && elf_elfheader (abfd)->e_flags != flags)
13910    {
13911      if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
13912	{
13913	  if (flags & EF_ARM_INTERWORK)
13914	    _bfd_error_handler
13915	      (_("warning: not setting interworking flag of %pB since it has already been specified as non-interworking"),
13916	       abfd);
13917	  else
13918	    _bfd_error_handler
13919	      (_("warning: clearing the interworking flag of %pB due to outside request"),
13920	       abfd);
13921	}
13922    }
13923  else
13924    {
13925      elf_elfheader (abfd)->e_flags = flags;
13926      elf_flags_init (abfd) = true;
13927    }
13928
13929  return true;
13930}
13931
13932/* Copy backend specific data from one object module to another.  */
13933
13934static bool
13935elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
13936{
13937  flagword in_flags;
13938  flagword out_flags;
13939
13940  if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
13941    return true;
13942
13943  in_flags  = elf_elfheader (ibfd)->e_flags;
13944  out_flags = elf_elfheader (obfd)->e_flags;
13945
13946  if (elf_flags_init (obfd)
13947      && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
13948      && in_flags != out_flags)
13949    {
13950      /* Cannot mix APCS26 and APCS32 code.  */
13951      if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
13952	return false;
13953
13954      /* Cannot mix float APCS and non-float APCS code.  */
13955      if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
13956	return false;
13957
13958      /* If the src and dest have different interworking flags
13959	 then turn off the interworking bit.  */
13960      if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
13961	{
13962	  if (out_flags & EF_ARM_INTERWORK)
13963	    _bfd_error_handler
13964	      (_("warning: clearing the interworking flag of %pB because non-interworking code in %pB has been linked with it"),
13965	       obfd, ibfd);
13966
13967	  in_flags &= ~EF_ARM_INTERWORK;
13968	}
13969
13970      /* Likewise for PIC, though don't warn for this case.  */
13971      if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
13972	in_flags &= ~EF_ARM_PIC;
13973    }
13974
13975  elf_elfheader (obfd)->e_flags = in_flags;
13976  elf_flags_init (obfd) = true;
13977
13978  return _bfd_elf_copy_private_bfd_data (ibfd, obfd);
13979}
13980
13981/* Values for Tag_ABI_PCS_R9_use.  */
13982enum
13983{
13984  AEABI_R9_V6,
13985  AEABI_R9_SB,
13986  AEABI_R9_TLS,
13987  AEABI_R9_unused
13988};
13989
13990/* Values for Tag_ABI_PCS_RW_data.  */
13991enum
13992{
13993  AEABI_PCS_RW_data_absolute,
13994  AEABI_PCS_RW_data_PCrel,
13995  AEABI_PCS_RW_data_SBrel,
13996  AEABI_PCS_RW_data_unused
13997};
13998
13999/* Values for Tag_ABI_enum_size.  */
14000enum
14001{
14002  AEABI_enum_unused,
14003  AEABI_enum_short,
14004  AEABI_enum_wide,
14005  AEABI_enum_forced_wide
14006};
14007
14008/* Determine whether an object attribute tag takes an integer, a
14009   string or both.  */
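
/* For instance, Tag_CPU_raw_name and Tag_CPU_name take strings,
   Tag_compatibility takes both an integer and a string, and for the
   remaining tags numbered 32 or above odd-numbered tags take strings
   while even-numbered tags take integers.  */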
14010
14011static int
14012elf32_arm_obj_attrs_arg_type (int tag)
14013{
14014  if (tag == Tag_compatibility)
14015    return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
14016  else if (tag == Tag_nodefaults)
14017    return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
14018  else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
14019    return ATTR_TYPE_FLAG_STR_VAL;
14020  else if (tag < 32)
14021    return ATTR_TYPE_FLAG_INT_VAL;
14022  else
14023    return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
14024}
14025
14026/* The ABI defines that Tag_conformance should be emitted first, and that
14027   Tag_nodefaults should be second (if either is defined).  This sets those
14028   two positions, and bumps up the position of all the remaining tags to
14029   compensate.  */
14030static int
14031elf32_arm_obj_attrs_order (int num)
14032{
14033  if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
14034    return Tag_conformance;
14035  if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
14036    return Tag_nodefaults;
14037  if ((num - 2) < Tag_nodefaults)
14038    return num - 2;
14039  if ((num - 1) < Tag_conformance)
14040    return num - 1;
14041  return num;
14042}
14043
14044/* Attribute numbers >=64 (mod 128) can be safely ignored.  */
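
/* For example, an unknown tag of 70 only provokes a warning, whereas an
   unknown tag of 35 is treated as a hard error because the producer has
   marked it as mandatory.  */
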
14045static bool
14046elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
14047{
14048  if ((tag & 127) < 64)
14049    {
14050      _bfd_error_handler
14051	(_("%pB: unknown mandatory EABI object attribute %d"),
14052	 abfd, tag);
14053      bfd_set_error (bfd_error_bad_value);
14054      return false;
14055    }
14056  else
14057    {
14058      _bfd_error_handler
14059	(_("warning: %pB: unknown EABI object attribute %d"),
14060	 abfd, tag);
14061      return true;
14062    }
14063}
14064
14065/* Read the architecture from the Tag_also_compatible_with attribute, if any.
14066   Returns -1 if no architecture could be read.  */
14067
14068static int
14069get_secondary_compatible_arch (bfd *abfd)
14070{
14071  obj_attribute *attr =
14072    &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
14073
14074  /* Note: the tag and its argument below are uleb128 values, though
14075     currently-defined values fit in one byte for each.  */
14076  if (attr->s
14077      && attr->s[0] == Tag_CPU_arch
14078      && (attr->s[1] & 128) != 128
14079      && attr->s[2] == 0)
14080   return attr->s[1];
14081
14082  /* This tag is "safely ignorable", so don't complain if it looks funny.  */
14083  return -1;
14084}
14085
14086/* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
14087   The tag is removed if ARCH is -1.  */
14088
14089static void
14090set_secondary_compatible_arch (bfd *abfd, int arch)
14091{
14092  obj_attribute *attr =
14093    &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
14094
14095  if (arch == -1)
14096    {
14097      attr->s = NULL;
14098      return;
14099    }
14100
14101  /* Note: the tag and its argument below are uleb128 values, though
14102     currently-defined values fit in one byte for each.  */
14103  if (!attr->s)
14104    attr->s = (char *) bfd_alloc (abfd, 3);
14105  attr->s[0] = Tag_CPU_arch;
14106  attr->s[1] = arch;
14107  attr->s[2] = '\0';
14108}
14109
14110/* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
14111   into account.  */
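
/* For example, per the v6k table below, combining V6T2 with V6K yields V7;
   an input carrying Tag_CPU_arch V4T together with a Tag_also_compatible_with
   of V6_M is treated as the pseudo-architecture V4T_PLUS_V6_M before the
   tables are consulted.  */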
14112
14113static int
14114tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
14115		      int newtag, int secondary_compat)
14116{
14117#define T(X) TAG_CPU_ARCH_##X
14118  int tagl, tagh, result;
14119  const int v6t2[] =
14120    {
14121      T(V6T2),   /* PRE_V4.  */
14122      T(V6T2),   /* V4.  */
14123      T(V6T2),   /* V4T.  */
14124      T(V6T2),   /* V5T.  */
14125      T(V6T2),   /* V5TE.  */
14126      T(V6T2),   /* V5TEJ.  */
14127      T(V6T2),   /* V6.  */
14128      T(V7),     /* V6KZ.  */
14129      T(V6T2)    /* V6T2.  */
14130    };
14131  const int v6k[] =
14132    {
14133      T(V6K),    /* PRE_V4.  */
14134      T(V6K),    /* V4.  */
14135      T(V6K),    /* V4T.  */
14136      T(V6K),    /* V5T.  */
14137      T(V6K),    /* V5TE.  */
14138      T(V6K),    /* V5TEJ.  */
14139      T(V6K),    /* V6.  */
14140      T(V6KZ),   /* V6KZ.  */
14141      T(V7),     /* V6T2.  */
14142      T(V6K)     /* V6K.  */
14143    };
14144  const int v7[] =
14145    {
14146      T(V7),     /* PRE_V4.  */
14147      T(V7),     /* V4.  */
14148      T(V7),     /* V4T.  */
14149      T(V7),     /* V5T.  */
14150      T(V7),     /* V5TE.  */
14151      T(V7),     /* V5TEJ.  */
14152      T(V7),     /* V6.  */
14153      T(V7),     /* V6KZ.  */
14154      T(V7),     /* V6T2.  */
14155      T(V7),     /* V6K.  */
14156      T(V7)      /* V7.  */
14157    };
14158  const int v6_m[] =
14159    {
14160      -1,	 /* PRE_V4.  */
14161      -1,	 /* V4.  */
14162      T(V6K),    /* V4T.  */
14163      T(V6K),    /* V5T.  */
14164      T(V6K),    /* V5TE.  */
14165      T(V6K),    /* V5TEJ.  */
14166      T(V6K),    /* V6.  */
14167      T(V6KZ),   /* V6KZ.  */
14168      T(V7),     /* V6T2.  */
14169      T(V6K),    /* V6K.  */
14170      T(V7),     /* V7.  */
14171      T(V6_M)    /* V6_M.  */
14172    };
14173  const int v6s_m[] =
14174    {
14175      -1,	 /* PRE_V4.  */
14176      -1,	 /* V4.  */
14177      T(V6K),    /* V4T.  */
14178      T(V6K),    /* V5T.  */
14179      T(V6K),    /* V5TE.  */
14180      T(V6K),    /* V5TEJ.  */
14181      T(V6K),    /* V6.  */
14182      T(V6KZ),   /* V6KZ.  */
14183      T(V7),     /* V6T2.  */
14184      T(V6K),    /* V6K.  */
14185      T(V7),     /* V7.  */
14186      T(V6S_M),  /* V6_M.  */
14187      T(V6S_M)   /* V6S_M.  */
14188    };
14189  const int v7e_m[] =
14190    {
14191      -1,	 /* PRE_V4.  */
14192      -1,	 /* V4.  */
14193      T(V7E_M),  /* V4T.  */
14194      T(V7E_M),  /* V5T.  */
14195      T(V7E_M),  /* V5TE.  */
14196      T(V7E_M),  /* V5TEJ.  */
14197      T(V7E_M),  /* V6.  */
14198      T(V7E_M),  /* V6KZ.  */
14199      T(V7E_M),  /* V6T2.  */
14200      T(V7E_M),  /* V6K.  */
14201      T(V7E_M),  /* V7.  */
14202      T(V7E_M),  /* V6_M.  */
14203      T(V7E_M),  /* V6S_M.  */
14204      T(V7E_M)   /* V7E_M.  */
14205    };
14206  const int v8[] =
14207    {
14208      T(V8),		/* PRE_V4.  */
14209      T(V8),		/* V4.  */
14210      T(V8),		/* V4T.  */
14211      T(V8),		/* V5T.  */
14212      T(V8),		/* V5TE.  */
14213      T(V8),		/* V5TEJ.  */
14214      T(V8),		/* V6.  */
14215      T(V8),		/* V6KZ.  */
14216      T(V8),		/* V6T2.  */
14217      T(V8),		/* V6K.  */
14218      T(V8),		/* V7.  */
14219      T(V8),		/* V6_M.  */
14220      T(V8),		/* V6S_M.  */
14221      T(V8),		/* V7E_M.  */
14222      T(V8),		/* V8.  */
14223      T(V8),		/* V8-R.  */
14224      T(V8),		/* V8-M.BASE.  */
14225      T(V8),		/* V8-M.MAIN.  */
14226      T(V8),		/* V8.1.  */
14227      T(V8),		/* V8.2.  */
14228      T(V8),		/* V8.3.  */
14229      T(V8),		/* V8.1-M.MAIN.  */
14230    };
14231  const int v8r[] =
14232    {
14233      T(V8R),		/* PRE_V4.  */
14234      T(V8R),		/* V4.  */
14235      T(V8R),		/* V4T.  */
14236      T(V8R),		/* V5T.  */
14237      T(V8R),		/* V5TE.  */
14238      T(V8R),		/* V5TEJ.  */
14239      T(V8R),		/* V6.  */
14240      T(V8R),		/* V6KZ.  */
14241      T(V8R),		/* V6T2.  */
14242      T(V8R),		/* V6K.  */
14243      T(V8R),		/* V7.  */
14244      T(V8R),		/* V6_M.  */
14245      T(V8R),		/* V6S_M.  */
14246      T(V8R),		/* V7E_M.  */
14247      T(V8),		/* V8.  */
14248      T(V8R),		/* V8R.  */
14249    };
14250  const int v8m_baseline[] =
14251    {
14252      -1,		/* PRE_V4.  */
14253      -1,		/* V4.  */
14254      -1,		/* V4T.  */
14255      -1,		/* V5T.  */
14256      -1,		/* V5TE.  */
14257      -1,		/* V5TEJ.  */
14258      -1,		/* V6.  */
14259      -1,		/* V6KZ.  */
14260      -1,		/* V6T2.  */
14261      -1,		/* V6K.  */
14262      -1,		/* V7.  */
14263      T(V8M_BASE),	/* V6_M.  */
14264      T(V8M_BASE),	/* V6S_M.  */
14265      -1,		/* V7E_M.  */
14266      -1,		/* V8.  */
14267      -1,		/* V8R.  */
14268      T(V8M_BASE)	/* V8-M BASELINE.  */
14269    };
14270  const int v8m_mainline[] =
14271    {
14272      -1,		/* PRE_V4.  */
14273      -1,		/* V4.  */
14274      -1,		/* V4T.  */
14275      -1,		/* V5T.  */
14276      -1,		/* V5TE.  */
14277      -1,		/* V5TEJ.  */
14278      -1,		/* V6.  */
14279      -1,		/* V6KZ.  */
14280      -1,		/* V6T2.  */
14281      -1,		/* V6K.  */
14282      T(V8M_MAIN),	/* V7.  */
14283      T(V8M_MAIN),	/* V6_M.  */
14284      T(V8M_MAIN),	/* V6S_M.  */
14285      T(V8M_MAIN),	/* V7E_M.  */
14286      -1,		/* V8.  */
14287      -1,		/* V8R.  */
14288      T(V8M_MAIN),	/* V8-M BASELINE.  */
14289      T(V8M_MAIN)	/* V8-M MAINLINE.  */
14290    };
14291  const int v8_1m_mainline[] =
14292    {
14293      -1,		/* PRE_V4.  */
14294      -1,		/* V4.  */
14295      -1,		/* V4T.  */
14296      -1,		/* V5T.  */
14297      -1,		/* V5TE.  */
14298      -1,		/* V5TEJ.  */
14299      -1,		/* V6.  */
14300      -1,		/* V6KZ.  */
14301      -1,		/* V6T2.  */
14302      -1,		/* V6K.  */
14303      T(V8_1M_MAIN),	/* V7.  */
14304      T(V8_1M_MAIN),	/* V6_M.  */
14305      T(V8_1M_MAIN),	/* V6S_M.  */
14306      T(V8_1M_MAIN),	/* V7E_M.  */
14307      -1,		/* V8.  */
14308      -1,		/* V8R.  */
14309      T(V8_1M_MAIN),	/* V8-M BASELINE.  */
14310      T(V8_1M_MAIN),	/* V8-M MAINLINE.  */
14311      -1,		/* Unused (18).  */
14312      -1,		/* Unused (19).  */
14313      -1,		/* Unused (20).  */
14314      T(V8_1M_MAIN)	/* V8.1-M MAINLINE.  */
14315    };
14316  const int v9[] =
14317    {
14318      T(V9),		/* PRE_V4.  */
14319      T(V9),		/* V4.  */
14320      T(V9),		/* V4T.  */
14321      T(V9),		/* V5T.  */
14322      T(V9),		/* V5TE.  */
14323      T(V9),		/* V5TEJ.  */
14324      T(V9),		/* V6.  */
14325      T(V9),		/* V6KZ.  */
14326      T(V9),		/* V6T2.  */
14327      T(V9),		/* V6K.  */
14328      T(V9),		/* V7.  */
14329      T(V9),		/* V6_M.  */
14330      T(V9),		/* V6S_M.  */
14331      T(V9),		/* V7E_M.  */
14332      T(V9),		/* V8.  */
14333      T(V9),		/* V8-R.  */
14334      T(V9),		/* V8-M.BASE.  */
14335      T(V9),		/* V8-M.MAIN.  */
14336      T(V9),		/* V8.1.  */
14337      T(V9),		/* V8.2.  */
14338      T(V9),		/* V8.3.  */
14339      T(V9),		/* V8.1-M.MAIN.  */
14340      T(V9),		/* V9.  */
14341     };
14342  const int v4t_plus_v6_m[] =
14343    {
14344      -1,		/* PRE_V4.  */
14345      -1,		/* V4.  */
14346      T(V4T),		/* V4T.  */
14347      T(V5T),		/* V5T.  */
14348      T(V5TE),		/* V5TE.  */
14349      T(V5TEJ),		/* V5TEJ.  */
14350      T(V6),		/* V6.  */
14351      T(V6KZ),		/* V6KZ.  */
14352      T(V6T2),		/* V6T2.  */
14353      T(V6K),		/* V6K.  */
14354      T(V7),		/* V7.  */
14355      T(V6_M),		/* V6_M.  */
14356      T(V6S_M),		/* V6S_M.  */
14357      T(V7E_M),		/* V7E_M.  */
14358      T(V8),		/* V8.  */
14359      -1,		/* V8R.  */
14360      T(V8M_BASE),	/* V8-M BASELINE.  */
14361      T(V8M_MAIN),	/* V8-M MAINLINE.  */
14362      -1,		/* Unused (18).  */
14363      -1,		/* Unused (19).  */
14364      -1,		/* Unused (20).  */
14365      T(V8_1M_MAIN),	/* V8.1-M MAINLINE.  */
14366      T(V9),		/* V9.  */
14367      T(V4T_PLUS_V6_M)	/* V4T plus V6_M.  */
14368    };
14369  const int *comb[] =
14370    {
14371      v6t2,
14372      v6k,
14373      v7,
14374      v6_m,
14375      v6s_m,
14376      v7e_m,
14377      v8,
14378      v8r,
14379      v8m_baseline,
14380      v8m_mainline,
14381      NULL,
14382      NULL,
14383      NULL,
14384      v8_1m_mainline,
14385      v9,
14386      /* Pseudo-architecture.  */
14387      v4t_plus_v6_m
14388    };
14389
14390  /* Check we've not got a higher architecture than we know about.  */
14391
14392  if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
14393    {
14394      _bfd_error_handler (_("error: %pB: unknown CPU architecture"), ibfd);
14395      return -1;
14396    }
14397
14398  /* Override old tag if we have a Tag_also_compatible_with on the output.  */
14399
14400  if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
14401      || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
14402    oldtag = T(V4T_PLUS_V6_M);
14403
14404  /* And override the new tag if we have a Tag_also_compatible_with on the
14405     input.  */
14406
14407  if ((newtag == T(V6_M) && secondary_compat == T(V4T))
14408      || (newtag == T(V4T) && secondary_compat == T(V6_M)))
14409    newtag = T(V4T_PLUS_V6_M);
14410
14411  tagl = (oldtag < newtag) ? oldtag : newtag;
14412  result = tagh = (oldtag > newtag) ? oldtag : newtag;
14413
14414  /* Architectures before V6KZ add features monotonically.  */
14415  if (tagh <= TAG_CPU_ARCH_V6KZ)
14416    return result;
14417
14418  result = comb[tagh - T(V6T2)] ? comb[tagh - T(V6T2)][tagl] : -1;
14419
14420  /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
14421     as the canonical version.  */
14422  if (result == T(V4T_PLUS_V6_M))
14423    {
14424      result = T(V4T);
14425      *secondary_compat_out = T(V6_M);
14426    }
14427  else
14428    *secondary_compat_out = -1;
14429
14430  if (result == -1)
14431    {
14432      _bfd_error_handler (_("error: %pB: conflicting CPU architectures %d/%d"),
14433			  ibfd, oldtag, newtag);
14434      return -1;
14435    }
14436
14437  return result;
14438#undef T
14439}
14440
/* Query the attributes object to see if integer divide instructions may be
   present in an object.  */
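
/* For example, with a Tag_DIV_use of 0 an ARMv7 object with an R or M
   profile is assumed to be able to use the divide instructions, an ARMv7
   A-profile object is not, and any object at ARMv7E-M or later is;
   a Tag_DIV_use of 2 accepts the instructions regardless of architecture.  */
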
14443static bool
14444elf32_arm_attributes_accept_div (const obj_attribute *attr)
14445{
14446  int arch = attr[Tag_CPU_arch].i;
14447  int profile = attr[Tag_CPU_arch_profile].i;
14448
14449  switch (attr[Tag_DIV_use].i)
14450    {
14451    case 0:
      /* Integer divide allowed if the instruction is contained in the
	 architecture.  */
14453      if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
14454	return true;
14455      else if (arch >= TAG_CPU_ARCH_V7E_M)
14456	return true;
14457      else
14458	return false;
14459
14460    case 1:
14461      /* Integer divide explicitly prohibited.  */
14462      return false;
14463
14464    default:
14465      /* Unrecognised case - treat as allowing divide everywhere.  */
14466    case 2:
14467      /* Integer divide allowed in ARM state.  */
14468      return true;
14469    }
14470}
14471
/* Query the attributes object to see if integer divide instructions are
   forbidden in the object.  This is not the inverse of
   elf32_arm_attributes_accept_div.  */
14475static bool
14476elf32_arm_attributes_forbid_div (const obj_attribute *attr)
14477{
14478  return attr[Tag_DIV_use].i == 1;
14479}
14480
14481/* Merge EABI object attributes from IBFD into OBFD.  Raise an error if there
14482   are conflicting attributes.  */
14483
14484static bool
14485elf32_arm_merge_eabi_attributes (bfd *ibfd, struct bfd_link_info *info)
14486{
14487  bfd *obfd = info->output_bfd;
14488  obj_attribute *in_attr;
14489  obj_attribute *out_attr;
14490  /* Some tags have 0 = don't care, 1 = strong requirement,
14491     2 = weak requirement.  */
14492  static const int order_021[3] = {0, 2, 1};
14493  int i;
14494  bool result = true;
14495  const char *sec_name = get_elf_backend_data (ibfd)->obj_attrs_section;
14496
14497  /* Skip the linker stubs file.  This preserves previous behavior
14498     of accepting unknown attributes in the first input file - but
14499     is that a bug?  */
14500  if (ibfd->flags & BFD_LINKER_CREATED)
14501    return true;
14502
  /* Skip any input that does not have an attribute section.
     This makes it possible to link object files without an attribute
     section with any others.  */
14506  if (bfd_get_section_by_name (ibfd, sec_name) == NULL)
14507    return true;
14508
14509  if (!elf_known_obj_attributes_proc (obfd)[0].i)
14510    {
14511      /* This is the first object.  Copy the attributes.  */
14512      _bfd_elf_copy_obj_attributes (ibfd, obfd);
14513
14514      out_attr = elf_known_obj_attributes_proc (obfd);
14515
14516      /* Use the Tag_null value to indicate the attributes have been
14517	 initialized.  */
14518      out_attr[0].i = 1;
14519
14520      /* We do not output objects with Tag_MPextension_use_legacy - we move
14521	 the attribute's value to Tag_MPextension_use.  */
14522      if (out_attr[Tag_MPextension_use_legacy].i != 0)
14523	{
14524	  if (out_attr[Tag_MPextension_use].i != 0
14525	      && out_attr[Tag_MPextension_use_legacy].i
14526		!= out_attr[Tag_MPextension_use].i)
14527	    {
14528	      _bfd_error_handler
14529		(_("Error: %pB has both the current and legacy "
14530		   "Tag_MPextension_use attributes"), ibfd);
14531	      result = false;
14532	    }
14533
14534	  out_attr[Tag_MPextension_use] =
14535	    out_attr[Tag_MPextension_use_legacy];
14536	  out_attr[Tag_MPextension_use_legacy].type = 0;
14537	  out_attr[Tag_MPextension_use_legacy].i = 0;
14538	}
14539
14540      /* PR 28859 and 28848:  Handle the case where the first input file,
14541	 eg crti.o, has a Tag_ABI_HardFP_use of 3 but no Tag_FP_arch set.
14542	 Using Tag_ABI_HardFP_use in this way is deprecated, so reset the
14543	 attribute to zero.
	 FIXME: Should we handle other non-zero values of Tag_ABI_HardFP_use ? */
14545      if (out_attr[Tag_ABI_HardFP_use].i == 3 && out_attr[Tag_FP_arch].i == 0)
14546	out_attr[Tag_ABI_HardFP_use].i = 0;
14547
14548      return result;
14549    }
14550
14551  in_attr = elf_known_obj_attributes_proc (ibfd);
14552  out_attr = elf_known_obj_attributes_proc (obfd);
14553  /* This needs to happen before Tag_ABI_FP_number_model is merged.  */
14554  if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
14555    {
14556      /* Ignore mismatches if the object doesn't use floating point or is
14557	 floating point ABI independent.  */
14558      if (out_attr[Tag_ABI_FP_number_model].i == AEABI_FP_number_model_none
14559	  || (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
14560	      && out_attr[Tag_ABI_VFP_args].i == AEABI_VFP_args_compatible))
14561	out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
14562      else if (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
14563	       && in_attr[Tag_ABI_VFP_args].i != AEABI_VFP_args_compatible)
14564	{
14565	  _bfd_error_handler
14566	    (_("error: %pB uses VFP register arguments, %pB does not"),
14567	     in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
14568	     in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
14569	  result = false;
14570	}
14571    }
14572
14573  for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
14574    {
14575      /* Merge this attribute with existing attributes.  */
14576      switch (i)
14577	{
14578	case Tag_CPU_raw_name:
14579	case Tag_CPU_name:
14580	  /* These are merged after Tag_CPU_arch.  */
14581	  break;
14582
14583	case Tag_ABI_optimization_goals:
14584	case Tag_ABI_FP_optimization_goals:
14585	  /* Use the first value seen.  */
14586	  break;
14587
14588	case Tag_CPU_arch:
14589	  {
14590	    int secondary_compat = -1, secondary_compat_out = -1;
14591	    unsigned int saved_out_attr = out_attr[i].i;
14592	    int arch_attr;
14593	    static const char *name_table[] =
14594	      {
14595		/* These aren't real CPU names, but we can't guess
14596		   that from the architecture version alone.  */
14597		"Pre v4",
14598		"ARM v4",
14599		"ARM v4T",
14600		"ARM v5T",
14601		"ARM v5TE",
14602		"ARM v5TEJ",
14603		"ARM v6",
14604		"ARM v6KZ",
14605		"ARM v6T2",
14606		"ARM v6K",
14607		"ARM v7",
14608		"ARM v6-M",
14609		"ARM v6S-M",
14610		"ARM v7E-M",
14611		"ARM v8",
14612		"ARM v8-R",
14613		"ARM v8-M.baseline",
14614		"ARM v8-M.mainline",
14615		"ARM v8.1-A",
14616		"ARM v8.2-A",
14617		"ARM v8.3-A",
14618		"ARM v8.1-M.mainline",
14619		"ARM v9",
14620	    };
14621
14622	    /* Merge Tag_CPU_arch and Tag_also_compatible_with.  */
14623	    secondary_compat = get_secondary_compatible_arch (ibfd);
14624	    secondary_compat_out = get_secondary_compatible_arch (obfd);
14625	    arch_attr = tag_cpu_arch_combine (ibfd, out_attr[i].i,
14626					      &secondary_compat_out,
14627					      in_attr[i].i,
14628					      secondary_compat);
14629
14630	    /* Return with error if failed to merge.  */
14631	    if (arch_attr == -1)
14632	      return false;
14633
14634	    out_attr[i].i = arch_attr;
14635
14636	    set_secondary_compatible_arch (obfd, secondary_compat_out);
14637
14638	    /* Merge Tag_CPU_name and Tag_CPU_raw_name.  */
14639	    if (out_attr[i].i == saved_out_attr)
14640	      ; /* Leave the names alone.  */
14641	    else if (out_attr[i].i == in_attr[i].i)
14642	      {
14643		/* The output architecture has been changed to match the
14644		   input architecture.  Use the input names.  */
14645		out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
14646		  ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
14647		  : NULL;
14648		out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
14649		  ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
14650		  : NULL;
14651	      }
14652	    else
14653	      {
14654		out_attr[Tag_CPU_name].s = NULL;
14655		out_attr[Tag_CPU_raw_name].s = NULL;
14656	      }
14657
14658	    /* If we still don't have a value for Tag_CPU_name,
14659	       make one up now.  Tag_CPU_raw_name remains blank.  */
14660	    if (out_attr[Tag_CPU_name].s == NULL
14661		&& out_attr[i].i < ARRAY_SIZE (name_table))
14662	      out_attr[Tag_CPU_name].s =
14663		_bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
14664	  }
14665	  break;
14666
14667	case Tag_ARM_ISA_use:
14668	case Tag_THUMB_ISA_use:
14669	case Tag_WMMX_arch:
14670	case Tag_Advanced_SIMD_arch:
14671	  /* ??? Do Advanced_SIMD (NEON) and WMMX conflict?  */
14672	case Tag_ABI_FP_rounding:
14673	case Tag_ABI_FP_exceptions:
14674	case Tag_ABI_FP_user_exceptions:
14675	case Tag_ABI_FP_number_model:
14676	case Tag_FP_HP_extension:
14677	case Tag_CPU_unaligned_access:
14678	case Tag_T2EE_use:
14679	case Tag_MPextension_use:
14680	case Tag_MVE_arch:
14681	case Tag_PAC_extension:
14682	case Tag_BTI_extension:
14683	case Tag_BTI_use:
14684	case Tag_PACRET_use:
14685	  /* Use the largest value specified.  */
14686	  if (in_attr[i].i > out_attr[i].i)
14687	    out_attr[i].i = in_attr[i].i;
14688	  break;
14689
14690	case Tag_ABI_align_preserved:
14691	case Tag_ABI_PCS_RO_data:
14692	  /* Use the smallest value specified.  */
14693	  if (in_attr[i].i < out_attr[i].i)
14694	    out_attr[i].i = in_attr[i].i;
14695	  break;
14696
14697	case Tag_ABI_align_needed:
14698	  if ((in_attr[i].i > 0 || out_attr[i].i > 0)
14699	      && (in_attr[Tag_ABI_align_preserved].i == 0
14700		  || out_attr[Tag_ABI_align_preserved].i == 0))
14701	    {
14702	      /* This error message should be enabled once all non-conformant
14703		 binaries in the toolchain have had the attributes set
14704		 properly.
14705	      _bfd_error_handler
14706		(_("error: %pB: 8-byte data alignment conflicts with %pB"),
14707		 obfd, ibfd);
14708	      result = false; */
14709	    }
14710	  /* Fall through.  */
14711	case Tag_ABI_FP_denormal:
14712	case Tag_ABI_PCS_GOT_use:
14713	  /* Use the "greatest" from the sequence 0, 2, 1, or the largest
14714	     value if greater than 2 (for future-proofing).  */
14715	  if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
14716	      || (in_attr[i].i <= 2 && out_attr[i].i <= 2
14717		  && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
14718	    out_attr[i].i = in_attr[i].i;
14719	  break;
14720
14721	case Tag_Virtualization_use:
14722	  /* The virtualization tag effectively stores two bits of
14723	     information: the intended use of TrustZone (in bit 0), and the
14724	     intended use of Virtualization (in bit 1).  */
14725	  if (out_attr[i].i == 0)
14726	    out_attr[i].i = in_attr[i].i;
14727	  else if (in_attr[i].i != 0
14728		   && in_attr[i].i != out_attr[i].i)
14729	    {
14730	      if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
14731		out_attr[i].i = 3;
14732	      else
14733		{
14734		  _bfd_error_handler
14735		    (_("error: %pB: unable to merge virtualization attributes "
14736		       "with %pB"),
14737		     obfd, ibfd);
14738		  result = false;
14739		}
14740	    }
14741	  break;
14742
14743	case Tag_CPU_arch_profile:
14744	  if (out_attr[i].i != in_attr[i].i)
14745	    {
14746	      /* 0 will merge with anything.
14747		 'A' and 'S' merge to 'A'.
14748		 'R' and 'S' merge to 'R'.
14749		 'M' and 'A|R|S' is an error.  */
14750	      if (out_attr[i].i == 0
14751		  || (out_attr[i].i == 'S'
14752		      && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
14753		out_attr[i].i = in_attr[i].i;
14754	      else if (in_attr[i].i == 0
14755		       || (in_attr[i].i == 'S'
14756			   && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
14757		; /* Do nothing.  */
14758	      else
14759		{
14760		  _bfd_error_handler
14761		    (_("error: %pB: conflicting architecture profiles %c/%c"),
14762		     ibfd,
14763		     in_attr[i].i ? in_attr[i].i : '0',
14764		     out_attr[i].i ? out_attr[i].i : '0');
14765		  result = false;
14766		}
14767	    }
14768	  break;
14769
14770	case Tag_DSP_extension:
	  /* No need to change the output value if either:
	     - the input architecture is ARMv5T or earlier (no DSP), or
	     - the input has an M profile other than ARMv7E-M and no DSP.  */
14774	  if (in_attr[Tag_CPU_arch].i <= 3
14775	      || (in_attr[Tag_CPU_arch_profile].i == 'M'
14776		  && in_attr[Tag_CPU_arch].i != 13
14777		  && in_attr[i].i == 0))
14778	    ; /* Do nothing.  */
	  /* The output value should be 0 if DSP is already part of the output
	     architecture, i.e. the output is ARMv5TE or later with an A, R or
	     S profile, or the output architecture is ARMv7E-M.  */
14782	  else if (out_attr[Tag_CPU_arch].i >= 4
14783		   && (out_attr[Tag_CPU_arch_profile].i == 'A'
14784		       || out_attr[Tag_CPU_arch_profile].i == 'R'
14785		       || out_attr[Tag_CPU_arch_profile].i == 'S'
14786		       || out_attr[Tag_CPU_arch].i == 13))
14787	    out_attr[i].i = 0;
14788	  /* Otherwise, DSP instructions are added and not part of output
14789	     architecture.  */
14790	  else
14791	    out_attr[i].i = 1;
14792	  break;
14793
14794	case Tag_FP_arch:
14795	    {
14796	      /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
14797		 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
14798		 when it's 0.  It might mean absence of FP hardware if
14799		 Tag_FP_arch is zero.  */
14800
14801#define VFP_VERSION_COUNT 9
14802	      static const struct
14803	      {
14804		  int ver;
14805		  int regs;
14806	      } vfp_versions[VFP_VERSION_COUNT] =
14807		{
14808		  {0, 0},
14809		  {1, 16},
14810		  {2, 16},
14811		  {3, 32},
14812		  {3, 16},
14813		  {4, 32},
14814		  {4, 16},
14815		  {8, 32},
14816		  {8, 16}
14817		};
14818	      int ver;
14819	      int regs;
14820	      int newval;
14821
14822	      /* If the output has no requirement about FP hardware,
14823		 follow the requirement of the input.  */
14824	      if (out_attr[i].i == 0)
14825		{
		  /* This assert is still reasonable; we shouldn't
		     produce the suspicious build attribute
		     combination (see below for in_attr).  */
14829		  BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
14830		  out_attr[i].i = in_attr[i].i;
14831		  out_attr[Tag_ABI_HardFP_use].i
14832		    = in_attr[Tag_ABI_HardFP_use].i;
14833		  break;
14834		}
14835	      /* If the input has no requirement about FP hardware, do
14836		 nothing.  */
14837	      else if (in_attr[i].i == 0)
14838		{
14839		  /* We used to assert that Tag_ABI_HardFP_use was
14840		     zero here, but we should never assert when
14841		     consuming an object file that has suspicious
14842		     build attributes.  The single precision variant
14843		     of 'no FP architecture' is still 'no FP
14844		     architecture', so we just ignore the tag in this
14845		     case.  */
14846		  break;
14847		}
14848
14849	      /* Both the input and the output have nonzero Tag_FP_arch.
14850		 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero.  */
14851
14852	      /* If both the input and the output have zero Tag_ABI_HardFP_use,
14853		 do nothing.  */
14854	      if (in_attr[Tag_ABI_HardFP_use].i == 0
14855		  && out_attr[Tag_ABI_HardFP_use].i == 0)
14856		;
14857	      /* If the input and the output have different Tag_ABI_HardFP_use,
14858		 the combination of them is 0 (implied by Tag_FP_arch).  */
14859	      else if (in_attr[Tag_ABI_HardFP_use].i
14860		       != out_attr[Tag_ABI_HardFP_use].i)
14861		out_attr[Tag_ABI_HardFP_use].i = 0;
14862
14863	      /* Now we can handle Tag_FP_arch.  */
14864
14865	      /* Values of VFP_VERSION_COUNT or more aren't defined, so just
14866		 pick the biggest.  */
14867	      if (in_attr[i].i >= VFP_VERSION_COUNT
14868		  && in_attr[i].i > out_attr[i].i)
14869		{
14870		  out_attr[i] = in_attr[i];
14871		  break;
14872		}
14873	      /* The output uses the superset of input features
14874		 (ISA version) and registers.  */
14875	      ver = vfp_versions[in_attr[i].i].ver;
14876	      if (ver < vfp_versions[out_attr[i].i].ver)
14877		ver = vfp_versions[out_attr[i].i].ver;
14878	      regs = vfp_versions[in_attr[i].i].regs;
14879	      if (regs < vfp_versions[out_attr[i].i].regs)
14880		regs = vfp_versions[out_attr[i].i].regs;
	      /* This assumes all possible supersets are also valid
		 options.  */
14883	      for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
14884		{
14885		  if (regs == vfp_versions[newval].regs
14886		      && ver == vfp_versions[newval].ver)
14887		    break;
14888		}
14889	      out_attr[i].i = newval;
14890	    }
14891	  break;
14892	case Tag_PCS_config:
14893	  if (out_attr[i].i == 0)
14894	    out_attr[i].i = in_attr[i].i;
14895	  else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
14896	    {
14897	      /* It's sometimes ok to mix different configs, so this is only
14898		 a warning.  */
14899	      _bfd_error_handler
14900		(_("warning: %pB: conflicting platform configuration"), ibfd);
14901	    }
14902	  break;
14903	case Tag_ABI_PCS_R9_use:
14904	  if (in_attr[i].i != out_attr[i].i
14905	      && out_attr[i].i != AEABI_R9_unused
14906	      && in_attr[i].i != AEABI_R9_unused)
14907	    {
14908	      _bfd_error_handler
14909		(_("error: %pB: conflicting use of R9"), ibfd);
14910	      result = false;
14911	    }
14912	  if (out_attr[i].i == AEABI_R9_unused)
14913	    out_attr[i].i = in_attr[i].i;
14914	  break;
14915	case Tag_ABI_PCS_RW_data:
14916	  if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
14917	      && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
14918	      && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
14919	    {
14920	      _bfd_error_handler
14921		(_("error: %pB: SB relative addressing conflicts with use of R9"),
14922		 ibfd);
14923	      result = false;
14924	    }
14925	  /* Use the smallest value specified.  */
14926	  if (in_attr[i].i < out_attr[i].i)
14927	    out_attr[i].i = in_attr[i].i;
14928	  break;
14929	case Tag_ABI_PCS_wchar_t:
14930	  if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
14931	      && !elf_arm_tdata (obfd)->no_wchar_size_warning)
14932	    {
14933	      _bfd_error_handler
14934		(_("warning: %pB uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
14935		 ibfd, in_attr[i].i, out_attr[i].i);
14936	    }
14937	  else if (in_attr[i].i && !out_attr[i].i)
14938	    out_attr[i].i = in_attr[i].i;
14939	  break;
14940	case Tag_ABI_enum_size:
14941	  if (in_attr[i].i != AEABI_enum_unused)
14942	    {
14943	      if (out_attr[i].i == AEABI_enum_unused
14944		  || out_attr[i].i == AEABI_enum_forced_wide)
14945		{
14946		  /* The existing object is compatible with anything.
14947		     Use whatever requirements the new object has.  */
14948		  out_attr[i].i = in_attr[i].i;
14949		}
14950	      else if (in_attr[i].i != AEABI_enum_forced_wide
14951		       && out_attr[i].i != in_attr[i].i
14952		       && !elf_arm_tdata (obfd)->no_enum_size_warning)
14953		{
14954		  static const char *aeabi_enum_names[] =
14955		    { "", "variable-size", "32-bit", "" };
14956		  const char *in_name =
14957		    in_attr[i].i < ARRAY_SIZE (aeabi_enum_names)
14958		    ? aeabi_enum_names[in_attr[i].i]
14959		    : "<unknown>";
14960		  const char *out_name =
14961		    out_attr[i].i < ARRAY_SIZE (aeabi_enum_names)
14962		    ? aeabi_enum_names[out_attr[i].i]
14963		    : "<unknown>";
14964		  _bfd_error_handler
14965		    (_("warning: %pB uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
14966		     ibfd, in_name, out_name);
14967		}
14968	    }
14969	  break;
14970	case Tag_ABI_VFP_args:
	  /* Already done.  */
14972	  break;
14973	case Tag_ABI_WMMX_args:
14974	  if (in_attr[i].i != out_attr[i].i)
14975	    {
14976	      _bfd_error_handler
14977		(_("error: %pB uses iWMMXt register arguments, %pB does not"),
14978		 ibfd, obfd);
14979	      result = false;
14980	    }
14981	  break;
14982	case Tag_compatibility:
14983	  /* Merged in target-independent code.  */
14984	  break;
14985	case Tag_ABI_HardFP_use:
14986	  /* This is handled along with Tag_FP_arch.  */
14987	  break;
14988	case Tag_ABI_FP_16bit_format:
14989	  if (in_attr[i].i != 0 && out_attr[i].i != 0)
14990	    {
14991	      if (in_attr[i].i != out_attr[i].i)
14992		{
14993		  _bfd_error_handler
14994		    (_("error: fp16 format mismatch between %pB and %pB"),
14995		     ibfd, obfd);
14996		  result = false;
14997		}
14998	    }
14999	  if (in_attr[i].i != 0)
15000	    out_attr[i].i = in_attr[i].i;
15001	  break;
15002
15003	case Tag_DIV_use:
15004	  /* A value of zero on input means that the divide instruction may
15005	     be used if available in the base architecture as specified via
15006	     Tag_CPU_arch and Tag_CPU_arch_profile.  A value of 1 means that
15007	     the user did not want divide instructions.  A value of 2
15008	     explicitly means that divide instructions were allowed in ARM
15009	     and Thumb state.  */
15010	  if (in_attr[i].i == out_attr[i].i)
15011	    /* Do nothing.  */ ;
15012	  else if (elf32_arm_attributes_forbid_div (in_attr)
15013		   && !elf32_arm_attributes_accept_div (out_attr))
15014	    out_attr[i].i = 1;
15015	  else if (elf32_arm_attributes_forbid_div (out_attr)
15016		   && elf32_arm_attributes_accept_div (in_attr))
15017	    out_attr[i].i = in_attr[i].i;
15018	  else if (in_attr[i].i == 2)
15019	    out_attr[i].i = in_attr[i].i;
15020	  break;
15021
15022	case Tag_MPextension_use_legacy:
15023	  /* We don't output objects with Tag_MPextension_use_legacy - we
15024	     move the value to Tag_MPextension_use.  */
15025	  if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
15026	    {
15027	      if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
15028		{
15029		  _bfd_error_handler
15030		    (_("%pB has both the current and legacy "
15031		       "Tag_MPextension_use attributes"),
15032		     ibfd);
15033		  result = false;
15034		}
15035	    }
15036
15037	  if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
15038	    out_attr[Tag_MPextension_use] = in_attr[i];
15039
15040	  break;
15041
15042	case Tag_nodefaults:
15043	  /* This tag is set if it exists, but the value is unused (and is
15044	     typically zero).  We don't actually need to do anything here -
15045	     the merge happens automatically when the type flags are merged
15046	     below.  */
15047	  break;
15048	case Tag_also_compatible_with:
15049	  /* Already done in Tag_CPU_arch.  */
15050	  break;
15051	case Tag_conformance:
15052	  /* Keep the attribute if it matches.  Throw it away otherwise.
15053	     No attribute means no claim to conform.  */
15054	  if (!in_attr[i].s || !out_attr[i].s
15055	      || strcmp (in_attr[i].s, out_attr[i].s) != 0)
15056	    out_attr[i].s = NULL;
15057	  break;
15058
15059	default:
15060	  result
15061	    = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
15062	}
15063
15064      /* If out_attr was copied from in_attr then it won't have a type yet.  */
15065      if (in_attr[i].type && !out_attr[i].type)
15066	out_attr[i].type = in_attr[i].type;
15067    }
15068
15069  /* Merge Tag_compatibility attributes and any common GNU ones.  */
15070  if (!_bfd_elf_merge_object_attributes (ibfd, info))
15071    return false;
15072
15073  /* Check for any attributes not known on ARM.  */
15074  result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
15075
15076  return result;
15077}
15078
15079
15080/* Return TRUE if the two EABI versions are incompatible.  */
15081
15082static bool
15083elf32_arm_versions_compatible (unsigned iver, unsigned over)
15084{
15085  /* v4 and v5 are the same spec before and after it was released,
15086     so allow mixing them.  */
15087  if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
15088      || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
15089    return true;
15090
15091  return (iver == over);
15092}
15093
15094/* Merge backend specific data from an object file to the output
15095   object file when linking.  */
15096
15097static bool
15098elf32_arm_merge_private_bfd_data (bfd *, struct bfd_link_info *);
15099
15100/* Display the flags field.  */
15101
15102static bool
15103elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
15104{
15105  FILE * file = (FILE *) ptr;
15106  unsigned long flags;
15107
15108  BFD_ASSERT (abfd != NULL && ptr != NULL);
15109
15110  /* Print normal ELF private data.  */
15111  _bfd_elf_print_private_bfd_data (abfd, ptr);
15112
15113  flags = elf_elfheader (abfd)->e_flags;
15114  /* Ignore init flag - it may not be set, despite the flags field
15115     containing valid data.  */
15116
15117  fprintf (file, _("private flags = 0x%lx:"), elf_elfheader (abfd)->e_flags);
15118
15119  switch (EF_ARM_EABI_VERSION (flags))
15120    {
15121    case EF_ARM_EABI_UNKNOWN:
15122      /* The following flag bits are GNU extensions and not part of the
15123	 official ARM ELF extended ABI.  Hence they are only decoded if
15124	 the EABI version is not set.  */
15125      if (flags & EF_ARM_INTERWORK)
15126	fprintf (file, _(" [interworking enabled]"));
15127
15128      if (flags & EF_ARM_APCS_26)
15129	fprintf (file, " [APCS-26]");
15130      else
15131	fprintf (file, " [APCS-32]");
15132
15133      if (flags & EF_ARM_VFP_FLOAT)
15134	fprintf (file, _(" [VFP float format]"));
15135      else if (flags & EF_ARM_MAVERICK_FLOAT)
15136	fprintf (file, _(" [Maverick float format]"));
15137      else
15138	fprintf (file, _(" [FPA float format]"));
15139
15140      if (flags & EF_ARM_APCS_FLOAT)
15141	fprintf (file, _(" [floats passed in float registers]"));
15142
15143      if (flags & EF_ARM_PIC)
15144	fprintf (file, _(" [position independent]"));
15145
15146      if (flags & EF_ARM_NEW_ABI)
15147	fprintf (file, _(" [new ABI]"));
15148
15149      if (flags & EF_ARM_OLD_ABI)
15150	fprintf (file, _(" [old ABI]"));
15151
15152      if (flags & EF_ARM_SOFT_FLOAT)
15153	fprintf (file, _(" [software FP]"));
15154
15155      flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
15156		 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
15157		 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
15158		 | EF_ARM_MAVERICK_FLOAT);
15159      break;
15160
15161    case EF_ARM_EABI_VER1:
15162      fprintf (file, _(" [Version1 EABI]"));
15163
15164      if (flags & EF_ARM_SYMSARESORTED)
15165	fprintf (file, _(" [sorted symbol table]"));
15166      else
15167	fprintf (file, _(" [unsorted symbol table]"));
15168
15169      flags &= ~ EF_ARM_SYMSARESORTED;
15170      break;
15171
15172    case EF_ARM_EABI_VER2:
15173      fprintf (file, _(" [Version2 EABI]"));
15174
15175      if (flags & EF_ARM_SYMSARESORTED)
15176	fprintf (file, _(" [sorted symbol table]"));
15177      else
15178	fprintf (file, _(" [unsorted symbol table]"));
15179
15180      if (flags & EF_ARM_DYNSYMSUSESEGIDX)
15181	fprintf (file, _(" [dynamic symbols use segment index]"));
15182
15183      if (flags & EF_ARM_MAPSYMSFIRST)
15184	fprintf (file, _(" [mapping symbols precede others]"));
15185
15186      flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
15187		 | EF_ARM_MAPSYMSFIRST);
15188      break;
15189
15190    case EF_ARM_EABI_VER3:
15191      fprintf (file, _(" [Version3 EABI]"));
15192      break;
15193
15194    case EF_ARM_EABI_VER4:
15195      fprintf (file, _(" [Version4 EABI]"));
15196      goto eabi;
15197
15198    case EF_ARM_EABI_VER5:
15199      fprintf (file, _(" [Version5 EABI]"));
15200
15201      if (flags & EF_ARM_ABI_FLOAT_SOFT)
15202	fprintf (file, _(" [soft-float ABI]"));
15203
15204      if (flags & EF_ARM_ABI_FLOAT_HARD)
15205	fprintf (file, _(" [hard-float ABI]"));
15206
15207      flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);
15208
15209    eabi:
15210      if (flags & EF_ARM_BE8)
15211	fprintf (file, _(" [BE8]"));
15212
15213      if (flags & EF_ARM_LE8)
15214	fprintf (file, _(" [LE8]"));
15215
15216      flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
15217      break;
15218
15219    default:
15220      fprintf (file, _(" <EABI version unrecognised>"));
15221      break;
15222    }
15223
15224  flags &= ~ EF_ARM_EABIMASK;
15225
15226  if (flags & EF_ARM_RELEXEC)
15227    fprintf (file, _(" [relocatable executable]"));
15228
15229  if (flags & EF_ARM_PIC)
15230    fprintf (file, _(" [position independent]"));
15231
15232  if (elf_elfheader (abfd)->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC)
15233    fprintf (file, _(" [FDPIC ABI supplement]"));
15234
15235  flags &= ~ (EF_ARM_RELEXEC | EF_ARM_PIC);
15236
15237  if (flags)
15238    fprintf (file, _(" <Unrecognised flag bits set>"));
15239
15240  fputc ('\n', file);
15241
15242  return true;
15243}
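
/* As a rough example (assuming the usual EF_ARM_* encodings from
   include/elf/arm.h), an e_flags word of 0x05000400 would be decoded by
   the function above as:

     private flags = 0x5000400: [Version5 EABI] [hard-float ABI]  */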
15244
15245static int
15246elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
15247{
15248  switch (ELF_ST_TYPE (elf_sym->st_info))
15249    {
15250    case STT_ARM_TFUNC:
15251      return ELF_ST_TYPE (elf_sym->st_info);
15252
15253    case STT_ARM_16BIT:
15254      /* If the symbol is not an object, return the STT_ARM_16BIT flag.
15255	 This allows us to distinguish between data used by Thumb instructions
15256	 and non-data (which is probably code) inside Thumb regions of an
15257	 executable.  */
15258      if (type != STT_OBJECT && type != STT_TLS)
15259	return ELF_ST_TYPE (elf_sym->st_info);
15260      break;
15261
15262    default:
15263      break;
15264    }
15265
15266  return type;
15267}
15268
15269static asection *
15270elf32_arm_gc_mark_hook (asection *sec,
15271			struct bfd_link_info *info,
15272			Elf_Internal_Rela *rel,
15273			struct elf_link_hash_entry *h,
15274			Elf_Internal_Sym *sym)
15275{
15276  if (h != NULL)
15277    switch (ELF32_R_TYPE (rel->r_info))
15278      {
15279      case R_ARM_GNU_VTINHERIT:
15280      case R_ARM_GNU_VTENTRY:
15281	return NULL;
15282      }
15283
15284  return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
15285}
15286
15287/* Look through the relocs for a section during the first phase.  */
15288
15289static bool
15290elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
15291			asection *sec, const Elf_Internal_Rela *relocs)
15292{
15293  Elf_Internal_Shdr *symtab_hdr;
15294  struct elf_link_hash_entry **sym_hashes;
15295  const Elf_Internal_Rela *rel;
15296  const Elf_Internal_Rela *rel_end;
15297  bfd *dynobj;
15298  asection *sreloc;
15299  struct elf32_arm_link_hash_table *htab;
15300  bool call_reloc_p;
15301  bool may_become_dynamic_p;
15302  bool may_need_local_target_p;
15303  unsigned long nsyms;
15304
15305  if (bfd_link_relocatable (info))
15306    return true;
15307
15308  BFD_ASSERT (is_arm_elf (abfd));
15309
15310  htab = elf32_arm_hash_table (info);
15311  if (htab == NULL)
15312    return false;
15313
15314  sreloc = NULL;
15315
15316  /* Create dynamic sections for relocatable executables so that we can
15317     copy relocations.  */
15318  if (htab->root.is_relocatable_executable
15319      && ! htab->root.dynamic_sections_created)
15320    {
15321      if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
15322	return false;
15323    }
15324
15325  if (htab->root.dynobj == NULL)
15326    htab->root.dynobj = abfd;
15327  if (!create_ifunc_sections (info))
15328    return false;
15329
15330  dynobj = htab->root.dynobj;
15331
15332  symtab_hdr = & elf_symtab_hdr (abfd);
15333  sym_hashes = elf_sym_hashes (abfd);
15334  nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
15335
15336  rel_end = relocs + sec->reloc_count;
15337  for (rel = relocs; rel < rel_end; rel++)
15338    {
15339      Elf_Internal_Sym *isym;
15340      struct elf_link_hash_entry *h;
15341      struct elf32_arm_link_hash_entry *eh;
15342      unsigned int r_symndx;
15343      int r_type;
15344
15345      r_symndx = ELF32_R_SYM (rel->r_info);
15346      r_type = ELF32_R_TYPE (rel->r_info);
15347      r_type = arm_real_reloc_type (htab, r_type);
15348
15349      if (r_symndx >= nsyms
15350	  /* PR 9934: It is possible to have relocations that do not
15351	     refer to symbols, thus it is also possible to have an
15352	     object file containing relocations but no symbol table.  */
15353	  && (r_symndx > STN_UNDEF || nsyms > 0))
15354	{
15355	  _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd,
15356			      r_symndx);
15357	  return false;
15358	}
15359
15360      h = NULL;
15361      isym = NULL;
15362      if (nsyms > 0)
15363	{
15364	  if (r_symndx < symtab_hdr->sh_info)
15365	    {
15366	      /* A local symbol.  */
15367	      isym = bfd_sym_from_r_symndx (&htab->root.sym_cache,
15368					    abfd, r_symndx);
15369	      if (isym == NULL)
15370		return false;
15371	    }
15372	  else
15373	    {
15374	      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
15375	      while (h->root.type == bfd_link_hash_indirect
15376		     || h->root.type == bfd_link_hash_warning)
15377		h = (struct elf_link_hash_entry *) h->root.u.i.link;
15378	    }
15379	}
15380
15381      eh = (struct elf32_arm_link_hash_entry *) h;
15382
15383      call_reloc_p = false;
15384      may_become_dynamic_p = false;
15385      may_need_local_target_p = false;
15386
15387      /* Could be done earlier, if h were already available.  */
15388      r_type = elf32_arm_tls_transition (info, r_type, h);
15389      switch (r_type)
15390	{
15391	case R_ARM_GOTOFFFUNCDESC:
15392	  {
15393	    if (h == NULL)
15394	      {
15395		if (!elf32_arm_allocate_local_sym_info (abfd))
15396		  return false;
15397		if (r_symndx >= elf32_arm_num_entries (abfd))
15398		  return false;
15399		elf32_arm_local_fdpic_cnts (abfd) [r_symndx].gotofffuncdesc_cnt += 1;
15400		elf32_arm_local_fdpic_cnts (abfd) [r_symndx].funcdesc_offset = -1;
15401	      }
15402	    else
15403	      {
15404		eh->fdpic_cnts.gotofffuncdesc_cnt++;
15405	      }
15406	  }
15407	  break;
15408
15409	case R_ARM_GOTFUNCDESC:
15410	  {
15411	    if (h == NULL)
15412	      {
		/* Such a relocation is not supposed to be generated
		   by gcc on a static function; it could be handled
		   here if that ever proved necessary.  */
15416		return false;
15417	      }
15418	    else
15419	      {
15420		eh->fdpic_cnts.gotfuncdesc_cnt++;
15421	      }
15422	  }
15423	  break;
15424
15425	case R_ARM_FUNCDESC:
15426	  {
15427	    if (h == NULL)
15428	      {
15429		if (!elf32_arm_allocate_local_sym_info (abfd))
15430		  return false;
15431		if (r_symndx >= elf32_arm_num_entries (abfd))
15432		  return false;
15433		elf32_arm_local_fdpic_cnts (abfd) [r_symndx].funcdesc_cnt += 1;
15434		elf32_arm_local_fdpic_cnts (abfd) [r_symndx].funcdesc_offset = -1;
15435	      }
15436	    else
15437	      {
15438		eh->fdpic_cnts.funcdesc_cnt++;
15439	      }
15440	  }
15441	  break;
15442
15443	  case R_ARM_GOT32:
15444	  case R_ARM_GOT_PREL:
15445	  case R_ARM_TLS_GD32:
15446	  case R_ARM_TLS_GD32_FDPIC:
15447	  case R_ARM_TLS_IE32:
15448	  case R_ARM_TLS_IE32_FDPIC:
15449	  case R_ARM_TLS_GOTDESC:
15450	  case R_ARM_TLS_DESCSEQ:
15451	  case R_ARM_THM_TLS_DESCSEQ:
15452	  case R_ARM_TLS_CALL:
15453	  case R_ARM_THM_TLS_CALL:
15454	    /* This symbol requires a global offset table entry.  */
15455	    {
15456	      int tls_type, old_tls_type;
15457
15458	      switch (r_type)
15459		{
15460		case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
15461		case R_ARM_TLS_GD32_FDPIC: tls_type = GOT_TLS_GD; break;
15462
15463		case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
15464		case R_ARM_TLS_IE32_FDPIC: tls_type = GOT_TLS_IE; break;
15465
15466		case R_ARM_TLS_GOTDESC:
15467		case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
15468		case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
15469		  tls_type = GOT_TLS_GDESC; break;
15470
15471		default: tls_type = GOT_NORMAL; break;
15472		}
15473
15474	      if (!bfd_link_executable (info) && (tls_type & GOT_TLS_IE))
15475		info->flags |= DF_STATIC_TLS;
15476
15477	      if (h != NULL)
15478		{
15479		  h->got.refcount++;
15480		  old_tls_type = elf32_arm_hash_entry (h)->tls_type;
15481		}
15482	      else
15483		{
15484		  /* This is a global offset table entry for a local symbol.  */
15485		  if (!elf32_arm_allocate_local_sym_info (abfd))
15486		    return false;
15487		  if (r_symndx >= elf32_arm_num_entries (abfd))
15488		    {
15489		      _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd,
15490					  r_symndx);
15491		      return false;
15492		    }
15493
15494		  elf_local_got_refcounts (abfd)[r_symndx] += 1;
15495		  old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
15496		}
15497
15498	      /* If a variable is accessed with both tls methods, two
15499		 slots may be created.  */
15500	      if (GOT_TLS_GD_ANY_P (old_tls_type)
15501		  && GOT_TLS_GD_ANY_P (tls_type))
15502		tls_type |= old_tls_type;
15503
15504	      /* We will already have issued an error message if there
15505		 is a TLS/non-TLS mismatch, based on the symbol
15506		 type.  So just combine any TLS types needed.  */
15507	      if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
15508		  && tls_type != GOT_NORMAL)
15509		tls_type |= old_tls_type;
15510
	      /* If the symbol is accessed with both the IE and GDESC
		 methods, we are able to relax.  Turn off the GDESC flag
		 without disturbing any other TLS types that may be
		 involved.  */
15515	      if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
15516		tls_type &= ~GOT_TLS_GDESC;
15517
15518	      if (old_tls_type != tls_type)
15519		{
15520		  if (h != NULL)
15521		    elf32_arm_hash_entry (h)->tls_type = tls_type;
15522		  else
15523		    elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
15524		}
15525	    }
15526	    /* Fall through.  */
15527
15528	  case R_ARM_TLS_LDM32:
15529	  case R_ARM_TLS_LDM32_FDPIC:
15530	    if (r_type == R_ARM_TLS_LDM32 || r_type == R_ARM_TLS_LDM32_FDPIC)
15531		htab->tls_ldm_got.refcount++;
15532	    /* Fall through.  */
15533
15534	  case R_ARM_GOTOFF32:
15535	  case R_ARM_GOTPC:
15536	    if (htab->root.sgot == NULL
15537		&& !create_got_section (htab->root.dynobj, info))
15538	      return false;
15539	    break;
15540
15541	  case R_ARM_PC24:
15542	  case R_ARM_PLT32:
15543	  case R_ARM_CALL:
15544	  case R_ARM_JUMP24:
15545	  case R_ARM_PREL31:
15546	  case R_ARM_THM_CALL:
15547	  case R_ARM_THM_JUMP24:
15548	  case R_ARM_THM_JUMP19:
15549	    call_reloc_p = true;
15550	    may_need_local_target_p = true;
15551	    break;
15552
15553	  case R_ARM_ABS12:
15554	    /* VxWorks uses dynamic R_ARM_ABS12 relocations for
15555	       ldr __GOTT_INDEX__ offsets.  */
15556	    if (htab->root.target_os != is_vxworks)
15557	      {
15558		may_need_local_target_p = true;
15559		break;
15560	      }
15561	    else goto jump_over;
15562
15563	    /* Fall through.  */
15564
15565	  case R_ARM_MOVW_ABS_NC:
15566	  case R_ARM_MOVT_ABS:
15567	  case R_ARM_THM_MOVW_ABS_NC:
15568	  case R_ARM_THM_MOVT_ABS:
15569	    if (bfd_link_pic (info))
15570	      {
15571		_bfd_error_handler
15572		  (_("%pB: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
15573		   abfd, elf32_arm_howto_table_1[r_type].name,
15574		   (h) ? h->root.root.string : "a local symbol");
15575		bfd_set_error (bfd_error_bad_value);
15576		return false;
15577	      }
15578
15579	    /* Fall through.  */
15580	  case R_ARM_ABS32:
15581	  case R_ARM_ABS32_NOI:
15582	jump_over:
15583	    if (h != NULL && bfd_link_executable (info))
15584	      {
15585		h->pointer_equality_needed = 1;
15586	      }
15587	    /* Fall through.  */
15588	  case R_ARM_REL32:
15589	  case R_ARM_REL32_NOI:
15590	  case R_ARM_MOVW_PREL_NC:
15591	  case R_ARM_MOVT_PREL:
15592	  case R_ARM_THM_MOVW_PREL_NC:
15593	  case R_ARM_THM_MOVT_PREL:
15594
15595	    /* Should the interworking branches be listed here?  */
15596	    if ((bfd_link_pic (info) || htab->root.is_relocatable_executable
15597		 || htab->fdpic_p)
15598		&& (sec->flags & SEC_ALLOC) != 0)
15599	      {
15600		if (h == NULL
15601		    && elf32_arm_howto_from_type (r_type)->pc_relative)
15602		  {
		    /* In shared libraries and relocatable executables,
		       we treat local relative references as calls;
		       see the related SYMBOL_CALLS_LOCAL code in
		       allocate_dynrelocs_for_symbol.  */
15607		    call_reloc_p = true;
15608		    may_need_local_target_p = true;
15609		  }
15610		else
15611		  /* We are creating a shared library or relocatable
15612		     executable, and this is a reloc against a global symbol,
15613		     or a non-PC-relative reloc against a local symbol.
15614		     We may need to copy the reloc into the output.  */
15615		  may_become_dynamic_p = true;
15616	      }
15617	    else
15618	      may_need_local_target_p = true;
15619	    break;
15620
15621	/* This relocation describes the C++ object vtable hierarchy.
15622	   Reconstruct it for later use during GC.  */
15623	case R_ARM_GNU_VTINHERIT:
15624	  if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
15625	    return false;
15626	  break;
15627
15628	/* This relocation describes which C++ vtable entries are actually
15629	   used.  Record for later use during GC.  */
15630	case R_ARM_GNU_VTENTRY:
15631	  if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
15632	    return false;
15633	  break;
15634	}
15635
15636      if (h != NULL)
15637	{
15638	  if (call_reloc_p)
15639	    /* We may need a .plt entry if the function this reloc
15640	       refers to is in a different object, regardless of the
15641	       symbol's type.  We can't tell for sure yet, because
15642	       something later might force the symbol local.  */
15643	    h->needs_plt = 1;
15644	  else if (may_need_local_target_p)
15645	    /* If this reloc is in a read-only section, we might
15646	       need a copy reloc.  We can't check reliably at this
15647	       stage whether the section is read-only, as input
15648	       sections have not yet been mapped to output sections.
15649	       Tentatively set the flag for now, and correct in
15650	       adjust_dynamic_symbol.  */
15651	    h->non_got_ref = 1;
15652	}
15653
15654      if (may_need_local_target_p
15655	  && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
15656	{
15657	  union gotplt_union *root_plt;
15658	  struct arm_plt_info *arm_plt;
15659	  struct arm_local_iplt_info *local_iplt;
15660
15661	  if (h != NULL)
15662	    {
15663	      root_plt = &h->plt;
15664	      arm_plt = &eh->plt;
15665	    }
15666	  else
15667	    {
15668	      local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
15669	      if (local_iplt == NULL)
15670		return false;
15671	      root_plt = &local_iplt->root;
15672	      arm_plt = &local_iplt->arm;
15673	    }
15674
15675	  /* If the symbol is a function that doesn't bind locally,
15676	     this relocation will need a PLT entry.  */
15677	  if (root_plt->refcount != -1)
15678	    root_plt->refcount += 1;
15679
15680	  if (!call_reloc_p)
15681	    arm_plt->noncall_refcount++;
15682
15683	  /* It's too early to use htab->use_blx here, so we have to
15684	     record possible blx references separately from
15685	     relocs that definitely need a thumb stub.  */
15686
15687	  if (r_type == R_ARM_THM_CALL)
15688	    arm_plt->maybe_thumb_refcount += 1;
15689
15690	  if (r_type == R_ARM_THM_JUMP24
15691	      || r_type == R_ARM_THM_JUMP19)
15692	    arm_plt->thumb_refcount += 1;
15693	}
15694
15695      if (may_become_dynamic_p)
15696	{
15697	  struct elf_dyn_relocs *p, **head;
15698
15699	  /* Create a reloc section in dynobj.  */
15700	  if (sreloc == NULL)
15701	    {
15702	      sreloc = _bfd_elf_make_dynamic_reloc_section
15703		(sec, dynobj, 2, abfd, ! htab->use_rel);
15704
15705	      if (sreloc == NULL)
15706		return false;
15707	    }
15708
15709	  /* If this is a global symbol, count the number of
15710	     relocations we need for this symbol.  */
15711	  if (h != NULL)
15712	    head = &h->dyn_relocs;
15713	  else
15714	    {
15715	      head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
15716	      if (head == NULL)
15717		return false;
15718	    }
15719
15720	  p = *head;
15721	  if (p == NULL || p->sec != sec)
15722	    {
15723	      size_t amt = sizeof *p;
15724
15725	      p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
15726	      if (p == NULL)
15727		return false;
15728	      p->next = *head;
15729	      *head = p;
15730	      p->sec = sec;
15731	      p->count = 0;
15732	      p->pc_count = 0;
15733	    }
15734
15735	  if (elf32_arm_howto_from_type (r_type)->pc_relative)
15736	    p->pc_count += 1;
15737	  p->count += 1;
15738	  if (h == NULL && htab->fdpic_p && !bfd_link_pic (info)
15739	      && r_type != R_ARM_ABS32 && r_type != R_ARM_ABS32_NOI)
15740	    {
	      /* Here we only support R_ARM_ABS32 and R_ARM_ABS32_NOI,
		 because we assume that every such relocation will become
		 a rofixup.  */
15744	      _bfd_error_handler
15745		(_("FDPIC does not yet support %s relocation"
15746		   " to become dynamic for executable"),
15747		 elf32_arm_howto_table_1[r_type].name);
15748	      abort ();
15749	    }
15750	}
15751    }
15752
15753  return true;
15754}
15755
15756static void
15757elf32_arm_update_relocs (asection *o,
15758			 struct bfd_elf_section_reloc_data *reldata)
15759{
15760  void (*swap_in) (bfd *, const bfd_byte *, Elf_Internal_Rela *);
15761  void (*swap_out) (bfd *, const Elf_Internal_Rela *, bfd_byte *);
15762  const struct elf_backend_data *bed;
15763  _arm_elf_section_data *eado;
15764  struct bfd_link_order *p;
15765  bfd_byte *erela_head, *erela;
15766  Elf_Internal_Rela *irela_head, *irela;
15767  Elf_Internal_Shdr *rel_hdr;
15768  bfd *abfd;
15769  unsigned int count;
15770
15771  eado = get_arm_elf_section_data (o);
15772
15773  if (!eado || eado->elf.this_hdr.sh_type != SHT_ARM_EXIDX)
15774    return;
15775
15776  abfd = o->owner;
15777  bed = get_elf_backend_data (abfd);
15778  rel_hdr = reldata->hdr;
15779
15780  if (rel_hdr->sh_entsize == bed->s->sizeof_rel)
15781    {
15782      swap_in = bed->s->swap_reloc_in;
15783      swap_out = bed->s->swap_reloc_out;
15784    }
15785  else if (rel_hdr->sh_entsize == bed->s->sizeof_rela)
15786    {
15787      swap_in = bed->s->swap_reloca_in;
15788      swap_out = bed->s->swap_reloca_out;
15789    }
15790  else
15791    abort ();
15792
15793  erela_head = rel_hdr->contents;
15794  irela_head = (Elf_Internal_Rela *) bfd_zmalloc
15795    ((NUM_SHDR_ENTRIES (rel_hdr) + 1) * sizeof (*irela_head));
15796
15797  erela = erela_head;
15798  irela = irela_head;
15799  count = 0;
15800
15801  for (p = o->map_head.link_order; p; p = p->next)
15802    {
15803      if (p->type == bfd_section_reloc_link_order
15804	  || p->type == bfd_symbol_reloc_link_order)
15805	{
15806	  (*swap_in) (abfd, erela, irela);
15807	  erela += rel_hdr->sh_entsize;
15808	  irela++;
15809	  count++;
15810	}
15811      else if (p->type == bfd_indirect_link_order)
15812	{
15813	  struct bfd_elf_section_reloc_data *input_reldata;
15814	  arm_unwind_table_edit *edit_list, *edit_tail;
15815	  _arm_elf_section_data *eadi;
15816	  bfd_size_type j;
15817	  bfd_vma offset;
15818	  asection *i;
15819
15820	  i = p->u.indirect.section;
15821
15822	  eadi = get_arm_elf_section_data (i);
15823	  edit_list = eadi->u.exidx.unwind_edit_list;
15824	  edit_tail = eadi->u.exidx.unwind_edit_tail;
15825	  offset = i->output_offset;
15826
	  if (eadi->elf.rel.hdr
	      && eadi->elf.rel.hdr->sh_entsize == rel_hdr->sh_entsize)
	    input_reldata = &eadi->elf.rel;
	  else if (eadi->elf.rela.hdr
		   && eadi->elf.rela.hdr->sh_entsize == rel_hdr->sh_entsize)
	    input_reldata = &eadi->elf.rela;
15833	  else
15834	    abort ();
15835
15836	  if (edit_list)
15837	    {
15838	      for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
15839		{
15840		  arm_unwind_table_edit *edit_node, *edit_next;
15841		  bfd_vma bias;
15842		  bfd_vma reloc_index;
15843
15844		  (*swap_in) (abfd, erela, irela);
15845		  reloc_index = (irela->r_offset - offset) / 8;
15846
15847		  bias = 0;
15848		  edit_node = edit_list;
15849		  for (edit_next = edit_list;
15850		       edit_next && edit_next->index <= reloc_index;
15851		       edit_next = edit_node->next)
15852		    {
15853		      bias++;
15854		      edit_node = edit_next;
15855		    }
15856
15857		  if (edit_node->type != DELETE_EXIDX_ENTRY
15858		      || edit_node->index != reloc_index)
15859		    {
15860		      irela->r_offset -= bias * 8;
15861		      irela++;
15862		      count++;
15863		    }
15864
15865		  erela += rel_hdr->sh_entsize;
15866		}
15867
15868	      if (edit_tail->type == INSERT_EXIDX_CANTUNWIND_AT_END)
15869		{
		  /* New relocation entry.  */
15871		  asection *text_sec = edit_tail->linked_section;
15872		  asection *text_out = text_sec->output_section;
15873		  bfd_vma exidx_offset = offset + i->size - 8;
15874
15875		  irela->r_addend = 0;
15876		  irela->r_offset = exidx_offset;
15877		  irela->r_info = ELF32_R_INFO
15878		    (text_out->target_index, R_ARM_PREL31);
15879		  irela++;
15880		  count++;
15881		}
15882	    }
15883	  else
15884	    {
15885	      for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
15886		{
15887		  (*swap_in) (abfd, erela, irela);
15888		  erela += rel_hdr->sh_entsize;
15889		  irela++;
15890		}
15891
15892	      count += NUM_SHDR_ENTRIES (input_reldata->hdr);
15893	    }
15894	}
15895    }
15896
15897  reldata->count = count;
15898  rel_hdr->sh_size = count * rel_hdr->sh_entsize;
15899
15900  erela = erela_head;
15901  irela = irela_head;
15902  while (count > 0)
15903    {
15904      (*swap_out) (abfd, irela, erela);
15905      erela += rel_hdr->sh_entsize;
15906      irela++;
15907      count--;
15908    }
15909
15910  free (irela_head);
15911
15912  /* Hashes are no longer valid.  */
15913  free (reldata->hashes);
15914  reldata->hashes = NULL;
15915}
15916
15917/* Unwinding tables are not referenced directly.  This pass marks them as
15918   required if the corresponding code section is marked.  Similarly, ARMv8-M
15919   secure entry functions can only be referenced by SG veneers which are
15920   created after the GC process. They need to be marked in case they reside in
15921   their own section (as would be the case if code was compiled with
15922   -ffunction-sections).  */
15923
15924static bool
15925elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
15926				  elf_gc_mark_hook_fn gc_mark_hook)
15927{
15928  bfd *sub;
15929  Elf_Internal_Shdr **elf_shdrp;
15930  asection *cmse_sec;
15931  obj_attribute *out_attr;
15932  Elf_Internal_Shdr *symtab_hdr;
15933  unsigned i, sym_count, ext_start;
15934  const struct elf_backend_data *bed;
15935  struct elf_link_hash_entry **sym_hashes;
15936  struct elf32_arm_link_hash_entry *cmse_hash;
15937  bool again, is_v8m, first_bfd_browse = true;
15938  bool debug_sec_need_to_be_marked = false;
15939  asection *isec;
15940
15941  _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);
15942
15943  out_attr = elf_known_obj_attributes_proc (info->output_bfd);
15944  is_v8m = out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
15945	   && out_attr[Tag_CPU_arch_profile].i == 'M';
15946
15947  /* Marking EH data may cause additional code sections to be marked,
15948     requiring multiple passes.  */
15949  again = true;
15950  while (again)
15951    {
15952      again = false;
15953      for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
15954	{
15955	  asection *o;
15956
15957	  if (! is_arm_elf (sub))
15958	    continue;
15959
15960	  elf_shdrp = elf_elfsections (sub);
15961	  for (o = sub->sections; o != NULL; o = o->next)
15962	    {
15963	      Elf_Internal_Shdr *hdr;
15964
15965	      hdr = &elf_section_data (o)->this_hdr;
15966	      if (hdr->sh_type == SHT_ARM_EXIDX
15967		  && hdr->sh_link
15968		  && hdr->sh_link < elf_numsections (sub)
15969		  && !o->gc_mark
15970		  && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
15971		{
15972		  again = true;
15973		  if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
15974		    return false;
15975		}
15976	    }
15977
	  /* Mark sections holding ARMv8-M secure entry functions.  We mark
	     all of them, so there is no need for a second browse.  */
15980	  if (is_v8m && first_bfd_browse)
15981	    {
15982	      sym_hashes = elf_sym_hashes (sub);
15983	      bed = get_elf_backend_data (sub);
15984	      symtab_hdr = &elf_tdata (sub)->symtab_hdr;
15985	      sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
15986	      ext_start = symtab_hdr->sh_info;
15987
15988	      /* Scan symbols.  */
15989	      for (i = ext_start; i < sym_count; i++)
15990		{
15991		  cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
15992
		  /* Assume it is a special symbol.  If not, cmse_scan will
		     warn about it and the user can do something about it.  */
15995		  if (startswith (cmse_hash->root.root.root.string,
15996				    CMSE_PREFIX))
15997		    {
15998		      cmse_sec = cmse_hash->root.root.u.def.section;
15999		      if (!cmse_sec->gc_mark
16000			  && !_bfd_elf_gc_mark (info, cmse_sec, gc_mark_hook))
16001			return false;
		      /* The debug sections related to these secure entry
			 functions are marked when the flag below is set.  */
16004		      debug_sec_need_to_be_marked = true;
16005		    }
16006		}
16007
16008	      if (debug_sec_need_to_be_marked)
16009		{
		  /* Loop over all the sections of the object file containing
		     Armv8-M secure entry functions and mark all the debug
		     sections.  */
16013		  for (isec = sub->sections; isec != NULL; isec = isec->next)
16014		    {
		      /* Mark any debug section that has not already been
			 marked.  */
		      if (!isec->gc_mark && (isec->flags & SEC_DEBUGGING))
			isec->gc_mark = 1;
16018		    }
16019		  debug_sec_need_to_be_marked = false;
16020		}
16021	    }
16022	}
16023      first_bfd_browse = false;
16024    }
16025
16026  return true;
16027}
16028
16029/* Treat mapping symbols as special target symbols.  */
16030
16031static bool
16032elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
16033{
16034  return bfd_is_arm_special_symbol_name (sym->name,
16035					 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
16036}
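
/* For example, the ARM EABI mapping symbols "$a", "$t" and "$d" (optionally
   followed by a '.' and further characters), which mark ARM code, Thumb code
   and data respectively, are among the names accepted above.  */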
16037
16038/* If the ELF symbol SYM might be a function in SEC, return the
16039   function size and set *CODE_OFF to the function's entry point,
16040   otherwise return zero.  */
16041
16042static bfd_size_type
16043elf32_arm_maybe_function_sym (const asymbol *sym, asection *sec,
16044			      bfd_vma *code_off)
16045{
16046  bfd_size_type size;
16047  elf_symbol_type * elf_sym = (elf_symbol_type *) sym;
16048
16049  if ((sym->flags & (BSF_SECTION_SYM | BSF_FILE | BSF_OBJECT
16050		     | BSF_THREAD_LOCAL | BSF_RELC | BSF_SRELC)) != 0
16051      || sym->section != sec)
16052    return 0;
16053
16054  size = (sym->flags & BSF_SYNTHETIC) ? 0 : elf_sym->internal_elf_sym.st_size;
16055
16056  if (!(sym->flags & BSF_SYNTHETIC))
16057    switch (ELF_ST_TYPE (elf_sym->internal_elf_sym.st_info))
16058      {
16059	case STT_NOTYPE:
16060	  /* Ignore symbols created by the annobin plugin for gcc and clang.
16061	     These symbols are hidden, local, notype and have a size of 0.  */
16062	  if (size == 0
16063	      && sym->flags & BSF_LOCAL
16064	      && ELF_ST_VISIBILITY (elf_sym->internal_elf_sym.st_other) == STV_HIDDEN)
16065	    return 0;
16066	  /* Fall through.  */
16067	case STT_FUNC:
16068	case STT_ARM_TFUNC:
16069	  /* FIXME: Allow STT_GNU_IFUNC as well ?  */
16070	  break;
16071	default:
16072	  return 0;
16073      }
16074
16075  if ((sym->flags & BSF_LOCAL)
16076      && bfd_is_arm_special_symbol_name (sym->name,
16077					 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
16078    return 0;
16079
16080  *code_off = sym->value;
16081
16082  /* Do not return 0 for the function's size.  */
  return size ? size : 1;
}
16086
16087static bool
16088elf32_arm_find_inliner_info (bfd *	    abfd,
16089			     const char **  filename_ptr,
16090			     const char **  functionname_ptr,
16091			     unsigned int * line_ptr)
16092{
16093  bool found;
16094  found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
16095					 functionname_ptr, line_ptr,
16096					 & elf_tdata (abfd)->dwarf2_find_line_info);
16097  return found;
16098}
16099
16100/* Adjust a symbol defined by a dynamic object and referenced by a
16101   regular object.  The current definition is in some section of the
16102   dynamic object, but we're not including those sections.  We have to
16103   change the definition to something the rest of the link can
16104   understand.  */
16105
16106static bool
16107elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
16108				 struct elf_link_hash_entry * h)
16109{
16110  bfd * dynobj;
16111  asection *s, *srel;
16112  struct elf32_arm_link_hash_entry * eh;
16113  struct elf32_arm_link_hash_table *globals;
16114
16115  globals = elf32_arm_hash_table (info);
16116  if (globals == NULL)
16117    return false;
16118
16119  dynobj = elf_hash_table (info)->dynobj;
16120
16121  /* Make sure we know what is going on here.  */
16122  BFD_ASSERT (dynobj != NULL
16123	      && (h->needs_plt
16124		  || h->type == STT_GNU_IFUNC
16125		  || h->is_weakalias
16126		  || (h->def_dynamic
16127		      && h->ref_regular
16128		      && !h->def_regular)));
16129
16130  eh = (struct elf32_arm_link_hash_entry *) h;
16131
16132  /* If this is a function, put it in the procedure linkage table.  We
16133     will fill in the contents of the procedure linkage table later,
16134     when we know the address of the .got section.  */
16135  if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
16136    {
16137      /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
16138	 symbol binds locally.  */
16139      if (h->plt.refcount <= 0
16140	  || (h->type != STT_GNU_IFUNC
16141	      && (SYMBOL_CALLS_LOCAL (info, h)
16142		  || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
16143		      && h->root.type == bfd_link_hash_undefweak))))
16144	{
16145	  /* This case can occur if we saw a PLT32 reloc in an input
16146	     file, but the symbol was never referred to by a dynamic
16147	     object, or if all references were garbage collected.  In
16148	     such a case, we don't actually need to build a procedure
16149	     linkage table, and we can just do a PC24 reloc instead.  */
16150	  h->plt.offset = (bfd_vma) -1;
16151	  eh->plt.thumb_refcount = 0;
16152	  eh->plt.maybe_thumb_refcount = 0;
16153	  eh->plt.noncall_refcount = 0;
16154	  h->needs_plt = 0;
16155	}
16156
16157      return true;
16158    }
16159  else
16160    {
      /* It's possible that we incorrectly decided a .plt reloc was
	 needed for an R_ARM_PC24 or similar reloc to a non-function sym
	 in check_relocs.  We can't decide accurately between function
	 and non-function syms in check_relocs; objects loaded later in
	 the link may change h->type.  So fix it now.  */
16166      h->plt.offset = (bfd_vma) -1;
16167      eh->plt.thumb_refcount = 0;
16168      eh->plt.maybe_thumb_refcount = 0;
16169      eh->plt.noncall_refcount = 0;
16170    }
16171
16172  /* If this is a weak symbol, and there is a real definition, the
16173     processor independent code will have arranged for us to see the
16174     real definition first, and we can just use the same value.  */
16175  if (h->is_weakalias)
16176    {
16177      struct elf_link_hash_entry *def = weakdef (h);
16178      BFD_ASSERT (def->root.type == bfd_link_hash_defined);
16179      h->root.u.def.section = def->root.u.def.section;
16180      h->root.u.def.value = def->root.u.def.value;
16181      return true;
16182    }
16183
16184  /* If there are no non-GOT references, we do not need a copy
16185     relocation.  */
16186  if (!h->non_got_ref)
16187    return true;
16188
16189  /* This is a reference to a symbol defined by a dynamic object which
16190     is not a function.  */
16191
16192  /* If we are creating a shared library, we must presume that the
16193     only references to the symbol are via the global offset table.
16194     For such cases we need not do anything here; the relocations will
16195     be handled correctly by relocate_section.  Relocatable executables
16196     can reference data in shared objects directly, so we don't need to
16197     do anything here.  */
16198  if (bfd_link_pic (info) || globals->root.is_relocatable_executable)
16199    return true;
16200
16201  /* We must allocate the symbol in our .dynbss section, which will
16202     become part of the .bss section of the executable.  There will be
16203     an entry for this symbol in the .dynsym section.  The dynamic
16204     object will contain position independent code, so all references
16205     from the dynamic object to this symbol will go through the global
16206     offset table.  The dynamic linker will use the .dynsym entry to
16207     determine the address it must put in the global offset table, so
16208     both the dynamic object and the regular object will refer to the
16209     same memory location for the variable.  */
16210  /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
16211     linker to copy the initial value out of the dynamic object and into
16212     the runtime process image.  We need to remember the offset into the
16213     .rel(a).bss section we are going to use.  */
16214  if ((h->root.u.def.section->flags & SEC_READONLY) != 0)
16215    {
16216      s = globals->root.sdynrelro;
16217      srel = globals->root.sreldynrelro;
16218    }
16219  else
16220    {
16221      s = globals->root.sdynbss;
16222      srel = globals->root.srelbss;
16223    }
16224  if (info->nocopyreloc == 0
16225      && (h->root.u.def.section->flags & SEC_ALLOC) != 0
16226      && h->size != 0)
16227    {
16228      elf32_arm_allocate_dynrelocs (info, srel, 1);
16229      h->needs_copy = 1;
16230    }
16231
16232  return _bfd_elf_adjust_dynamic_copy (info, h, s);
16233}
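
/* For instance, a hypothetical non-PIC executable built from

     extern int shared_counter;
     int read_counter (void) { return shared_counter; }

   where shared_counter is defined by a shared library, references the
   variable directly.  The function above therefore reserves room for it in
   .dynbss (or the read-only variant) and records an R_ARM_COPY relocation so
   that the dynamic linker copies the initial value into the executable's
   image.  */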
16234
16235/* Allocate space in .plt, .got and associated reloc sections for
16236   dynamic relocs.  */
16237
16238static bool
16239allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
16240{
16241  struct bfd_link_info *info;
16242  struct elf32_arm_link_hash_table *htab;
16243  struct elf32_arm_link_hash_entry *eh;
16244  struct elf_dyn_relocs *p;
16245
16246  if (h->root.type == bfd_link_hash_indirect)
16247    return true;
16248
16249  eh = (struct elf32_arm_link_hash_entry *) h;
16250
16251  info = (struct bfd_link_info *) inf;
16252  htab = elf32_arm_hash_table (info);
16253  if (htab == NULL)
16254    return false;
16255
16256  if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
16257      && h->plt.refcount > 0)
16258    {
16259      /* Make sure this symbol is output as a dynamic symbol.
16260	 Undefined weak syms won't yet be marked as dynamic.  */
16261      if (h->dynindx == -1 && !h->forced_local
16262	  && h->root.type == bfd_link_hash_undefweak)
16263	{
16264	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
16265	    return false;
16266	}
16267
16268      /* If the call in the PLT entry binds locally, the associated
16269	 GOT entry should use an R_ARM_IRELATIVE relocation instead of
16270	 the usual R_ARM_JUMP_SLOT.  Put it in the .iplt section rather
16271	 than the .plt section.  */
16272      if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
16273	{
16274	  eh->is_iplt = 1;
16275	  if (eh->plt.noncall_refcount == 0
16276	      && SYMBOL_REFERENCES_LOCAL (info, h))
16277	    /* All non-call references can be resolved directly.
16278	       This means that they can (and in some cases, must)
16279	       resolve directly to the run-time target, rather than
16280	       to the PLT.  That in turns means that any .got entry
16281	       would be equal to the .igot.plt entry, so there's
16282	       no point having both.  */
16283	    h->got.refcount = 0;
16284	}
16285
16286      if (bfd_link_pic (info)
16287	  || eh->is_iplt
16288	  || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
16289	{
16290	  elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);
16291
16292	  /* If this symbol is not defined in a regular file, and we are
16293	     not generating a shared library, then set the symbol to this
16294	     location in the .plt.  This is required to make function
16295	     pointers compare as equal between the normal executable and
16296	     the shared library.  */
16297	  if (! bfd_link_pic (info)
16298	      && !h->def_regular)
16299	    {
16300	      h->root.u.def.section = htab->root.splt;
16301	      h->root.u.def.value = h->plt.offset;
16302
16303	      /* Make sure the function is not marked as Thumb, in case
16304		 it is the target of an ABS32 relocation, which will
16305		 point to the PLT entry.  */
16306	      ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
16307	    }
16308
16309	  /* VxWorks executables have a second set of relocations for
16310	     each PLT entry.  They go in a separate relocation section,
16311	     which is processed by the kernel loader.  */
16312	  if (htab->root.target_os == is_vxworks && !bfd_link_pic (info))
16313	    {
16314	      /* There is a relocation for the initial PLT entry:
16315		 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_.  */
16316	      if (h->plt.offset == htab->plt_header_size)
16317		elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);
16318
16319	      /* There are two extra relocations for each subsequent
16320		 PLT entry: an R_ARM_32 relocation for the GOT entry,
16321		 and an R_ARM_32 relocation for the PLT entry.  */
16322	      elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
16323	    }
16324	}
16325      else
16326	{
16327	  h->plt.offset = (bfd_vma) -1;
16328	  h->needs_plt = 0;
16329	}
16330    }
16331  else
16332    {
16333      h->plt.offset = (bfd_vma) -1;
16334      h->needs_plt = 0;
16335    }
16336
16337  eh = (struct elf32_arm_link_hash_entry *) h;
16338  eh->tlsdesc_got = (bfd_vma) -1;
16339
16340  if (h->got.refcount > 0)
16341    {
16342      asection *s;
16343      bool dyn;
16344      int tls_type = elf32_arm_hash_entry (h)->tls_type;
16345      int indx;
16346
16347      /* Make sure this symbol is output as a dynamic symbol.
16348	 Undefined weak syms won't yet be marked as dynamic.  */
16349      if (htab->root.dynamic_sections_created
16350	  && h->dynindx == -1
16351	  && !h->forced_local
16352	  && h->root.type == bfd_link_hash_undefweak)
16353	{
16354	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
16355	    return false;
16356	}
16357
16358      s = htab->root.sgot;
16359      h->got.offset = s->size;
16360
16361      if (tls_type == GOT_UNKNOWN)
16362	abort ();
16363
16364      if (tls_type == GOT_NORMAL)
16365	/* Non-TLS symbols need one GOT slot.  */
16366	s->size += 4;
16367      else
16368	{
16369	  if (tls_type & GOT_TLS_GDESC)
16370	    {
16371	      /* R_ARM_TLS_DESC needs 2 GOT slots.  */
16372	      eh->tlsdesc_got
16373		= (htab->root.sgotplt->size
16374		   - elf32_arm_compute_jump_table_size (htab));
16375	      htab->root.sgotplt->size += 8;
16376	      h->got.offset = (bfd_vma) -2;
16377	      /* plt.got_offset needs to know there's a TLS_DESC
16378		 reloc in the middle of .got.plt.  */
16379	      htab->num_tls_desc++;
16380	    }
16381
16382	  if (tls_type & GOT_TLS_GD)
16383	    {
16384	      /* R_ARM_TLS_GD32 and R_ARM_TLS_GD32_FDPIC need two
16385		 consecutive GOT slots.  If the symbol is both GD
16386		 and GDESC, got.offset may have been
16387		 overwritten.  */
16388	      h->got.offset = s->size;
16389	      s->size += 8;
16390	    }
16391
16392	  if (tls_type & GOT_TLS_IE)
16393	    /* R_ARM_TLS_IE32/R_ARM_TLS_IE32_FDPIC need one GOT
16394	       slot.  */
16395	    s->size += 4;
16396	}
16397
16398      dyn = htab->root.dynamic_sections_created;
16399
16400      indx = 0;
16401      if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
16402	  && (!bfd_link_pic (info)
16403	      || !SYMBOL_REFERENCES_LOCAL (info, h)))
16404	indx = h->dynindx;
16405
16406      if (tls_type != GOT_NORMAL
16407	  && (bfd_link_dll (info) || indx != 0)
16408	  && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
16409	      || h->root.type != bfd_link_hash_undefweak))
16410	{
16411	  if (tls_type & GOT_TLS_IE)
16412	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16413
16414	  if (tls_type & GOT_TLS_GD)
16415	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16416
16417	  if (tls_type & GOT_TLS_GDESC)
16418	    {
16419	      elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
16420	      /* GDESC needs a trampoline to jump to.  */
16421	      htab->tls_trampoline = -1;
16422	    }
16423
16424	  /* Only GD needs it.  GDESC just emits one relocation per
16425	     2 entries.  */
16426	  if ((tls_type & GOT_TLS_GD) && indx != 0)
16427	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16428	}
16429      else if (((indx != -1) || htab->fdpic_p)
16430	       && !SYMBOL_REFERENCES_LOCAL (info, h))
16431	{
16432	  if (htab->root.dynamic_sections_created)
16433	    /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation.  */
16434	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16435	}
16436      else if (h->type == STT_GNU_IFUNC
16437	       && eh->plt.noncall_refcount == 0)
16438	/* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
16439	   they all resolve dynamically instead.  Reserve room for the
16440	   GOT entry's R_ARM_IRELATIVE relocation.  */
16441	elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
16442      else if (bfd_link_pic (info)
16443	       && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
16444	/* Reserve room for the GOT entry's R_ARM_RELATIVE relocation.  */
16445	elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16446      else if (htab->fdpic_p && tls_type == GOT_NORMAL)
16447	/* Reserve room for rofixup for FDPIC executable.  */
16448	/* TLS relocs do not need space since they are completely
16449	   resolved.  */
16450	htab->srofixup->size += 4;
16451    }
16452  else
16453    h->got.offset = (bfd_vma) -1;
16454
16455  /* FDPIC support.  */
16456  if (eh->fdpic_cnts.gotofffuncdesc_cnt > 0)
16457    {
      /* The symbol mustn't be exported.  */
16459      if (h->dynindx != -1)
16460	abort ();
16461
16462      /* We only allocate one function descriptor with its associated
16463	 relocation.  */
16464      if (eh->fdpic_cnts.funcdesc_offset == -1)
16465	{
16466	  asection *s = htab->root.sgot;
16467
16468	  eh->fdpic_cnts.funcdesc_offset = s->size;
16469	  s->size += 8;
16470	  /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
16471	  if (bfd_link_pic (info))
16472	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16473	  else
16474	    htab->srofixup->size += 8;
16475	}
16476    }
16477
16478  if (eh->fdpic_cnts.gotfuncdesc_cnt > 0)
16479    {
16480      asection *s = htab->root.sgot;
16481
16482      if (htab->root.dynamic_sections_created && h->dynindx == -1
16483	  && !h->forced_local)
16484	if (! bfd_elf_link_record_dynamic_symbol (info, h))
16485	  return false;
16486
16487      if (h->dynindx == -1)
16488	{
16489	  /* We only allocate one function descriptor with its
16490	     associated relocation.  */
16491	  if (eh->fdpic_cnts.funcdesc_offset == -1)
	    {
	      eh->fdpic_cnts.funcdesc_offset = s->size;
16495	      s->size += 8;
16496	      /* We will add an R_ARM_FUNCDESC_VALUE relocation or two
16497		 rofixups.  */
16498	      if (bfd_link_pic (info))
16499		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16500	      else
16501		htab->srofixup->size += 8;
16502	    }
16503	}
16504
      /* Add one entry into the GOT and an R_ARM_FUNCDESC or
	 R_ARM_RELATIVE/rofixup relocation on it.  */
16507      eh->fdpic_cnts.gotfuncdesc_offset = s->size;
16508      s->size += 4;
16509      if (h->dynindx == -1 && !bfd_link_pic (info))
16510	htab->srofixup->size += 4;
16511      else
16512	elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16513    }
16514
16515  if (eh->fdpic_cnts.funcdesc_cnt > 0)
16516    {
16517      if (htab->root.dynamic_sections_created && h->dynindx == -1
16518	  && !h->forced_local)
16519	if (! bfd_elf_link_record_dynamic_symbol (info, h))
16520	  return false;
16521
16522      if (h->dynindx == -1)
16523	{
16524	  /* We only allocate one function descriptor with its
16525	     associated relocation.  */
16526	  if (eh->fdpic_cnts.funcdesc_offset == -1)
16527	    {
16528	      asection *s = htab->root.sgot;
16529
16530	      eh->fdpic_cnts.funcdesc_offset = s->size;
16531	      s->size += 8;
16532	      /* We will add an R_ARM_FUNCDESC_VALUE relocation or two
16533		 rofixups.  */
16534	      if (bfd_link_pic (info))
16535		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16536	      else
16537		htab->srofixup->size += 8;
16538	    }
16539	}
16540      if (h->dynindx == -1 && !bfd_link_pic (info))
16541	{
	  /* For an FDPIC executable we replace R_ARM_RELATIVE with a rofixup.  */
16543	  htab->srofixup->size += 4 * eh->fdpic_cnts.funcdesc_cnt;
16544	}
16545      else
16546	{
	  /* We will need one dynamic reloc per reference: either
	     R_ARM_FUNCDESC or, for hidden symbols, R_ARM_RELATIVE.  */
16549	  elf32_arm_allocate_dynrelocs (info, htab->root.srelgot,
16550					eh->fdpic_cnts.funcdesc_cnt);
16551	}
16552    }
16553
16554  /* Allocate stubs for exported Thumb functions on v4t.  */
16555  if (!htab->use_blx && h->dynindx != -1
16556      && h->def_regular
16557      && ARM_GET_SYM_BRANCH_TYPE (h->target_internal) == ST_BRANCH_TO_THUMB
16558      && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
16559    {
16560      struct elf_link_hash_entry * th;
16561      struct bfd_link_hash_entry * bh;
16562      struct elf_link_hash_entry * myh;
16563      char name[1024];
16564      asection *s;
16565      bh = NULL;
      /* Create a new symbol to register the real location of the function.  */
16567      s = h->root.u.def.section;
16568      sprintf (name, "__real_%s", h->root.root.string);
16569      _bfd_generic_link_add_one_symbol (info, s->owner,
16570					name, BSF_GLOBAL, s,
16571					h->root.u.def.value,
16572					NULL, true, false, &bh);
16573
16574      myh = (struct elf_link_hash_entry *) bh;
16575      myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
16576      myh->forced_local = 1;
16577      ARM_SET_SYM_BRANCH_TYPE (myh->target_internal, ST_BRANCH_TO_THUMB);
16578      eh->export_glue = myh;
16579      th = record_arm_to_thumb_glue (info, h);
16580      /* Point the symbol at the stub.  */
16581      h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
16582      ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
16583      h->root.u.def.section = th->root.u.def.section;
16584      h->root.u.def.value = th->root.u.def.value & ~1;
16585    }
16586
16587  if (h->dyn_relocs == NULL)
16588    return true;
16589
16590  /* In the shared -Bsymbolic case, discard space allocated for
16591     dynamic pc-relative relocs against symbols which turn out to be
16592     defined in regular objects.  For the normal shared case, discard
16593     space for pc-relative relocs that have become local due to symbol
16594     visibility changes.  */
16595
16596  if (bfd_link_pic (info)
16597      || htab->root.is_relocatable_executable
16598      || htab->fdpic_p)
16599    {
16600      /* Relocs that use pc_count are PC-relative forms, which will appear
16601	 on something like ".long foo - ." or "movw REG, foo - .".  We want
16602	 calls to protected symbols to resolve directly to the function
16603	 rather than going via the plt.  If people want function pointer
16604	 comparisons to work as expected then they should avoid writing
16605	 assembly like ".long foo - .".  */
16606      if (SYMBOL_CALLS_LOCAL (info, h))
16607	{
16608	  struct elf_dyn_relocs **pp;
16609
16610	  for (pp = &h->dyn_relocs; (p = *pp) != NULL; )
16611	    {
16612	      p->count -= p->pc_count;
16613	      p->pc_count = 0;
16614	      if (p->count == 0)
16615		*pp = p->next;
16616	      else
16617		pp = &p->next;
16618	    }
16619	}
16620
16621      if (htab->root.target_os == is_vxworks)
16622	{
16623	  struct elf_dyn_relocs **pp;
16624
16625	  for (pp = &h->dyn_relocs; (p = *pp) != NULL; )
16626	    {
16627	      if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
16628		*pp = p->next;
16629	      else
16630		pp = &p->next;
16631	    }
16632	}
16633
16634      /* Also discard relocs on undefined weak syms with non-default
16635	 visibility.  */
16636      if (h->dyn_relocs != NULL
16637	  && h->root.type == bfd_link_hash_undefweak)
16638	{
16639	  if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
16640	      || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
16641	    h->dyn_relocs = NULL;
16642
16643	  /* Make sure undefined weak symbols are output as a dynamic
16644	     symbol in PIEs.  */
16645	  else if (htab->root.dynamic_sections_created && h->dynindx == -1
16646		   && !h->forced_local)
16647	    {
16648	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
16649		return false;
16650	    }
16651	}
16652
16653      else if (htab->root.is_relocatable_executable && h->dynindx == -1
16654	       && h->root.type == bfd_link_hash_new)
16655	{
16656	  /* Output absolute symbols so that we can create relocations
16657	     against them.  For normal symbols we output a relocation
16658	     against the section that contains them.  */
16659	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
16660	    return false;
16661	}
16662
16663    }
16664  else
16665    {
16666      /* For the non-shared case, discard space for relocs against
16667	 symbols which turn out to need copy relocs or are not
16668	 dynamic.  */
16669
16670      if (!h->non_got_ref
16671	  && ((h->def_dynamic
16672	       && !h->def_regular)
16673	      || (htab->root.dynamic_sections_created
16674		  && (h->root.type == bfd_link_hash_undefweak
16675		      || h->root.type == bfd_link_hash_undefined))))
16676	{
16677	  /* Make sure this symbol is output as a dynamic symbol.
16678	     Undefined weak syms won't yet be marked as dynamic.  */
16679	  if (h->dynindx == -1 && !h->forced_local
16680	      && h->root.type == bfd_link_hash_undefweak)
16681	    {
16682	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
16683		return false;
16684	    }
16685
16686	  /* If that succeeded, we know we'll be keeping all the
16687	     relocs.  */
16688	  if (h->dynindx != -1)
16689	    goto keep;
16690	}
16691
16692      h->dyn_relocs = NULL;
16693
16694    keep: ;
16695    }
16696
16697  /* Finally, allocate space.  */
16698  for (p = h->dyn_relocs; p != NULL; p = p->next)
16699    {
16700      asection *sreloc = elf_section_data (p->sec)->sreloc;
16701
16702      if (h->type == STT_GNU_IFUNC
16703	  && eh->plt.noncall_refcount == 0
16704	  && SYMBOL_REFERENCES_LOCAL (info, h))
16705	elf32_arm_allocate_irelocs (info, sreloc, p->count);
16706      else if (h->dynindx != -1
16707	       && (!bfd_link_pic (info) || !info->symbolic || !h->def_regular))
16708	elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
16709      else if (htab->fdpic_p && !bfd_link_pic (info))
16710	htab->srofixup->size += 4 * p->count;
16711      else
16712	elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
16713    }
16714
16715  return true;
16716}
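
/* A rough summary of the GOT space reserved above for one symbol, per TLS
   access model (a sketch only; dynamic relocations are counted separately):

     GOT_NORMAL     4 bytes in .got
     GOT_TLS_IE     4 bytes in .got
     GOT_TLS_GD     8 bytes in .got
     GOT_TLS_GDESC  8 bytes in .got.plt, plus the TLS trampoline.  */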
16717
16718void
16719bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
16720				 int byteswap_code)
16721{
16722  struct elf32_arm_link_hash_table *globals;
16723
16724  globals = elf32_arm_hash_table (info);
16725  if (globals == NULL)
16726    return;
16727
16728  globals->byteswap_code = byteswap_code;
16729}
16730
16731/* Set the sizes of the dynamic sections.  */
16732
16733static bool
16734elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
16735				 struct bfd_link_info * info)
16736{
16737  bfd * dynobj;
16738  asection * s;
16739  bool relocs;
16740  bfd *ibfd;
16741  struct elf32_arm_link_hash_table *htab;
16742
16743  htab = elf32_arm_hash_table (info);
16744  if (htab == NULL)
16745    return false;
16746
16747  dynobj = elf_hash_table (info)->dynobj;
16748  BFD_ASSERT (dynobj != NULL);
16749  check_use_blx (htab);
16750
16751  if (elf_hash_table (info)->dynamic_sections_created)
16752    {
16753      /* Set the contents of the .interp section to the interpreter.  */
16754      if (bfd_link_executable (info) && !info->nointerp)
16755	{
16756	  s = bfd_get_linker_section (dynobj, ".interp");
16757	  BFD_ASSERT (s != NULL);
16758	  s->size = sizeof ELF_DYNAMIC_INTERPRETER;
16759	  s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
16760	}
16761    }
16762
16763  /* Set up .got offsets for local syms, and space for local dynamic
16764     relocs.  */
16765  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
16766    {
16767      bfd_signed_vma *local_got;
16768      bfd_signed_vma *end_local_got;
16769      struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
16770      char *local_tls_type;
16771      bfd_vma *local_tlsdesc_gotent;
16772      bfd_size_type locsymcount;
16773      Elf_Internal_Shdr *symtab_hdr;
16774      asection *srel;
16775      unsigned int symndx;
16776      struct fdpic_local *local_fdpic_cnts;
16777
16778      if (! is_arm_elf (ibfd))
16779	continue;
16780
16781      for (s = ibfd->sections; s != NULL; s = s->next)
16782	{
16783	  struct elf_dyn_relocs *p;
16784
16785	  for (p = (struct elf_dyn_relocs *)
16786		   elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
16787	    {
16788	      if (!bfd_is_abs_section (p->sec)
16789		  && bfd_is_abs_section (p->sec->output_section))
16790		{
16791		  /* Input section has been discarded, either because
16792		     it is a copy of a linkonce section or due to
16793		     linker script /DISCARD/, so we'll be discarding
16794		     the relocs too.  */
16795		}
16796	      else if (htab->root.target_os == is_vxworks
16797		       && strcmp (p->sec->output_section->name,
16798				  ".tls_vars") == 0)
16799		{
		  /* Relocations in VxWorks .tls_vars sections are
		     handled specially by the loader.  */
16802		}
16803	      else if (p->count != 0)
16804		{
16805		  srel = elf_section_data (p->sec)->sreloc;
16806		  if (htab->fdpic_p && !bfd_link_pic (info))
16807		    htab->srofixup->size += 4 * p->count;
16808		  else
16809		    elf32_arm_allocate_dynrelocs (info, srel, p->count);
16810		  if ((p->sec->output_section->flags & SEC_READONLY) != 0)
16811		    info->flags |= DF_TEXTREL;
16812		}
16813	    }
16814	}
16815
16816      local_got = elf_local_got_refcounts (ibfd);
16817      if (local_got == NULL)
16818	continue;
16819
16820      symtab_hdr = & elf_symtab_hdr (ibfd);
16821      locsymcount = symtab_hdr->sh_info;
16822      end_local_got = local_got + locsymcount;
16823      local_iplt_ptr = elf32_arm_local_iplt (ibfd);
16824      local_tls_type = elf32_arm_local_got_tls_type (ibfd);
16825      local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
16826      local_fdpic_cnts = elf32_arm_local_fdpic_cnts (ibfd);
16827      symndx = 0;
16828      s = htab->root.sgot;
16829      srel = htab->root.srelgot;
16830      for (; local_got < end_local_got;
16831	   ++local_got, ++local_iplt_ptr, ++local_tls_type,
16832	   ++local_tlsdesc_gotent, ++symndx, ++local_fdpic_cnts)
16833	{
16834	  if (symndx >= elf32_arm_num_entries (ibfd))
16835	    return false;
16836
16837	  *local_tlsdesc_gotent = (bfd_vma) -1;
16838	  local_iplt = *local_iplt_ptr;
16839
16840	  /* FDPIC support.  */
16841	  if (local_fdpic_cnts->gotofffuncdesc_cnt > 0)
16842	    {
16843	      if (local_fdpic_cnts->funcdesc_offset == -1)
16844		{
16845		  local_fdpic_cnts->funcdesc_offset = s->size;
16846		  s->size += 8;
16847
16848		  /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
16849		  if (bfd_link_pic (info))
16850		    elf32_arm_allocate_dynrelocs (info, srel, 1);
16851		  else
16852		    htab->srofixup->size += 8;
16853		}
16854	    }
16855
16856	  if (local_fdpic_cnts->funcdesc_cnt > 0)
16857	    {
16858	      if (local_fdpic_cnts->funcdesc_offset == -1)
16859		{
16860		  local_fdpic_cnts->funcdesc_offset = s->size;
16861		  s->size += 8;
16862
16863		  /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
16864		  if (bfd_link_pic (info))
16865		    elf32_arm_allocate_dynrelocs (info, srel, 1);
16866		  else
16867		    htab->srofixup->size += 8;
16868		}
16869
16870	      /* We will add n R_ARM_RELATIVE relocations or n rofixups.  */
16871	      if (bfd_link_pic (info))
16872		elf32_arm_allocate_dynrelocs (info, srel, local_fdpic_cnts->funcdesc_cnt);
16873	      else
16874		htab->srofixup->size += 4 * local_fdpic_cnts->funcdesc_cnt;
16875	    }
16876
16877	  if (local_iplt != NULL)
16878	    {
16879	      struct elf_dyn_relocs *p;
16880
16881	      if (local_iplt->root.refcount > 0)
16882		{
16883		  elf32_arm_allocate_plt_entry (info, true,
16884						&local_iplt->root,
16885						&local_iplt->arm);
16886		  if (local_iplt->arm.noncall_refcount == 0)
16887		    /* All references to the PLT are calls, so all
16888		       non-call references can resolve directly to the
16889		       run-time target.  This means that the .got entry
16890		       would be the same as the .igot.plt entry, so there's
16891		       no point creating both.  */
16892		    *local_got = 0;
16893		}
16894	      else
16895		{
16896		  BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
16897		  local_iplt->root.offset = (bfd_vma) -1;
16898		}
16899
16900	      for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
16901		{
16902		  asection *psrel;
16903
16904		  psrel = elf_section_data (p->sec)->sreloc;
16905		  if (local_iplt->arm.noncall_refcount == 0)
16906		    elf32_arm_allocate_irelocs (info, psrel, p->count);
16907		  else
16908		    elf32_arm_allocate_dynrelocs (info, psrel, p->count);
16909		}
16910	    }
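	  /* Lay out this symbol's GOT entries according to the kinds of
	     access that were recorded: TLS GD needs an 8-byte pair, a
	     TLS descriptor lives in .got.plt, and TLS IE and normal GOT
	     accesses each take one word.  */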
16911	  if (*local_got > 0)
16912	    {
16913	      Elf_Internal_Sym *isym;
16914
16915	      *local_got = s->size;
16916	      if (*local_tls_type & GOT_TLS_GD)
16917		/* TLS_GD relocs need an 8-byte structure in the GOT.  */
16918		s->size += 8;
16919	      if (*local_tls_type & GOT_TLS_GDESC)
16920		{
16921		  *local_tlsdesc_gotent = htab->root.sgotplt->size
16922		    - elf32_arm_compute_jump_table_size (htab);
16923		  htab->root.sgotplt->size += 8;
16924		  *local_got = (bfd_vma) -2;
16925		  /* plt.got_offset needs to know there's a TLS_DESC
16926		     reloc in the middle of .got.plt.  */
16927		  htab->num_tls_desc++;
16928		}
16929	      if (*local_tls_type & GOT_TLS_IE)
16930		s->size += 4;
16931
16932	      if (*local_tls_type & GOT_NORMAL)
16933		{
16934		  /* If the symbol is both GD and GDESC, *local_got
16935		     may have been overwritten.  */
16936		  *local_got = s->size;
16937		  s->size += 4;
16938		}
16939
16940	      isym = bfd_sym_from_r_symndx (&htab->root.sym_cache, ibfd,
16941					    symndx);
16942	      if (isym == NULL)
16943		return false;
16944
16945	      /* If all references to an STT_GNU_IFUNC PLT are calls,
16946		 then all non-call references, including this GOT entry,
16947		 resolve directly to the run-time target.  */
16948	      if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
16949		  && (local_iplt == NULL
16950		      || local_iplt->arm.noncall_refcount == 0))
16951		elf32_arm_allocate_irelocs (info, srel, 1);
16952	      else if (bfd_link_pic (info) || output_bfd->flags & DYNAMIC || htab->fdpic_p)
16953		{
		  if (bfd_link_pic (info) && !(*local_tls_type & GOT_TLS_GDESC))
16955		    elf32_arm_allocate_dynrelocs (info, srel, 1);
16956		  else if (htab->fdpic_p && *local_tls_type & GOT_NORMAL)
16957		    htab->srofixup->size += 4;
16958
16959		  if ((bfd_link_pic (info) || htab->fdpic_p)
16960		      && *local_tls_type & GOT_TLS_GDESC)
16961		    {
16962		      elf32_arm_allocate_dynrelocs (info,
16963						    htab->root.srelplt, 1);
16964		      htab->tls_trampoline = -1;
16965		    }
16966		}
16967	    }
16968	  else
16969	    *local_got = (bfd_vma) -1;
16970	}
16971    }
16972
16973  if (htab->tls_ldm_got.refcount > 0)
16974    {
16975      /* Allocate two GOT entries and one dynamic relocation (if necessary)
16976	 for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations.  */
16977      htab->tls_ldm_got.offset = htab->root.sgot->size;
16978      htab->root.sgot->size += 8;
16979      if (bfd_link_pic (info))
16980	elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16981    }
16982  else
16983    htab->tls_ldm_got.offset = -1;
16984
  /* At the very end of the .rofixup section is a pointer to the GOT;
     reserve space for it.  */
16987  if (htab->fdpic_p && htab->srofixup != NULL)
16988    htab->srofixup->size += 4;
16989
16990  /* Allocate global sym .plt and .got entries, and space for global
16991     sym dynamic relocs.  */
16992  elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);
16993
16994  /* Here we rummage through the found bfds to collect glue information.  */
16995  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
16996    {
16997      if (! is_arm_elf (ibfd))
16998	continue;
16999
17000      /* Initialise mapping tables for code/data.  */
17001      bfd_elf32_arm_init_maps (ibfd);
17002
17003      if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
17004	  || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)
17005	  || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd, info))
17006	_bfd_error_handler (_("errors encountered processing file %pB"), ibfd);
17007    }
17008
17009  /* Allocate space for the glue sections now that we've sized them.  */
17010  bfd_elf32_arm_allocate_interworking_sections (info);
17011
17012  /* For every jump slot reserved in the sgotplt, reloc_count is
17013     incremented.  However, when we reserve space for TLS descriptors,
17014     it's not incremented, so in order to compute the space reserved
17015     for them, it suffices to multiply the reloc count by the jump
17016     slot size.  */
17017  if (htab->root.srelplt)
17018    htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size (htab);
17019
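  /* If TLS descriptors are in use, reserve room in .plt for the TLS
     trampoline (creating the PLT header first if necessary), and,
     unless lazy binding is disabled, for the lazy TLSDESC trampoline
     together with the GOT word it uses.  */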
17020  if (htab->tls_trampoline)
17021    {
17022      if (htab->root.splt->size == 0)
17023	htab->root.splt->size += htab->plt_header_size;
17024
17025      htab->tls_trampoline = htab->root.splt->size;
17026      htab->root.splt->size += htab->plt_entry_size;
17027
17028      /* If we're not using lazy TLS relocations, don't generate the
17029	 PLT and GOT entries they require.  */
17030      if ((info->flags & DF_BIND_NOW))
17031	htab->root.tlsdesc_plt = 0;
17032      else
17033	{
17034	  htab->root.tlsdesc_got = htab->root.sgot->size;
17035	  htab->root.sgot->size += 4;
17036
17037	  htab->root.tlsdesc_plt = htab->root.splt->size;
17038	  htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
17039	}
17040    }
17041
17042  /* The check_relocs and adjust_dynamic_symbol entry points have
17043     determined the sizes of the various dynamic sections.  Allocate
17044     memory for them.  */
17045  relocs = false;
17046  for (s = dynobj->sections; s != NULL; s = s->next)
17047    {
17048      const char * name;
17049
17050      if ((s->flags & SEC_LINKER_CREATED) == 0)
17051	continue;
17052
17053      /* It's OK to base decisions on the section name, because none
17054	 of the dynobj section names depend upon the input files.  */
17055      name = bfd_section_name (s);
17056
      if (s == htab->root.splt)
	{
	  /* The PLT is accounted for when the dynamic tags are added
	     below; nothing extra to record here.  */
	  ;
	}
17062      else if (startswith (name, ".rel"))
17063	{
17064	  if (s->size != 0)
17065	    {
17066	      /* Remember whether there are any reloc sections other
17067		 than .rel(a).plt and .rela.plt.unloaded.  */
17068	      if (s != htab->root.srelplt && s != htab->srelplt2)
17069		relocs = true;
17070
17071	      /* We use the reloc_count field as a counter if we need
17072		 to copy relocs into the output file.  */
17073	      s->reloc_count = 0;
17074	    }
17075	}
17076      else if (s != htab->root.sgot
17077	       && s != htab->root.sgotplt
17078	       && s != htab->root.iplt
17079	       && s != htab->root.igotplt
17080	       && s != htab->root.sdynbss
17081	       && s != htab->root.sdynrelro
17082	       && s != htab->srofixup)
17083	{
17084	  /* It's not one of our sections, so don't allocate space.  */
17085	  continue;
17086	}
17087
17088      if (s->size == 0)
17089	{
17090	  /* If we don't need this section, strip it from the
17091	     output file.  This is mostly to handle .rel(a).bss and
17092	     .rel(a).plt.  We must create both sections in
17093	     create_dynamic_sections, because they must be created
17094	     before the linker maps input sections to output
17095	     sections.  The linker does that before
17096	     adjust_dynamic_symbol is called, and it is that
17097	     function which decides whether anything needs to go
17098	     into these sections.  */
17099	  s->flags |= SEC_EXCLUDE;
17100	  continue;
17101	}
17102
17103      if ((s->flags & SEC_HAS_CONTENTS) == 0)
17104	continue;
17105
17106      /* Allocate memory for the section contents.  */
17107      s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
17108      if (s->contents == NULL)
17109	return false;
17110    }
17111
17112  return _bfd_elf_maybe_vxworks_add_dynamic_tags (output_bfd, info,
17113						  relocs);
17114}
17115
/* Size sections even though they're not dynamic.  We use it to set up
   _TLS_MODULE_BASE_, if needed.  */
17118
17119static bool
17120elf32_arm_always_size_sections (bfd *output_bfd,
17121				struct bfd_link_info *info)
17122{
17123  asection *tls_sec;
17124  struct elf32_arm_link_hash_table *htab;
17125
17126  htab = elf32_arm_hash_table (info);
17127
17128  if (bfd_link_relocatable (info))
17129    return true;
17130
17131  tls_sec = elf_hash_table (info)->tls_sec;
17132
17133  if (tls_sec)
17134    {
17135      struct elf_link_hash_entry *tlsbase;
17136
17137      tlsbase = elf_link_hash_lookup
17138	(elf_hash_table (info), "_TLS_MODULE_BASE_", true, true, false);
17139
17140      if (tlsbase)
17141	{
17142	  struct bfd_link_hash_entry *bh = NULL;
17143	  const struct elf_backend_data *bed
17144	    = get_elf_backend_data (output_bfd);
17145
17146	  if (!(_bfd_generic_link_add_one_symbol
17147		(info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
17148		 tls_sec, 0, NULL, false,
17149		 bed->collect, &bh)))
17150	    return false;
17151
17152	  tlsbase->type = STT_TLS;
17153	  tlsbase = (struct elf_link_hash_entry *)bh;
17154	  tlsbase->def_regular = 1;
17155	  tlsbase->other = STV_HIDDEN;
17156	  (*bed->elf_backend_hide_symbol) (info, tlsbase, true);
17157	}
17158    }
17159
17160  if (htab->fdpic_p && !bfd_link_relocatable (info)
17161      && !bfd_elf_stack_segment_size (output_bfd, info,
17162				      "__stacksize", DEFAULT_STACK_SIZE))
17163    return false;
17164
17165  return true;
17166}
17167
17168/* Finish up dynamic symbol handling.  We set the contents of various
17169   dynamic sections here.  */
17170
17171static bool
17172elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
17173				 struct bfd_link_info * info,
17174				 struct elf_link_hash_entry * h,
17175				 Elf_Internal_Sym * sym)
17176{
17177  struct elf32_arm_link_hash_table *htab;
17178  struct elf32_arm_link_hash_entry *eh;
17179
17180  htab = elf32_arm_hash_table (info);
17181  if (htab == NULL)
17182    return false;
17183
17184  eh = (struct elf32_arm_link_hash_entry *) h;
17185
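  /* If this symbol has a PLT entry, populate the .plt slot and adjust
     the symbol table entry that describes it; .iplt slots are handled
     elsewhere.  */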
17186  if (h->plt.offset != (bfd_vma) -1)
17187    {
17188      if (!eh->is_iplt)
17189	{
17190	  BFD_ASSERT (h->dynindx != -1);
17191	  if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
17192					      h->dynindx, 0))
17193	    return false;
17194	}
17195
17196      if (!h->def_regular)
17197	{
17198	  /* Mark the symbol as undefined, rather than as defined in
17199	     the .plt section.  */
17200	  sym->st_shndx = SHN_UNDEF;
17201	  /* If the symbol is weak we need to clear the value.
17202	     Otherwise, the PLT entry would provide a definition for
17203	     the symbol even if the symbol wasn't defined anywhere,
17204	     and so the symbol would never be NULL.  Leave the value if
17205	     there were any relocations where pointer equality matters
17206	     (this is a clue for the dynamic linker, to make function
17207	     pointer comparisons work between an application and shared
17208	     library).  */
17209	  if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
17210	    sym->st_value = 0;
17211	}
17212      else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
17213	{
17214	  /* At least one non-call relocation references this .iplt entry,
17215	     so the .iplt entry is the function's canonical address.  */
17216	  sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
17217	  ARM_SET_SYM_BRANCH_TYPE (sym->st_target_internal, ST_BRANCH_TO_ARM);
17218	  sym->st_shndx = (_bfd_elf_section_from_bfd_section
17219			   (output_bfd, htab->root.iplt->output_section));
17220	  sym->st_value = (h->plt.offset
17221			   + htab->root.iplt->output_section->vma
17222			   + htab->root.iplt->output_offset);
17223	}
17224    }
17225
17226  if (h->needs_copy)
17227    {
17228      asection * s;
17229      Elf_Internal_Rela rel;
17230
17231      /* This symbol needs a copy reloc.  Set it up.  */
17232      BFD_ASSERT (h->dynindx != -1
17233		  && (h->root.type == bfd_link_hash_defined
17234		      || h->root.type == bfd_link_hash_defweak));
17235
17236      rel.r_addend = 0;
17237      rel.r_offset = (h->root.u.def.value
17238		      + h->root.u.def.section->output_section->vma
17239		      + h->root.u.def.section->output_offset);
17240      rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
17241      if (h->root.u.def.section == htab->root.sdynrelro)
17242	s = htab->root.sreldynrelro;
17243      else
17244	s = htab->root.srelbss;
17245      elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
17246    }
17247
17248  /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute.  On VxWorks,
17249     and for FDPIC, the _GLOBAL_OFFSET_TABLE_ symbol is not absolute:
17250     it is relative to the ".got" section.  */
17251  if (h == htab->root.hdynamic
17252      || (!htab->fdpic_p
17253	  && htab->root.target_os != is_vxworks
17254	  && h == htab->root.hgot))
17255    sym->st_shndx = SHN_ABS;
17256
17257  return true;
17258}
17259
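/* Copy the COUNT instructions of TEMPLATE into CONTENTS, rewriting any
   "bx rX" as "mov pc, rX" when the BX instruction must be avoided
   (the --fix-v4bx case).  */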
17260static void
17261arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
17262		    void *contents,
17263		    const unsigned long *template, unsigned count)
17264{
17265  unsigned ix;
17266
17267  for (ix = 0; ix != count; ix++)
17268    {
17269      unsigned long insn = template[ix];
17270
17271      /* Emit mov pc,rx if bx is not permitted.  */
17272      if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
17273	insn = (insn & 0xf000000f) | 0x01a0f000;
      put_arm_insn (htab, output_bfd, insn, (char *) contents + ix * 4);
17275    }
17276}
17277
17278/* Install the special first PLT entry for elf32-arm-nacl.  Unlike
17279   other variants, NaCl needs this entry in a static executable's
17280   .iplt too.  When we're handling that case, GOT_DISPLACEMENT is
17281   zero.  For .iplt really only the last bundle is useful, and .iplt
17282   could have a shorter first entry, with each individual PLT entry's
17283   relative branch calculated differently so it targets the last
17284   bundle instead of the instruction before it (labelled .Lplt_tail
17285   above).  But it's simpler to keep the size and layout of PLT0
17286   consistent with the dynamic case, at the cost of some dead code at
17287   the start of .iplt and the one dead store to the stack at the start
17288   of .Lplt_tail.  */
17289static void
17290arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
17291		   asection *plt, bfd_vma got_displacement)
17292{
17293  unsigned int i;
17294
17295  put_arm_insn (htab, output_bfd,
17296		elf32_arm_nacl_plt0_entry[0]
17297		| arm_movw_immediate (got_displacement),
17298		plt->contents + 0);
17299  put_arm_insn (htab, output_bfd,
17300		elf32_arm_nacl_plt0_entry[1]
17301		| arm_movt_immediate (got_displacement),
17302		plt->contents + 4);
17303
17304  for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
17305    put_arm_insn (htab, output_bfd,
17306		  elf32_arm_nacl_plt0_entry[i],
17307		  plt->contents + (i * 4));
17308}
17309
17310/* Finish up the dynamic sections.  */
17311
17312static bool
17313elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
17314{
17315  bfd * dynobj;
17316  asection * sgot;
17317  asection * sdyn;
17318  struct elf32_arm_link_hash_table *htab;
17319
17320  htab = elf32_arm_hash_table (info);
17321  if (htab == NULL)
17322    return false;
17323
17324  dynobj = elf_hash_table (info)->dynobj;
17325
17326  sgot = htab->root.sgotplt;
17327  /* A broken linker script might have discarded the dynamic sections.
17328     Catch this here so that we do not seg-fault later on.  */
17329  if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
17330    return false;
17331  sdyn = bfd_get_linker_section (dynobj, ".dynamic");
17332
17333  if (elf_hash_table (info)->dynamic_sections_created)
17334    {
17335      asection *splt;
17336      Elf32_External_Dyn *dyncon, *dynconend;
17337
17338      splt = htab->root.splt;
17339      BFD_ASSERT (splt != NULL && sdyn != NULL);
17340      BFD_ASSERT (sgot != NULL);
17341
17342      dyncon = (Elf32_External_Dyn *) sdyn->contents;
17343      dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
17344
17345      for (; dyncon < dynconend; dyncon++)
17346	{
17347	  Elf_Internal_Dyn dyn;
17348	  const char * name;
17349	  asection * s;
17350
17351	  bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
17352
17353	  switch (dyn.d_tag)
17354	    {
17355	    default:
17356	      if (htab->root.target_os == is_vxworks
17357		  && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
17358		bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17359	      break;
17360
17361	    case DT_HASH:
17362	    case DT_STRTAB:
17363	    case DT_SYMTAB:
17364	    case DT_VERSYM:
17365	    case DT_VERDEF:
17366	    case DT_VERNEED:
17367	      break;
17368
17369	    case DT_PLTGOT:
17370	      name = ".got.plt";
17371	      goto get_vma;
17372	    case DT_JMPREL:
17373	      name = RELOC_SECTION (htab, ".plt");
17374	    get_vma:
17375	      s = bfd_get_linker_section (dynobj, name);
17376	      if (s == NULL)
17377		{
17378		  _bfd_error_handler
17379		    (_("could not find section %s"), name);
17380		  bfd_set_error (bfd_error_invalid_operation);
17381		  return false;
17382		}
17383	      dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
17384	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17385	      break;
17386
17387	    case DT_PLTRELSZ:
17388	      s = htab->root.srelplt;
17389	      BFD_ASSERT (s != NULL);
17390	      dyn.d_un.d_val = s->size;
17391	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17392	      break;
17393
17394	    case DT_RELSZ:
17395	    case DT_RELASZ:
17396	    case DT_REL:
17397	    case DT_RELA:
17398	      break;
17399
17400	    case DT_TLSDESC_PLT:
17401	      s = htab->root.splt;
17402	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
17403				+ htab->root.tlsdesc_plt);
17404	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17405	      break;
17406
17407	    case DT_TLSDESC_GOT:
17408	      s = htab->root.sgot;
17409	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
17410				+ htab->root.tlsdesc_got);
17411	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17412	      break;
17413
17414	      /* Set the bottom bit of DT_INIT/FINI if the
17415		 corresponding function is Thumb.  */
17416	    case DT_INIT:
17417	      name = info->init_function;
17418	      goto get_sym;
17419	    case DT_FINI:
17420	      name = info->fini_function;
17421	    get_sym:
17422	      /* If it wasn't set by elf_bfd_final_link
17423		 then there is nothing to adjust.  */
17424	      if (dyn.d_un.d_val != 0)
17425		{
17426		  struct elf_link_hash_entry * eh;
17427
17428		  eh = elf_link_hash_lookup (elf_hash_table (info), name,
17429					     false, false, true);
17430		  if (eh != NULL
17431		      && ARM_GET_SYM_BRANCH_TYPE (eh->target_internal)
17432			 == ST_BRANCH_TO_THUMB)
17433		    {
17434		      dyn.d_un.d_val |= 1;
17435		      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17436		    }
17437		}
17438	      break;
17439	    }
17440	}
17441
17442      /* Fill in the first entry in the procedure linkage table.  */
17443      if (splt->size > 0 && htab->plt_header_size)
17444	{
17445	  const bfd_vma *plt0_entry;
17446	  bfd_vma got_address, plt_address, got_displacement;
17447
17448	  /* Calculate the addresses of the GOT and PLT.  */
17449	  got_address = sgot->output_section->vma + sgot->output_offset;
17450	  plt_address = splt->output_section->vma + splt->output_offset;
17451
17452	  if (htab->root.target_os == is_vxworks)
17453	    {
17454	      /* The VxWorks GOT is relocated by the dynamic linker.
17455		 Therefore, we must emit relocations rather than simply
17456		 computing the values now.  */
17457	      Elf_Internal_Rela rel;
17458
17459	      plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
17460	      put_arm_insn (htab, output_bfd, plt0_entry[0],
17461			    splt->contents + 0);
17462	      put_arm_insn (htab, output_bfd, plt0_entry[1],
17463			    splt->contents + 4);
17464	      put_arm_insn (htab, output_bfd, plt0_entry[2],
17465			    splt->contents + 8);
17466	      bfd_put_32 (output_bfd, got_address, splt->contents + 12);
17467
17468	      /* Generate a relocation for _GLOBAL_OFFSET_TABLE_.  */
17469	      rel.r_offset = plt_address + 12;
17470	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
17471	      rel.r_addend = 0;
17472	      SWAP_RELOC_OUT (htab) (output_bfd, &rel,
17473				     htab->srelplt2->contents);
17474	    }
17475	  else if (htab->root.target_os == is_nacl)
17476	    arm_nacl_put_plt0 (htab, output_bfd, splt,
17477			       got_address + 8 - (plt_address + 16));
17478	  else if (using_thumb_only (htab))
17479	    {
17480	      got_displacement = got_address - (plt_address + 12);
17481
17482	      plt0_entry = elf32_thumb2_plt0_entry;
17483	      put_arm_insn (htab, output_bfd, plt0_entry[0],
17484			    splt->contents + 0);
17485	      put_arm_insn (htab, output_bfd, plt0_entry[1],
17486			    splt->contents + 4);
17487	      put_arm_insn (htab, output_bfd, plt0_entry[2],
17488			    splt->contents + 8);
17489
17490	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 12);
17491	    }
17492	  else
17493	    {
17494	      got_displacement = got_address - (plt_address + 16);
17495
17496	      plt0_entry = elf32_arm_plt0_entry;
17497	      put_arm_insn (htab, output_bfd, plt0_entry[0],
17498			    splt->contents + 0);
17499	      put_arm_insn (htab, output_bfd, plt0_entry[1],
17500			    splt->contents + 4);
17501	      put_arm_insn (htab, output_bfd, plt0_entry[2],
17502			    splt->contents + 8);
17503	      put_arm_insn (htab, output_bfd, plt0_entry[3],
17504			    splt->contents + 12);
17505
17506#ifdef FOUR_WORD_PLT
17507	      /* The displacement value goes in the otherwise-unused
17508		 last word of the second entry.  */
17509	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
17510#else
17511	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
17512#endif
17513	    }
17514	}
17515
17516      /* UnixWare sets the entsize of .plt to 4, although that doesn't
17517	 really seem like the right value.  */
17518      if (splt->output_section->owner == output_bfd)
17519	elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
17520
17521      if (htab->root.tlsdesc_plt)
17522	{
17523	  bfd_vma got_address
17524	    = sgot->output_section->vma + sgot->output_offset;
17525	  bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
17526				    + htab->root.sgot->output_offset);
17527	  bfd_vma plt_address
17528	    = splt->output_section->vma + splt->output_offset;
17529
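	  /* Install the lazy TLS descriptor trampoline and patch its
	     two trailing data words with PC-relative offsets to the
	     reserved resolver GOT slot and to the start of .got.plt,
	     biased by the constants held in the trampoline template.  */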
17530	  arm_put_trampoline (htab, output_bfd,
17531			      splt->contents + htab->root.tlsdesc_plt,
17532			      dl_tlsdesc_lazy_trampoline, 6);
17533
17534	  bfd_put_32 (output_bfd,
17535		      gotplt_address + htab->root.tlsdesc_got
17536		      - (plt_address + htab->root.tlsdesc_plt)
17537		      - dl_tlsdesc_lazy_trampoline[6],
17538		      splt->contents + htab->root.tlsdesc_plt + 24);
17539	  bfd_put_32 (output_bfd,
17540		      got_address - (plt_address + htab->root.tlsdesc_plt)
17541		      - dl_tlsdesc_lazy_trampoline[7],
17542		      splt->contents + htab->root.tlsdesc_plt + 24 + 4);
17543	}
17544
17545      if (htab->tls_trampoline)
17546	{
17547	  arm_put_trampoline (htab, output_bfd,
17548			      splt->contents + htab->tls_trampoline,
17549			      tls_trampoline, 3);
17550#ifdef FOUR_WORD_PLT
17551	  bfd_put_32 (output_bfd, 0x00000000,
17552		      splt->contents + htab->tls_trampoline + 12);
17553#endif
17554	}
17555
17556      if (htab->root.target_os == is_vxworks
17557	  && !bfd_link_pic (info)
17558	  && htab->root.splt->size > 0)
17559	{
17560	  /* Correct the .rel(a).plt.unloaded relocations.  They will have
17561	     incorrect symbol indexes.  */
17562	  int num_plts;
17563	  unsigned char *p;
17564
17565	  num_plts = ((htab->root.splt->size - htab->plt_header_size)
17566		      / htab->plt_entry_size);
17567	  p = htab->srelplt2->contents + RELOC_SIZE (htab);
17568
17569	  for (; num_plts; num_plts--)
17570	    {
17571	      Elf_Internal_Rela rel;
17572
17573	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
17574	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
17575	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
17576	      p += RELOC_SIZE (htab);
17577
17578	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
17579	      rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
17580	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
17581	      p += RELOC_SIZE (htab);
17582	    }
17583	}
17584    }
17585
17586  if (htab->root.target_os == is_nacl
17587      && htab->root.iplt != NULL
17588      && htab->root.iplt->size > 0)
17589    /* NaCl uses a special first entry in .iplt too.  */
17590    arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0);
17591
17592  /* Fill in the first three entries in the global offset table.  */
17593  if (sgot)
17594    {
17595      if (sgot->size > 0)
17596	{
17597	  if (sdyn == NULL)
17598	    bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
17599	  else
17600	    bfd_put_32 (output_bfd,
17601			sdyn->output_section->vma + sdyn->output_offset,
17602			sgot->contents);
17603	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
17604	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
17605	}
17606
17607      elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
17608    }
17609
17610  /* At the very end of the .rofixup section is a pointer to the GOT.  */
17611  if (htab->fdpic_p && htab->srofixup != NULL)
17612    {
17613      struct elf_link_hash_entry *hgot = htab->root.hgot;
17614
17615      bfd_vma got_value = hgot->root.u.def.value
17616	+ hgot->root.u.def.section->output_section->vma
17617	+ hgot->root.u.def.section->output_offset;
17618
17619      arm_elf_add_rofixup (output_bfd, htab->srofixup, got_value);
17620
17621      /* Make sure we allocated and generated the same number of fixups.  */
17622      BFD_ASSERT (htab->srofixup->reloc_count * 4 == htab->srofixup->size);
17623    }
17624
17625  return true;
17626}
17627
17628static bool
17629elf32_arm_init_file_header (bfd *abfd, struct bfd_link_info *link_info)
17630{
17631  Elf_Internal_Ehdr * i_ehdrp;	/* ELF file header, internal form.  */
17632  struct elf32_arm_link_hash_table *globals;
17633  struct elf_segment_map *m;
17634
17635  if (!_bfd_elf_init_file_header (abfd, link_info))
17636    return false;
17637
17638  i_ehdrp = elf_elfheader (abfd);
17639
17640  if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
17641    i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
17642  i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
17643
17644  if (link_info)
17645    {
17646      globals = elf32_arm_hash_table (link_info);
17647      if (globals != NULL && globals->byteswap_code)
17648	i_ehdrp->e_flags |= EF_ARM_BE8;
17649
      if (globals != NULL && globals->fdpic_p)
17651	i_ehdrp->e_ident[EI_OSABI] |= ELFOSABI_ARM_FDPIC;
17652    }
17653
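  /* For EABI v5 executables and shared objects, record in e_flags
     whether the VFP (hard-float) or base (soft-float) argument-passing
     variant of the ABI is in use, based on the Tag_ABI_VFP_args build
     attribute.  */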
17654  if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
17655      && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
17656    {
17657      int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
17658      if (abi == AEABI_VFP_args_vfp)
17659	i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
17660      else
17661	i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
17662    }
17663
  /* Scan each segment and set its p_flags attribute to PF_X if the
     segment contains only sections with the SHF_ARM_PURECODE flag.  */
17666  for (m = elf_seg_map (abfd); m != NULL; m = m->next)
17667    {
17668      unsigned int j;
17669
17670      if (m->count == 0)
17671	continue;
17672      for (j = 0; j < m->count; j++)
17673	{
17674	  if (!(elf_section_flags (m->sections[j]) & SHF_ARM_PURECODE))
17675	    break;
17676	}
17677      if (j == m->count)
17678	{
17679	  m->p_flags = PF_X;
17680	  m->p_flags_valid = 1;
17681	}
17682    }
17683  return true;
17684}
17685
17686static enum elf_reloc_type_class
17687elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
17688			    const asection *rel_sec ATTRIBUTE_UNUSED,
17689			    const Elf_Internal_Rela *rela)
17690{
17691  switch ((int) ELF32_R_TYPE (rela->r_info))
17692    {
17693    case R_ARM_RELATIVE:
17694      return reloc_class_relative;
17695    case R_ARM_JUMP_SLOT:
17696      return reloc_class_plt;
17697    case R_ARM_COPY:
17698      return reloc_class_copy;
17699    case R_ARM_IRELATIVE:
17700      return reloc_class_ifunc;
17701    default:
17702      return reloc_class_normal;
17703    }
17704}
17705
17706static void
17707arm_final_write_processing (bfd *abfd)
17708{
17709  bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
17710}
17711
17712static bool
17713elf32_arm_final_write_processing (bfd *abfd)
17714{
17715  arm_final_write_processing (abfd);
17716  return _bfd_elf_final_write_processing (abfd);
17717}
17718
/* Return TRUE if NAME is the name of an ARM unwinding table section.  */
17720
17721static bool
17722is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
17723{
17724  return (startswith (name, ELF_STRING_ARM_unwind)
17725	  || startswith (name, ELF_STRING_ARM_unwind_once));
17726}
17727
17728
17729/* Set the type and flags for an ARM section.  We do this by
17730   the section name, which is a hack, but ought to work.  */
17731
17732static bool
17733elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
17734{
17735  const char * name;
17736
17737  name = bfd_section_name (sec);
17738
17739  if (is_arm_elf_unwind_section_name (abfd, name))
17740    {
17741      hdr->sh_type = SHT_ARM_EXIDX;
17742      hdr->sh_flags |= SHF_LINK_ORDER;
17743    }
17744
17745  if (sec->flags & SEC_ELF_PURECODE)
17746    hdr->sh_flags |= SHF_ARM_PURECODE;
17747
17748  return true;
17749}
17750
17751/* Handle an ARM specific section when reading an object file.  This is
17752   called when bfd_section_from_shdr finds a section with an unknown
17753   type.  */
17754
17755static bool
17756elf32_arm_section_from_shdr (bfd *abfd,
17757			     Elf_Internal_Shdr * hdr,
17758			     const char *name,
17759			     int shindex)
17760{
17761  /* There ought to be a place to keep ELF backend specific flags, but
17762     at the moment there isn't one.  We just keep track of the
17763     sections by their name, instead.  Fortunately, the ABI gives
17764     names for all the ARM specific sections, so we will probably get
17765     away with this.  */
17766  switch (hdr->sh_type)
17767    {
17768    case SHT_ARM_EXIDX:
17769    case SHT_ARM_PREEMPTMAP:
17770    case SHT_ARM_ATTRIBUTES:
17771      break;
17772
17773    default:
17774      return false;
17775    }
17776
17777  if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
17778    return false;
17779
17780  return true;
17781}
17782
17783static _arm_elf_section_data *
17784get_arm_elf_section_data (asection * sec)
17785{
17786  if (sec && sec->owner && is_arm_elf (sec->owner))
17787    return elf32_arm_section_data (sec);
17788  else
17789    return NULL;
17790}
17791
17792typedef struct
17793{
17794  void *flaginfo;
17795  struct bfd_link_info *info;
17796  asection *sec;
17797  int sec_shndx;
17798  int (*func) (void *, const char *, Elf_Internal_Sym *,
17799	       asection *, struct elf_link_hash_entry *);
17800} output_arch_syminfo;
17801
17802enum map_symbol_type
17803{
17804  ARM_MAP_ARM,
17805  ARM_MAP_THUMB,
17806  ARM_MAP_DATA
17807};
17808
17809
17810/* Output a single mapping symbol.  */
17811
17812static bool
17813elf32_arm_output_map_sym (output_arch_syminfo *osi,
17814			  enum map_symbol_type type,
17815			  bfd_vma offset)
17816{
17817  static const char *names[3] = {"$a", "$t", "$d"};
17818  Elf_Internal_Sym sym;
17819
17820  sym.st_value = osi->sec->output_section->vma
17821		 + osi->sec->output_offset
17822		 + offset;
17823  sym.st_size = 0;
17824  sym.st_other = 0;
17825  sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
17826  sym.st_shndx = osi->sec_shndx;
17827  sym.st_target_internal = 0;
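  /* Record the mapping symbol in the section's own map as well, so that
     later passes over the section contents (for example BE8 byte
     swapping) can find the code/data boundaries.  */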
17828  elf32_arm_section_map_add (osi->sec, names[type][1], offset);
17829  return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
17830}
17831
17832/* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
17833   IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt.  */
17834
17835static bool
17836elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
17837			    bool is_iplt_entry_p,
17838			    union gotplt_union *root_plt,
17839			    struct arm_plt_info *arm_plt)
17840{
17841  struct elf32_arm_link_hash_table *htab;
17842  bfd_vma addr, plt_header_size;
17843
17844  if (root_plt->offset == (bfd_vma) -1)
17845    return true;
17846
17847  htab = elf32_arm_hash_table (osi->info);
17848  if (htab == NULL)
17849    return false;
17850
17851  if (is_iplt_entry_p)
17852    {
17853      osi->sec = htab->root.iplt;
17854      plt_header_size = 0;
17855    }
17856  else
17857    {
17858      osi->sec = htab->root.splt;
17859      plt_header_size = htab->plt_header_size;
17860    }
17861  osi->sec_shndx = (_bfd_elf_section_from_bfd_section
17862		    (osi->info->output_bfd, osi->sec->output_section));
17863
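  /* The low bit of the recorded offset is used as a flag; mask it off
     to get the PLT offset itself.  */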
17864  addr = root_plt->offset & -2;
17865  if (htab->root.target_os == is_vxworks)
17866    {
17867      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
17868	return false;
17869      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
17870	return false;
17871      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
17872	return false;
17873      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
17874	return false;
17875    }
17876  else if (htab->root.target_os == is_nacl)
17877    {
17878      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
17879	return false;
17880    }
17881  else if (htab->fdpic_p)
17882    {
17883      enum map_symbol_type type = using_thumb_only (htab)
17884	? ARM_MAP_THUMB
17885	: ARM_MAP_ARM;
17886
17887      if (elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt))
17888	if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
17889	  return false;
17890      if (!elf32_arm_output_map_sym (osi, type, addr))
17891	return false;
17892      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 16))
17893	return false;
17894      if (htab->plt_entry_size == 4 * ARRAY_SIZE (elf32_arm_fdpic_plt_entry))
17895	if (!elf32_arm_output_map_sym (osi, type, addr + 24))
17896	  return false;
17897    }
17898  else if (using_thumb_only (htab))
17899    {
17900      if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
17901	return false;
17902    }
17903  else
17904    {
17905      bool thumb_stub_p;
17906
17907      thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
17908      if (thumb_stub_p)
17909	{
17910	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
17911	    return false;
17912	}
17913#ifdef FOUR_WORD_PLT
17914      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
17915	return false;
17916      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
17917	return false;
17918#else
      /* A three-word PLT with no Thumb thunk contains only ARM code,
	 so we only need to output a mapping symbol for the first PLT
	 entry and for entries with Thumb thunks.  */
17922      if (thumb_stub_p || addr == plt_header_size)
17923	{
17924	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
17925	    return false;
17926	}
17927#endif
17928    }
17929
17930  return true;
17931}
17932
17933/* Output mapping symbols for PLT entries associated with H.  */
17934
17935static bool
17936elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
17937{
17938  output_arch_syminfo *osi = (output_arch_syminfo *) inf;
17939  struct elf32_arm_link_hash_entry *eh;
17940
17941  if (h->root.type == bfd_link_hash_indirect)
17942    return true;
17943
17944  if (h->root.type == bfd_link_hash_warning)
17945    /* When warning symbols are created, they **replace** the "real"
17946       entry in the hash table, thus we never get to see the real
17947       symbol in a hash traversal.  So look at it now.  */
17948    h = (struct elf_link_hash_entry *) h->root.u.i.link;
17949
17950  eh = (struct elf32_arm_link_hash_entry *) h;
17951  return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
17952				     &h->plt, &eh->plt);
17953}
17954
/* Bind a veneered symbol to its veneer, identified by its hash entry
   STUB_ENTRY.  The veneered location thus loses its symbol.  */
17957
17958static void
17959arm_stub_claim_sym (struct elf32_arm_stub_hash_entry *stub_entry)
17960{
17961  struct elf32_arm_link_hash_entry *hash = stub_entry->h;
17962
17963  BFD_ASSERT (hash);
17964  hash->root.root.u.def.section = stub_entry->stub_sec;
17965  hash->root.root.u.def.value = stub_entry->stub_offset;
17966  hash->root.size = stub_entry->stub_size;
17967}
17968
17969/* Output a single local symbol for a generated stub.  */
17970
17971static bool
17972elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
17973			   bfd_vma offset, bfd_vma size)
17974{
17975  Elf_Internal_Sym sym;
17976
17977  sym.st_value = osi->sec->output_section->vma
17978		 + osi->sec->output_offset
17979		 + offset;
17980  sym.st_size = size;
17981  sym.st_other = 0;
17982  sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
17983  sym.st_shndx = osi->sec_shndx;
17984  sym.st_target_internal = 0;
17985  return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
17986}
17987
17988static bool
17989arm_map_one_stub (struct bfd_hash_entry * gen_entry,
17990		  void * in_arg)
17991{
17992  struct elf32_arm_stub_hash_entry *stub_entry;
17993  asection *stub_sec;
17994  bfd_vma addr;
17995  char *stub_name;
17996  output_arch_syminfo *osi;
17997  const insn_sequence *template_sequence;
17998  enum stub_insn_type prev_type;
17999  int size;
18000  int i;
18001  enum map_symbol_type sym_type;
18002
18003  /* Massage our args to the form they really have.  */
18004  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
18005  osi = (output_arch_syminfo *) in_arg;
18006
18007  stub_sec = stub_entry->stub_sec;
18008
18009  /* Ensure this stub is attached to the current section being
18010     processed.  */
18011  if (stub_sec != osi->sec)
18012    return true;
18013
18014  addr = (bfd_vma) stub_entry->stub_offset;
18015  template_sequence = stub_entry->stub_template;
18016
18017  if (arm_stub_sym_claimed (stub_entry->stub_type))
18018    arm_stub_claim_sym (stub_entry);
18019  else
18020    {
18021      stub_name = stub_entry->output_name;
18022      switch (template_sequence[0].type)
18023	{
18024	case ARM_TYPE:
18025	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr,
18026					  stub_entry->stub_size))
18027	    return false;
18028	  break;
18029	case THUMB16_TYPE:
18030	case THUMB32_TYPE:
18031	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
18032					  stub_entry->stub_size))
18033	    return false;
18034	  break;
18035	default:
18036	  BFD_FAIL ();
18037	  return 0;
18038	}
18039    }
18040
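  /* Walk the stub template and emit a new mapping symbol each time the
     instruction type changes between ARM, Thumb and data.  */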
18041  prev_type = DATA_TYPE;
18042  size = 0;
18043  for (i = 0; i < stub_entry->stub_template_size; i++)
18044    {
18045      switch (template_sequence[i].type)
18046	{
18047	case ARM_TYPE:
18048	  sym_type = ARM_MAP_ARM;
18049	  break;
18050
18051	case THUMB16_TYPE:
18052	case THUMB32_TYPE:
18053	  sym_type = ARM_MAP_THUMB;
18054	  break;
18055
18056	case DATA_TYPE:
18057	  sym_type = ARM_MAP_DATA;
18058	  break;
18059
18060	default:
18061	  BFD_FAIL ();
18062	  return false;
18063	}
18064
18065      if (template_sequence[i].type != prev_type)
18066	{
18067	  prev_type = template_sequence[i].type;
18068	  if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
18069	    return false;
18070	}
18071
18072      switch (template_sequence[i].type)
18073	{
18074	case ARM_TYPE:
18075	case THUMB32_TYPE:
18076	  size += 4;
18077	  break;
18078
18079	case THUMB16_TYPE:
18080	  size += 2;
18081	  break;
18082
18083	case DATA_TYPE:
18084	  size += 4;
18085	  break;
18086
18087	default:
18088	  BFD_FAIL ();
18089	  return false;
18090	}
18091    }
18092
18093  return true;
18094}
18095
18096/* Output mapping symbols for linker generated sections,
18097   and for those data-only sections that do not have a
18098   $d.  */
18099
18100static bool
18101elf32_arm_output_arch_local_syms (bfd *output_bfd,
18102				  struct bfd_link_info *info,
18103				  void *flaginfo,
18104				  int (*func) (void *, const char *,
18105					       Elf_Internal_Sym *,
18106					       asection *,
18107					       struct elf_link_hash_entry *))
18108{
18109  output_arch_syminfo osi;
18110  struct elf32_arm_link_hash_table *htab;
18111  bfd_vma offset;
18112  bfd_size_type size;
18113  bfd *input_bfd;
18114
18115  htab = elf32_arm_hash_table (info);
18116  if (htab == NULL)
18117    return false;
18118
18119  check_use_blx (htab);
18120
18121  osi.flaginfo = flaginfo;
18122  osi.info = info;
18123  osi.func = func;
18124
18125  /* Add a $d mapping symbol to data-only sections that
18126     don't have any mapping symbol.  This may result in (harmless) redundant
18127     mapping symbols.  */
18128  for (input_bfd = info->input_bfds;
18129       input_bfd != NULL;
18130       input_bfd = input_bfd->link.next)
18131    {
18132      if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
18133	for (osi.sec = input_bfd->sections;
18134	     osi.sec != NULL;
18135	     osi.sec = osi.sec->next)
18136	  {
18137	    if (osi.sec->output_section != NULL
18138		&& ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
18139		    != 0)
18140		&& (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
18141		   == SEC_HAS_CONTENTS
18142		&& get_arm_elf_section_data (osi.sec) != NULL
18143		&& get_arm_elf_section_data (osi.sec)->mapcount == 0
18144		&& osi.sec->size > 0
18145		&& (osi.sec->flags & SEC_EXCLUDE) == 0)
18146	      {
18147		osi.sec_shndx = _bfd_elf_section_from_bfd_section
18148		  (output_bfd, osi.sec->output_section);
18149		if (osi.sec_shndx != (int)SHN_BAD)
18150		  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
18151	      }
18152	  }
18153    }
18154
18155  /* ARM->Thumb glue.  */
18156  if (htab->arm_glue_size > 0)
18157    {
18158      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
18159					ARM2THUMB_GLUE_SECTION_NAME);
18160
18161      osi.sec_shndx = _bfd_elf_section_from_bfd_section
18162	  (output_bfd, osi.sec->output_section);
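      /* All veneers in the ARM->Thumb glue section have the same size,
	 which depends on whether the PIC, BLX-based or plain static
	 sequence is used; step through the glue one veneer at a time,
	 marking each veneer's code and its trailing data word.  */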
18163      if (bfd_link_pic (info) || htab->root.is_relocatable_executable
18164	  || htab->pic_veneer)
18165	size = ARM2THUMB_PIC_GLUE_SIZE;
18166      else if (htab->use_blx)
18167	size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
18168      else
18169	size = ARM2THUMB_STATIC_GLUE_SIZE;
18170
18171      for (offset = 0; offset < htab->arm_glue_size; offset += size)
18172	{
18173	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
18174	  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
18175	}
18176    }
18177
18178  /* Thumb->ARM glue.  */
18179  if (htab->thumb_glue_size > 0)
18180    {
18181      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
18182					THUMB2ARM_GLUE_SECTION_NAME);
18183
18184      osi.sec_shndx = _bfd_elf_section_from_bfd_section
18185	  (output_bfd, osi.sec->output_section);
18186      size = THUMB2ARM_GLUE_SIZE;
18187
18188      for (offset = 0; offset < htab->thumb_glue_size; offset += size)
18189	{
18190	  elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
18191	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
18192	}
18193    }
18194
18195  /* ARMv4 BX veneers.  */
18196  if (htab->bx_glue_size > 0)
18197    {
18198      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
18199					ARM_BX_GLUE_SECTION_NAME);
18200
18201      osi.sec_shndx = _bfd_elf_section_from_bfd_section
18202	  (output_bfd, osi.sec->output_section);
18203
18204      elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
18205    }
18206
  /* Long call stubs.  */
18208  if (htab->stub_bfd && htab->stub_bfd->sections)
18209    {
18210      asection* stub_sec;
18211
18212      for (stub_sec = htab->stub_bfd->sections;
18213	   stub_sec != NULL;
18214	   stub_sec = stub_sec->next)
18215	{
18216	  /* Ignore non-stub sections.  */
18217	  if (!strstr (stub_sec->name, STUB_SUFFIX))
18218	    continue;
18219
18220	  osi.sec = stub_sec;
18221
18222	  osi.sec_shndx = _bfd_elf_section_from_bfd_section
18223	    (output_bfd, osi.sec->output_section);
18224
18225	  bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
18226	}
18227    }
18228
18229  /* Finally, output mapping symbols for the PLT.  */
18230  if (htab->root.splt && htab->root.splt->size > 0)
18231    {
18232      osi.sec = htab->root.splt;
18233      osi.sec_shndx = (_bfd_elf_section_from_bfd_section
18234		       (output_bfd, osi.sec->output_section));
18235
18236      /* Output mapping symbols for the plt header.  */
18237      if (htab->root.target_os == is_vxworks)
18238	{
18239	  /* VxWorks shared libraries have no PLT header.  */
18240	  if (!bfd_link_pic (info))
18241	    {
18242	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18243		return false;
18244	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
18245		return false;
18246	    }
18247	}
18248      else if (htab->root.target_os == is_nacl)
18249	{
18250	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18251	    return false;
18252	}
18253      else if (using_thumb_only (htab) && !htab->fdpic_p)
18254	{
18255	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0))
18256	    return false;
18257	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
18258	    return false;
18259	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16))
18260	    return false;
18261	}
18262      else if (!htab->fdpic_p)
18263	{
18264	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18265	    return false;
18266#ifndef FOUR_WORD_PLT
18267	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
18268	    return false;
18269#endif
18270	}
18271    }
18272  if (htab->root.target_os == is_nacl
18273      && htab->root.iplt
18274      && htab->root.iplt->size > 0)
18275    {
18276      /* NaCl uses a special first entry in .iplt too.  */
18277      osi.sec = htab->root.iplt;
18278      osi.sec_shndx = (_bfd_elf_section_from_bfd_section
18279		       (output_bfd, osi.sec->output_section));
18280      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18281	return false;
18282    }
18283  if ((htab->root.splt && htab->root.splt->size > 0)
18284      || (htab->root.iplt && htab->root.iplt->size > 0))
18285    {
18286      elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
18287      for (input_bfd = info->input_bfds;
18288	   input_bfd != NULL;
18289	   input_bfd = input_bfd->link.next)
18290	{
18291	  struct arm_local_iplt_info **local_iplt;
18292	  unsigned int i, num_syms;
18293
18294	  local_iplt = elf32_arm_local_iplt (input_bfd);
18295	  if (local_iplt != NULL)
18296	    {
18297	      num_syms = elf_symtab_hdr (input_bfd).sh_info;
18298	      if (num_syms > elf32_arm_num_entries (input_bfd))
18299		{
18300		  _bfd_error_handler (_("\
18301%pB: Number of symbols in input file has increased from %lu to %u\n"),
18302				      input_bfd,
18303				      (unsigned long) elf32_arm_num_entries (input_bfd),
18304				      num_syms);
18305		  return false;
18306		}
18307	      for (i = 0; i < num_syms; i++)
18308		if (local_iplt[i] != NULL
18309		    && !elf32_arm_output_plt_map_1 (&osi, true,
18310						    &local_iplt[i]->root,
18311						    &local_iplt[i]->arm))
18312		  return false;
18313	    }
18314	}
18315    }
18316  if (htab->root.tlsdesc_plt != 0)
18317    {
18318      /* Mapping symbols for the lazy tls trampoline.  */
18319      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM,
18320				     htab->root.tlsdesc_plt))
18321	return false;
18322
18323      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
18324				     htab->root.tlsdesc_plt + 24))
18325	return false;
18326    }
18327  if (htab->tls_trampoline != 0)
18328    {
18329      /* Mapping symbols for the tls trampoline.  */
18330      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
18331	return false;
18332#ifdef FOUR_WORD_PLT
18333      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
18334				     htab->tls_trampoline + 12))
18335	return false;
18336#endif
18337    }
18338
18339  return true;
18340}
18341
18342/* Filter normal symbols of CMSE entry functions of ABFD to include in
18343   the import library.  All SYMCOUNT symbols of ABFD can be examined
18344   from their pointers in SYMS.  Pointers of symbols to keep should be
   stored contiguously at the beginning of that array.
18346
18347   Returns the number of symbols to keep.  */
18348
18349static unsigned int
18350elf32_arm_filter_cmse_symbols (bfd *abfd ATTRIBUTE_UNUSED,
18351			       struct bfd_link_info *info,
18352			       asymbol **syms, long symcount)
18353{
18354  size_t maxnamelen;
18355  char *cmse_name;
18356  long src_count, dst_count = 0;
18357  struct elf32_arm_link_hash_table *htab;
18358
18359  htab = elf32_arm_hash_table (info);
18360  if (!htab->stub_bfd || !htab->stub_bfd->sections)
18361    symcount = 0;
18362
18363  maxnamelen = 128;
18364  cmse_name = (char *) bfd_malloc (maxnamelen);
18365  BFD_ASSERT (cmse_name);
18366
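  /* Keep only the global or weak function symbols for which a
     corresponding special symbol (CMSE_PREFIX followed by the symbol
     name) is defined, i.e. the CMSE secure entry functions.  */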
18367  for (src_count = 0; src_count < symcount; src_count++)
18368    {
18369      struct elf32_arm_link_hash_entry *cmse_hash;
18370      asymbol *sym;
18371      flagword flags;
18372      char *name;
18373      size_t namelen;
18374
18375      sym = syms[src_count];
18376      flags = sym->flags;
18377      name = (char *) bfd_asymbol_name (sym);
18378
18379      if ((flags & BSF_FUNCTION) != BSF_FUNCTION)
18380	continue;
18381      if (!(flags & (BSF_GLOBAL | BSF_WEAK)))
18382	continue;
18383
18384      namelen = strlen (name) + sizeof (CMSE_PREFIX) + 1;
18385      if (namelen > maxnamelen)
18386	{
18387	  cmse_name = (char *)
18388	    bfd_realloc (cmse_name, namelen);
18389	  maxnamelen = namelen;
18390	}
18391      snprintf (cmse_name, maxnamelen, "%s%s", CMSE_PREFIX, name);
18392      cmse_hash = (struct elf32_arm_link_hash_entry *)
18393	elf_link_hash_lookup (&(htab)->root, cmse_name, false, false, true);
18394
18395      if (!cmse_hash
18396	  || (cmse_hash->root.root.type != bfd_link_hash_defined
18397	      && cmse_hash->root.root.type != bfd_link_hash_defweak)
18398	  || cmse_hash->root.type != STT_FUNC)
18399	continue;
18400
18401      syms[dst_count++] = sym;
18402    }
18403  free (cmse_name);
18404
18405  syms[dst_count] = NULL;
18406
18407  return dst_count;
18408}
18409
18410/* Filter symbols of ABFD to include in the import library.  All
18411   SYMCOUNT symbols of ABFD can be examined from their pointers in
   SYMS.  Pointers of symbols to keep should be stored contiguously at
   the beginning of that array.
18414
18415   Returns the number of symbols to keep.  */
18416
18417static unsigned int
18418elf32_arm_filter_implib_symbols (bfd *abfd ATTRIBUTE_UNUSED,
18419				 struct bfd_link_info *info,
18420				 asymbol **syms, long symcount)
18421{
18422  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
18423
18424  /* Requirement 8 of "ARM v8-M Security Extensions: Requirements on
18425     Development Tools" (ARM-ECM-0359818) mandates Secure Gateway import
18426     library to be a relocatable object file.  */
18427  BFD_ASSERT (!(bfd_get_file_flags (info->out_implib_bfd) & EXEC_P));
18428  if (globals->cmse_implib)
18429    return elf32_arm_filter_cmse_symbols (abfd, info, syms, symcount);
18430  else
18431    return _bfd_elf_filter_global_symbols (abfd, info, syms, symcount);
18432}
18433
18434/* Allocate target specific section data.  */
18435
18436static bool
18437elf32_arm_new_section_hook (bfd *abfd, asection *sec)
18438{
18439  if (!sec->used_by_bfd)
18440    {
18441      _arm_elf_section_data *sdata;
18442      size_t amt = sizeof (*sdata);
18443
18444      sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
18445      if (sdata == NULL)
18446	return false;
18447      sec->used_by_bfd = sdata;
18448    }
18449
18450  return _bfd_elf_new_section_hook (abfd, sec);
18451}
18452
18453
18454/* Used to order a list of mapping symbols by address.  */
18455
18456static int
18457elf32_arm_compare_mapping (const void * a, const void * b)
18458{
18459  const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
18460  const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
18461
18462  if (amap->vma > bmap->vma)
18463    return 1;
18464  else if (amap->vma < bmap->vma)
18465    return -1;
18466  else if (amap->type > bmap->type)
18467    /* Ensure results do not depend on the host qsort for objects with
18468       multiple mapping symbols at the same address by sorting on type
18469       after vma.  */
18470    return 1;
18471  else if (amap->type < bmap->type)
18472    return -1;
18473  else
18474    return 0;
18475}
18476
18477/* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified.  */
18478
18479static unsigned long
18480offset_prel31 (unsigned long addr, bfd_vma offset)
18481{
18482  return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
18483}
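
/* For example, offset_prel31 (0x80000010, 8) yields 0x80000018: only the
   low 31 bits take part in the addition, while bit 31 of ADDR is preserved
   even if the 31-bit sum overflows.  */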
18484
18485/* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
18486   relocations.  */
18487
18488static void
18489copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
18490{
18491  unsigned long first_word = bfd_get_32 (output_bfd, from);
18492  unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
18493
18494  /* High bit of first word is supposed to be zero.  */
18495  if ((first_word & 0x80000000ul) == 0)
18496    first_word = offset_prel31 (first_word, offset);
18497
18498  /* If the high bit of the second word is clear, and the bit pattern is not 0x1
18499     (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry.  */
18500  if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
18501    second_word = offset_prel31 (second_word, offset);
18502
18503  bfd_put_32 (output_bfd, first_word, to);
18504  bfd_put_32 (output_bfd, second_word, to + 4);
18505}
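
/* For instance, copy_exidx_entry rebiases only the first word (the prel31
   offset to the function) when the second word is EXIDX_CANTUNWIND (0x1) or
   an inline unwind descriptor (high bit set); when the second word is a
   prel31 offset to an .ARM.extab entry, both words are rebiased.  */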
18506
18507/* Data for make_branch_to_a8_stub().  */
18508
18509struct a8_branch_to_stub_data
18510{
18511  asection *writing_section;
18512  bfd_byte *contents;
18513};
18514
18515
18516/* Helper to insert branches to Cortex-A8 erratum stubs in the right
18517   places for a particular section.  */
18518
18519static bool
18520make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
18521		       void *in_arg)
18522{
18523  struct elf32_arm_stub_hash_entry *stub_entry;
18524  struct a8_branch_to_stub_data *data;
18525  bfd_byte *contents;
18526  unsigned long branch_insn;
18527  bfd_vma veneered_insn_loc, veneer_entry_loc;
18528  bfd_signed_vma branch_offset;
18529  bfd *abfd;
18530  unsigned int loc;
18531
18532  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
18533  data = (struct a8_branch_to_stub_data *) in_arg;
18534
18535  if (stub_entry->target_section != data->writing_section
18536      || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
18537    return true;
18538
18539  contents = data->contents;
18540
18541  /* We use target_section as Cortex-A8 erratum workaround stubs are only
18542     generated when both source and target are in the same section.  */
18543  veneered_insn_loc = stub_entry->target_section->output_section->vma
18544		      + stub_entry->target_section->output_offset
18545		      + stub_entry->source_value;
18546
18547  veneer_entry_loc = stub_entry->stub_sec->output_section->vma
18548		     + stub_entry->stub_sec->output_offset
18549		     + stub_entry->stub_offset;
18550
18551  if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
18552    veneered_insn_loc &= ~3u;
18553
18554  branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
18555
18556  abfd = stub_entry->target_section->owner;
18557  loc = stub_entry->source_value;
18558
18559  /* We attempt to avoid this condition by setting stubs_always_after_branch
18560     in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
18561     This check is just to be on the safe side...  */
18562  if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
18563    {
18564      _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub is "
18565			    "allocated in unsafe location"), abfd);
18566      return false;
18567    }
18568
18569  switch (stub_entry->stub_type)
18570    {
18571    case arm_stub_a8_veneer_b:
18572    case arm_stub_a8_veneer_b_cond:
18573      branch_insn = 0xf0009000;
18574      goto jump24;
18575
18576    case arm_stub_a8_veneer_blx:
18577      branch_insn = 0xf000e800;
18578      goto jump24;
18579
18580    case arm_stub_a8_veneer_bl:
18581      {
18582	unsigned int i1, j1, i2, j2, s;
18583
18584	branch_insn = 0xf000d000;
18585
18586      jump24:
18587	if (branch_offset < -16777216 || branch_offset > 16777214)
18588	  {
18589	    /* There's not much we can do apart from complain if this
18590	       happens.  */
18591	    _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub out "
18592				  "of range (input file too large)"), abfd);
18593	    return false;
18594	  }
18595
18596	/* i1 = not(j1 eor s), so:
18597	   not i1 = j1 eor s
18598	   j1 = (not i1) eor s.  */
18599
18600	branch_insn |= (branch_offset >> 1) & 0x7ff;
18601	branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
18602	i2 = (branch_offset >> 22) & 1;
18603	i1 = (branch_offset >> 23) & 1;
18604	s = (branch_offset >> 24) & 1;
18605	j1 = (!i1) ^ s;
18606	j2 = (!i2) ^ s;
18607	branch_insn |= j2 << 11;
18608	branch_insn |= j1 << 13;
18609	branch_insn |= s << 26;
18610      }
18611      break;
18612
18613    default:
18614      BFD_FAIL ();
18615      return false;
18616    }
18617
18618  bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[loc]);
18619  bfd_put_16 (abfd, branch_insn & 0xffff, &contents[loc + 2]);
18620
18621  return true;
18622}
18623
18624/* Beginning of stm32l4xx work-around.  */
18625
18626/* Functions encoding the instructions needed to emit the
18627   fix-stm32l4xx-629360 work-around.
18628   The encodings are taken from the
18629   ARM (C) Architecture Reference Manual,
18630   ARMv7-A and ARMv7-R edition,
18631   ARM DDI 0406C.b (ID072512).  */
18632
18633static inline bfd_vma
18634create_instruction_branch_absolute (int branch_offset)
18635{
18636  /* A8.8.18 B (A8-334)
18637     B target_address (Encoding T4).  */
18638  /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii.  */
18639  /* jump offset is:  S:I1:I2:imm10:imm11:0.  */
18640  /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S).  */
18641
18642  int s = ((branch_offset & 0x1000000) >> 24);
18643  int j1 = s ^ !((branch_offset & 0x800000) >> 23);
18644  int j2 = s ^ !((branch_offset & 0x400000) >> 22);
18645
18646  if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24))
18647    BFD_ASSERT (0 && "Error: branch out of range.  Cannot create branch.");
18648
18649  bfd_vma patched_inst = 0xf0009000
18650    | s << 26 /* S.  */
18651    | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10.  */
18652    | j1 << 13 /* J1.  */
18653    | j2 << 11 /* J2.  */
18654    | (((unsigned long) (branch_offset) >> 1) & 0x7ff); /* imm11.  */
18655
18656  return patched_inst;
18657}
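
/* As a worked example, a branch offset of -4 (a B.W to the instruction
   itself, since the offset is relative to the instruction address plus 4)
   gives S = 1, J1 = J2 = 1, imm10 = 0x3ff and imm11 = 0x7fe, so
   create_instruction_branch_absolute (-4) == 0xf7ffbffe.  */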
18658
18659static inline bfd_vma
18660create_instruction_ldmia (int base_reg, int wback, int reg_mask)
18661{
18662  /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
18663     LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2).  */
18664  bfd_vma patched_inst = 0xe8900000
18665    | (/*W=*/wback << 21)
18666    | (base_reg << 16)
18667    | (reg_mask & 0x0000ffff);
18668
18669  return patched_inst;
18670}
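
/* For example, create_instruction_ldmia (8, 1, 0x00ff), i.e. base r8 with
   write-back and register list {r0-r7}, should yield 0xe8b800ff
   ("LDMIA.W r8!, {r0-r7}").  */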
18671
18672static inline bfd_vma
18673create_instruction_ldmdb (int base_reg, int wback, int reg_mask)
18674{
18675  /* A8.8.60 LDMDB/LDMEA (A8-402)
18676     LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1).  */
18677  bfd_vma patched_inst = 0xe9100000
18678    | (/*W=*/wback << 21)
18679    | (base_reg << 16)
18680    | (reg_mask & 0x0000ffff);
18681
18682  return patched_inst;
18683}
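
/* For example, create_instruction_ldmdb (8, 1, 0x00ff), i.e. base r8 with
   write-back and register list {r0-r7}, should yield 0xe93800ff
   ("LDMDB r8!, {r0-r7}").  */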
18684
18685static inline bfd_vma
18686create_instruction_mov (int target_reg, int source_reg)
18687{
18688  /* A8.8.103 MOV (register) (A8-486)
18689     MOV Rd, Rm (Encoding T1).  */
18690  bfd_vma patched_inst = 0x4600
18691    | (target_reg & 0x7)
18692    | ((target_reg & 0x8) >> 3) << 7
18693    | (source_reg << 3);
18694
18695  return patched_inst;
18696}
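
/* For example, create_instruction_mov (9, 0) should yield 0x4681
   ("MOV r9, r0"): Rd[2:0] goes in bits 2-0, Rd[3] in bit 7 and Rm in
   bits 6-3.  */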
18697
18698static inline bfd_vma
18699create_instruction_sub (int target_reg, int source_reg, int value)
18700{
18701  /* A8.8.221 SUB (immediate) (A8-708)
18702     SUB Rd, Rn, #value (Encoding T3).  */
18703  bfd_vma patched_inst = 0xf1a00000
18704    | (target_reg << 8)
18705    | (source_reg << 16)
18706    | (/*S=*/0 << 20)
18707    | ((value & 0x800) >> 11) << 26
18708    | ((value & 0x700) >>  8) << 12
18709    | (value & 0x0ff);
18710
18711  return patched_inst;
18712}
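
/* For example, create_instruction_sub (0, 0, 32) should yield 0xf1a00020
   ("SUB.W r0, r0, #32"); the byte counts passed by the stubs below are
   small enough to fit the imm8 field directly.  */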
18713
18714static inline bfd_vma
18715create_instruction_vldmia (int base_reg, int is_dp, int wback, int num_words,
18716			   int first_reg)
18717{
18718  /* A8.8.332 VLDM (A8-922)
18719     VLDM{MODE} Rn{!}, {list} (Encoding T1 or T2).  */
18720  bfd_vma patched_inst = (is_dp ? 0xec900b00 : 0xec900a00)
18721    | (/*W=*/wback << 21)
18722    | (base_reg << 16)
18723    | (num_words & 0x000000ff)
18724    | (((unsigned)first_reg >> 1) & 0x0000000f) << 12
18725    | (first_reg & 0x00000001) << 22;
18726
18727  return patched_inst;
18728}
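
/* For example, create_instruction_vldmia (8, 1, 1, 16, 0), i.e. base r8,
   double precision, write-back, 16 words starting at d0, should yield
   0xecb80b10 ("VLDMIA r8!, {d0-d7}").  */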
18729
18730static inline bfd_vma
18731create_instruction_vldmdb (int base_reg, int is_dp, int num_words,
18732			   int first_reg)
18733{
18734  /* A8.8.332 VLDM (A8-922)
18735     VLDM{MODE} Rn!, {list} (Encoding T1 or T2).  */
18736  bfd_vma patched_inst = (is_dp ? 0xed300b00 : 0xed300a00)
18737    | (base_reg << 16)
18738    | (num_words & 0x000000ff)
18739    | (((unsigned)first_reg >> 1) & 0x0000000f) << 12
18740    | (first_reg & 0x00000001) << 22;
18741
18742  return patched_inst;
18743}
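
/* For example, create_instruction_vldmdb (8, 1, 16, 0), i.e. base r8,
   double precision, 16 words starting at d0, should yield 0xed380b10
   ("VLDMDB r8!, {d0-d7}").  */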
18744
18745static inline bfd_vma
18746create_instruction_udf_w (int value)
18747{
18748  /* A8.8.247 UDF (A8-758)
18749     Undefined (Encoding T2).  */
18750  bfd_vma patched_inst = 0xf7f0a000
18751    | (value & 0x00000fff)
18752    | (value & 0x0000f000) << 4;	/* imm4 goes in bits 19-16.  */
18753
18754  return patched_inst;
18755}
18756
18757static inline bfd_vma
18758create_instruction_udf (int value)
18759{
18760  /* A8.8.247 UDF (A8-758)
18761     Undefined (Encoding T1).  */
18762  bfd_vma patched_inst = 0xde00
18763    | (value & 0xff);
18764
18765  return patched_inst;
18766}
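
/* For example, create_instruction_udf (0) yields 0xde00 ("UDF #0",
   encoding T1) and create_instruction_udf_w (0) yields 0xf7f0a000
   ("UDF.W #0", encoding T2); the stubs below only ever use a zero
   immediate.  */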
18767
18768/* Functions writing an instruction in memory, returning the next
18769   memory position to write to.  */
18770
18771static inline bfd_byte *
18772push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab,
18773		    bfd * output_bfd, bfd_byte *pt, insn32 insn)
18774{
18775  put_thumb2_insn (htab, output_bfd, insn, pt);
18776  return pt + 4;
18777}
18778
18779static inline bfd_byte *
18780push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab,
18781		    bfd * output_bfd, bfd_byte *pt, insn32 insn)
18782{
18783  put_thumb_insn (htab, output_bfd, insn, pt);
18784  return pt + 2;
18785}
18786
18787/* Function filling a region of memory with T1 and T2 UDF instructions,
18788   taking care of alignment.  */
18789
18790static bfd_byte *
18791stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab,
18792			 bfd *			 output_bfd,
18793			 const bfd_byte * const	 base_stub_contents,
18794			 bfd_byte * const	 from_stub_contents,
18795			 const bfd_byte * const	 end_stub_contents)
18796{
18797  bfd_byte *current_stub_contents = from_stub_contents;
18798
18799  /* Fill the remainder of the stub with deterministic contents: UDF
18800     instructions.
18801     If needed, emit a T1 UDF first to realign to a 4-byte boundary so
18802     that T2 UDFs can be used for the rest.  */
18803  if ((current_stub_contents < end_stub_contents)
18804      && !((current_stub_contents - base_stub_contents) % 2)
18805      && ((current_stub_contents - base_stub_contents) % 4))
18806    current_stub_contents =
18807      push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
18808			  create_instruction_udf (0));
18809
18810  for (; current_stub_contents < end_stub_contents;)
18811    current_stub_contents =
18812      push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18813			  create_instruction_udf_w (0));
18814
18815  return current_stub_contents;
18816}
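
/* For instance, if 6 bytes of a 16-byte stub have been written, a single
   T1 UDF (2 bytes) restores 4-byte alignment and two T2 UDF.W instructions
   (4 bytes each) fill the remaining 8 bytes.  */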
18817
18818/* Functions writing the stream of instructions equivalent to the
18819   derived sequence for ldmia, ldmdb, vldm respectively.  */
18820
18821static void
18822stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
18823				       bfd * output_bfd,
18824				       const insn32 initial_insn,
18825				       const bfd_byte *const initial_insn_addr,
18826				       bfd_byte *const base_stub_contents)
18827{
18828  int wback = (initial_insn & 0x00200000) >> 21;
18829  int ri, rn = (initial_insn & 0x000F0000) >> 16;
18830  int insn_all_registers = initial_insn & 0x0000ffff;
18831  int insn_low_registers, insn_high_registers;
18832  int usable_register_mask;
18833  int nb_registers = elf32_arm_popcount (insn_all_registers);
18834  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
18835  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
18836  bfd_byte *current_stub_contents = base_stub_contents;
18837
18838  BFD_ASSERT (is_thumb2_ldmia (initial_insn));
18839
18840  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with load
18841     sequences of eight registers or fewer, which do not trigger the
18842     hardware issue.  */
18843  if (nb_registers <= 8)
18844    {
18845      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
18846      current_stub_contents =
18847	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18848			    initial_insn);
18849
18850      /* B initial_insn_addr+4.  */
18851      if (!restore_pc)
18852	current_stub_contents =
18853	  push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18854			      create_instruction_branch_absolute
18855			      (initial_insn_addr - current_stub_contents));
18856
18857      /* Fill the remainder of the stub with deterministic contents.  */
18858      current_stub_contents =
18859	stm32l4xx_fill_stub_udf (htab, output_bfd,
18860				 base_stub_contents, current_stub_contents,
18861				 base_stub_contents +
18862				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
18863
18864      return;
18865    }
18866
18867  /* - reg_list[13] == 0.  */
18868  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);
18869
18870  /* - reg_list[14] & reg_list[15] != 1.  */
18871  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);
18872
18873  /* - if (wback==1) reg_list[rn] == 0.  */
18874  BFD_ASSERT (!wback || !restore_rn);
18875
18876  /* - nb_registers > 8.  */
18877  BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);
18878
18879  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */
18880
18881  /* In the following algorithm, we split this wide LDM using 2 LDM insns:
18882    - One with the 7 lowest registers (register mask 0x007F)
18883      This LDM will end up loading between 2 and 7 registers
18884    - One with the 7 highest registers (register mask 0xDF80)
18885      This LDM will end up loading between 2 and 7 registers.  */
18886  insn_low_registers = insn_all_registers & 0x007F;
18887  insn_high_registers = insn_all_registers & 0xDF80;
18888
18889  /* A spare register may be needed during this veneer to temporarily
18890     handle the base register.  This register will be restored with the
18891     last LDM operation.
18892     The usable register may be any general purpose register (that
18893     excludes PC, SP, LR : register mask is 0x1FFF).  */
18894  usable_register_mask = 0x1FFF;
18895
18896  /* Generate the stub function.  */
18897  if (wback)
18898    {
18899      /* LDMIA Rn!, {R-low-register-list} : (Encoding T2).  */
18900      current_stub_contents =
18901	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18902			    create_instruction_ldmia
18903			    (rn, /*wback=*/1, insn_low_registers));
18904
18905      /* LDMIA Rn!, {R-high-register-list} : (Encoding T2).  */
18906      current_stub_contents =
18907	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18908			    create_instruction_ldmia
18909			    (rn, /*wback=*/1, insn_high_registers));
18910      if (!restore_pc)
18911	{
18912	  /* B initial_insn_addr+4.  */
18913	  current_stub_contents =
18914	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18915				create_instruction_branch_absolute
18916				(initial_insn_addr - current_stub_contents));
18917	}
18918    }
18919  else /* if (!wback).  */
18920    {
18921      ri = rn;
18922
18923      /* If Rn is not part of the high-register-list, move it there.  */
18924      if (!(insn_high_registers & (1 << rn)))
18925	{
18926	  /* Choose a Ri in the high-register-list that will be restored.  */
18927	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
18928
18929	  /* MOV Ri, Rn.  */
18930	  current_stub_contents =
18931	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
18932				create_instruction_mov (ri, rn));
18933	}
18934
18935      /* LDMIA Ri!, {R-low-register-list} : (Encoding T2).  */
18936      current_stub_contents =
18937	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18938			    create_instruction_ldmia
18939			    (ri, /*wback=*/1, insn_low_registers));
18940
18941      /* LDMIA Ri, {R-high-register-list} : (Encoding T2).  */
18942      current_stub_contents =
18943	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18944			    create_instruction_ldmia
18945			    (ri, /*wback=*/0, insn_high_registers));
18946
18947      if (!restore_pc)
18948	{
18949	  /* B initial_insn_addr+4.  */
18950	  current_stub_contents =
18951	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18952				create_instruction_branch_absolute
18953				(initial_insn_addr - current_stub_contents));
18954	}
18955    }
18956
18957  /* Fill the remainder of the stub with deterministic contents.  */
18958  current_stub_contents =
18959    stm32l4xx_fill_stub_udf (htab, output_bfd,
18960			     base_stub_contents, current_stub_contents,
18961			     base_stub_contents +
18962			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
18963}
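
/* As an illustration of the rewriting above (not taken from any particular
   object file), "LDMIA r0, {r1-r12, lr}" loads 13 registers with no
   write-back and no PC, so it is replaced by:

	mov	r7, r0
	ldmia	r7!, {r1-r6}
	ldmia	r7, {r7-r12, lr}
	b.w	<initial_insn_addr + 4>

   followed by UDF padding: r7 is the lowest usable register of the high
   list, and the final LDMIA restores its real value from memory.  */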
18964
18965static void
18966stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
18967				       bfd * output_bfd,
18968				       const insn32 initial_insn,
18969				       const bfd_byte *const initial_insn_addr,
18970				       bfd_byte *const base_stub_contents)
18971{
18972  int wback = (initial_insn & 0x00200000) >> 21;
18973  int ri, rn = (initial_insn & 0x000f0000) >> 16;
18974  int insn_all_registers = initial_insn & 0x0000ffff;
18975  int insn_low_registers, insn_high_registers;
18976  int usable_register_mask;
18977  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
18978  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
18979  int nb_registers = elf32_arm_popcount (insn_all_registers);
18980  bfd_byte *current_stub_contents = base_stub_contents;
18981
18982  BFD_ASSERT (is_thumb2_ldmdb (initial_insn));
18983
18984  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with load
18985     sequences of eight registers or fewer, which do not trigger the
18986     hardware issue.  */
18987  if (nb_registers <= 8)
18988    {
18989      /* UNTOUCHED : LDMDB Rn{!}, {R-all-register-list}.  */
18990      current_stub_contents =
18991	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18992			    initial_insn);
18993
18994      /* B initial_insn_addr+4.  */
18995      current_stub_contents =
18996	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18997			    create_instruction_branch_absolute
18998			    (initial_insn_addr - current_stub_contents));
18999
19000      /* Fill the remainder of the stub with deterministic contents.  */
19001      current_stub_contents =
19002	stm32l4xx_fill_stub_udf (htab, output_bfd,
19003				 base_stub_contents, current_stub_contents,
19004				 base_stub_contents +
19005				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
19006
19007      return;
19008    }
19009
19010  /* - reg_list[13] == 0.  */
19011  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);
19012
19013  /* - reg_list[14] & reg_list[15] != 1.  */
19014  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);
19015
19016  /* - if (wback==1) reg_list[rn] == 0.  */
19017  BFD_ASSERT (!wback || !restore_rn);
19018
19019  /* - nb_registers > 8.  */
19020  BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);
19021
19022  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */
19023
19024  /* In the following algorithm, we split this wide LDM using 2 LDM insns:
19025    - One with the 7 lowest registers (register mask 0x007F)
19026      This LDM will end up loading between 2 and 7 registers
19027    - One with the 7 highest registers (register mask 0xDF80)
19028      This LDM will end up loading between 2 and 7 registers.  */
19029  insn_low_registers = insn_all_registers & 0x007F;
19030  insn_high_registers = insn_all_registers & 0xDF80;
19031
19032  /* A spare register may be needed during this veneer to temporarily
19033     handle the base register.  This register will be restored with
19034     the last LDM operation.
19035     The usable register may be any general purpose register (that excludes
19036     PC, SP, LR : register mask is 0x1FFF).  */
19037  usable_register_mask = 0x1FFF;
19038
19039  /* Generate the stub function.  */
19040  if (!wback && !restore_pc && !restore_rn)
19041    {
19042      /* Choose a Ri in the low-register-list that will be restored.  */
19043      ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));
19044
19045      /* MOV Ri, Rn.  */
19046      current_stub_contents =
19047	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
19048			    create_instruction_mov (ri, rn));
19049
19050      /* LDMDB Ri!, {R-high-register-list}.  */
19051      current_stub_contents =
19052	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19053			    create_instruction_ldmdb
19054			    (ri, /*wback=*/1, insn_high_registers));
19055
19056      /* LDMDB Ri, {R-low-register-list}.  */
19057      current_stub_contents =
19058	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19059			    create_instruction_ldmdb
19060			    (ri, /*wback=*/0, insn_low_registers));
19061
19062      /* B initial_insn_addr+4.  */
19063      current_stub_contents =
19064	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19065			    create_instruction_branch_absolute
19066			    (initial_insn_addr - current_stub_contents));
19067    }
19068  else if (wback && !restore_pc && !restore_rn)
19069    {
19070      /* LDMDB Rn!, {R-high-register-list}.  */
19071      current_stub_contents =
19072	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19073			    create_instruction_ldmdb
19074			    (rn, /*wback=*/1, insn_high_registers));
19075
19076      /* LDMDB Rn!, {R-low-register-list}.  */
19077      current_stub_contents =
19078	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19079			    create_instruction_ldmdb
19080			    (rn, /*wback=*/1, insn_low_registers));
19081
19082      /* B initial_insn_addr+4.  */
19083      current_stub_contents =
19084	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19085			    create_instruction_branch_absolute
19086			    (initial_insn_addr - current_stub_contents));
19087    }
19088  else if (!wback && restore_pc && !restore_rn)
19089    {
19090      /* Choose a Ri in the high-register-list that will be restored.  */
19091      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
19092
19093      /* SUB Ri, Rn, #(4*nb_registers).  */
19094      current_stub_contents =
19095	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19096			    create_instruction_sub (ri, rn, (4 * nb_registers)));
19097
19098      /* LDMIA Ri!, {R-low-register-list}.  */
19099      current_stub_contents =
19100	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19101			    create_instruction_ldmia
19102			    (ri, /*wback=*/1, insn_low_registers));
19103
19104      /* LDMIA Ri, {R-high-register-list}.  */
19105      current_stub_contents =
19106	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19107			    create_instruction_ldmia
19108			    (ri, /*wback=*/0, insn_high_registers));
19109    }
19110  else if (wback && restore_pc && !restore_rn)
19111    {
19112      /* Choose a Ri in the high-register-list that will be restored.  */
19113      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
19114
19115      /* SUB Rn, Rn, #(4*nb_registers)  */
19116      current_stub_contents =
19117	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19118			    create_instruction_sub (rn, rn, (4 * nb_registers)));
19119
19120      /* MOV Ri, Rn.  */
19121      current_stub_contents =
19122	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
19123			    create_instruction_mov (ri, rn));
19124
19125      /* LDMIA Ri!, {R-low-register-list}.  */
19126      current_stub_contents =
19127	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19128			    create_instruction_ldmia
19129			    (ri, /*wback=*/1, insn_low_registers));
19130
19131      /* LDMIA Ri, {R-high-register-list}.  */
19132      current_stub_contents =
19133	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19134			    create_instruction_ldmia
19135			    (ri, /*wback=*/0, insn_high_registers));
19136    }
19137  else if (!wback && !restore_pc && restore_rn)
19138    {
19139      ri = rn;
19140      if (!(insn_low_registers & (1 << rn)))
19141	{
19142	  /* Choose a Ri in the low-register-list that will be restored.  */
19143	  ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));
19144
19145	  /* MOV Ri, Rn.  */
19146	  current_stub_contents =
19147	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
19148				create_instruction_mov (ri, rn));
19149	}
19150
19151      /* LDMDB Ri!, {R-high-register-list}.  */
19152      current_stub_contents =
19153	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19154			    create_instruction_ldmdb
19155			    (ri, /*wback=*/1, insn_high_registers));
19156
19157      /* LDMDB Ri, {R-low-register-list}.  */
19158      current_stub_contents =
19159	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19160			    create_instruction_ldmdb
19161			    (ri, /*wback=*/0, insn_low_registers));
19162
19163      /* B initial_insn_addr+4.  */
19164      current_stub_contents =
19165	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19166			    create_instruction_branch_absolute
19167			    (initial_insn_addr - current_stub_contents));
19168    }
19169  else if (!wback && restore_pc && restore_rn)
19170    {
19171      ri = rn;
19172      if (!(insn_high_registers & (1 << rn)))
19173	{
19174	  /* Choose a Ri in the high-register-list that will be restored.  */
19175	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
19176	}
19177
19178      /* SUB Ri, Rn, #(4*nb_registers).  */
19179      current_stub_contents =
19180	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19181			    create_instruction_sub (ri, rn, (4 * nb_registers)));
19182
19183      /* LDMIA Ri!, {R-low-register-list}.  */
19184      current_stub_contents =
19185	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19186			    create_instruction_ldmia
19187			    (ri, /*wback=*/1, insn_low_registers));
19188
19189      /* LDMIA Ri, {R-high-register-list}.  */
19190      current_stub_contents =
19191	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19192			    create_instruction_ldmia
19193			    (ri, /*wback=*/0, insn_high_registers));
19194    }
19195  else if (wback && restore_rn)
19196    {
19197      /* The assembler should not have accepted this encoding.  */
19198      BFD_ASSERT (0 && "Cannot patch an instruction that has an "
19199	"undefined behavior.\n");
19200    }
19201
19202  /* Fill the remainder of the stub with deterministic contents.  */
19203  current_stub_contents =
19204    stm32l4xx_fill_stub_udf (htab, output_bfd,
19205			     base_stub_contents, current_stub_contents,
19206			     base_stub_contents +
19207			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
19208
19209}
19210
19211static void
19212stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
19213				      bfd * output_bfd,
19214				      const insn32 initial_insn,
19215				      const bfd_byte *const initial_insn_addr,
19216				      bfd_byte *const base_stub_contents)
19217{
19218  int num_words = initial_insn & 0xff;
19219  bfd_byte *current_stub_contents = base_stub_contents;
19220
19221  BFD_ASSERT (is_thumb2_vldm (initial_insn));
19222
19223  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with load
19224     sequences of eight words or fewer, which do not trigger the
19225     hardware issue.  */
19226  if (num_words <= 8)
19227    {
19228      /* Untouched instruction.  */
19229      current_stub_contents =
19230	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19231			    initial_insn);
19232
19233      /* B initial_insn_addr+4.  */
19234      current_stub_contents =
19235	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19236			    create_instruction_branch_absolute
19237			    (initial_insn_addr - current_stub_contents));
19238    }
19239  else
19240    {
19241      bool is_dp = /* DP encoding.  */
19242	(initial_insn & 0xfe100f00) == 0xec100b00;
19243      bool is_ia_nobang = /* (IA without !).  */
19244	(((initial_insn << 7) >> 28) & 0xd) == 0x4;
19245      bool is_ia_bang = /* (IA with !) - includes VPOP.  */
19246	(((initial_insn << 7) >> 28) & 0xd) == 0x5;
19247      bool is_db_bang = /* (DB with !).  */
19248	(((initial_insn << 7) >> 28) & 0xd) == 0x9;
19249      int base_reg = ((unsigned int) initial_insn << 12) >> 28;
19250      /* d = UInt (Vd:D);.  */
19251      int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
19252	| (((unsigned int)initial_insn << 9) >> 31);
19253
19254      /* Compute the number of 8-words chunks needed to split.  */
19255      int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
19256      int chunk;
19257
19258      /* The test coverage has been done under the assumption
19259	 that exactly one of the is_ predicates above is
19260	 true.  */
19261      BFD_ASSERT (    (is_ia_nobang ^ is_ia_bang ^ is_db_bang)
19262		  && !(is_ia_nobang & is_ia_bang & is_db_bang));
19263
19264      /* We treat the cutting of the words in one pass for all
19265	 cases, then we emit the adjustments:
19266
19267	 vldm rx, {...}
19268	 -> vldm rx!, {8_words_or_less} for each needed 8_word
19269	 -> sub rx, rx, #size (list)
19270
19271	 vldm rx!, {...}
19272	 -> vldm rx!, {8_words_or_less} for each needed 8_word
19273	 This also handles vpop instruction (when rx is sp)
19274
19275	 vldmdb rx!, {...}
19276	 -> vldmdb rx!, {8_words_or_less} for each needed 8_word.  */
19277      for (chunk = 0; chunk < chunks; ++chunk)
19278	{
19279	  bfd_vma new_insn = 0;
19280
19281	  if (is_ia_nobang || is_ia_bang)
19282	    {
19283	      new_insn = create_instruction_vldmia
19284		(base_reg,
19285		 is_dp,
19286		 /*wback=*/1,
19287		 chunks - (chunk + 1) ?
19288		 8 : num_words - chunk * 8,
19289		 first_reg + chunk * 8);
19290	    }
19291	  else if (is_db_bang)
19292	    {
19293	      new_insn = create_instruction_vldmdb
19294		(base_reg,
19295		 is_dp,
19296		 chunks - (chunk + 1) ?
19297		 8 : num_words - chunk * 8,
19298		 first_reg + chunk * 8);
19299	    }
19300
19301	  if (new_insn)
19302	    current_stub_contents =
19303	      push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19304				  new_insn);
19305	}
19306
19307      /* Only this case requires the base register compensation
19308	 subtract.  */
19309      if (is_ia_nobang)
19310	{
19311	  current_stub_contents =
19312	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19313				create_instruction_sub
19314				(base_reg, base_reg, 4*num_words));
19315	}
19316
19317      /* B initial_insn_addr+4.  */
19318      current_stub_contents =
19319	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19320			    create_instruction_branch_absolute
19321			    (initial_insn_addr - current_stub_contents));
19322    }
19323
19324  /* Fill the remainder of the stub with deterministic contents.  */
19325  current_stub_contents =
19326    stm32l4xx_fill_stub_udf (htab, output_bfd,
19327			     base_stub_contents, current_stub_contents,
19328			     base_stub_contents +
19329			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
19330}
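
/* As an illustration of the rewriting above, "VLDMIA r0, {d0-d15}"
   (32 words, increment-after, no write-back) is replaced by:

	vldmia	r0!, {d0-d3}
	vldmia	r0!, {d4-d7}
	vldmia	r0!, {d8-d11}
	vldmia	r0!, {d12-d15}
	sub	r0, r0, #128
	b.w	<initial_insn_addr + 4>

   followed by UDF padding; the SUB undoes the write-back introduced by the
   split.  */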
19331
19332static void
19333stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
19334				 bfd * output_bfd,
19335				 const insn32 wrong_insn,
19336				 const bfd_byte *const wrong_insn_addr,
19337				 bfd_byte *const stub_contents)
19338{
19339  if (is_thumb2_ldmia (wrong_insn))
19340    stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
19341					   wrong_insn, wrong_insn_addr,
19342					   stub_contents);
19343  else if (is_thumb2_ldmdb (wrong_insn))
19344    stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
19345					   wrong_insn, wrong_insn_addr,
19346					   stub_contents);
19347  else if (is_thumb2_vldm (wrong_insn))
19348    stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
19349					  wrong_insn, wrong_insn_addr,
19350					  stub_contents);
19351}
19352
19353/* End of stm32l4xx work-around.  */
19354
19355
19356/* Do code byteswapping.  Return FALSE afterwards so that the section is
19357   written out as normal.  */
19358
19359static bool
19360elf32_arm_write_section (bfd *output_bfd,
19361			 struct bfd_link_info *link_info,
19362			 asection *sec,
19363			 bfd_byte *contents)
19364{
19365  unsigned int mapcount, errcount;
19366  _arm_elf_section_data *arm_data;
19367  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
19368  elf32_arm_section_map *map;
19369  elf32_vfp11_erratum_list *errnode;
19370  elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
19371  bfd_vma ptr;
19372  bfd_vma end;
19373  bfd_vma offset = sec->output_section->vma + sec->output_offset;
19374  bfd_byte tmp;
19375  unsigned int i;
19376
19377  if (globals == NULL)
19378    return false;
19379
19380  /* If this section has not been allocated an _arm_elf_section_data
19381     structure then we cannot record anything.  */
19382  arm_data = get_arm_elf_section_data (sec);
19383  if (arm_data == NULL)
19384    return false;
19385
19386  mapcount = arm_data->mapcount;
19387  map = arm_data->map;
19388  errcount = arm_data->erratumcount;
19389
19390  if (errcount != 0)
19391    {
19392      unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
19393
19394      for (errnode = arm_data->erratumlist; errnode != 0;
19395	   errnode = errnode->next)
19396	{
19397	  bfd_vma target = errnode->vma - offset;
19398
19399	  switch (errnode->type)
19400	    {
19401	    case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
19402	      {
19403		bfd_vma branch_to_veneer;
19404		/* Original condition code of instruction, plus bit mask for
19405		   ARM B instruction.  */
19406		unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
19407				  | 0x0a000000;
19408
19409		/* The instruction is before the label.  */
19410		target -= 4;
19411
19412		/* Above offset included in -4 below.  */
19413		branch_to_veneer = errnode->u.b.veneer->vma
19414				   - errnode->vma - 4;
19415
19416		if ((signed) branch_to_veneer < -(1 << 25)
19417		    || (signed) branch_to_veneer >= (1 << 25))
19418		  _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
19419					"range"), output_bfd);
19420
19421		insn |= (branch_to_veneer >> 2) & 0xffffff;
19422		contents[endianflip ^ target] = insn & 0xff;
19423		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
19424		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
19425		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
19426	      }
19427	      break;
19428
19429	    case VFP11_ERRATUM_ARM_VENEER:
19430	      {
19431		bfd_vma branch_from_veneer;
19432		unsigned int insn;
19433
19434		/* Take size of veneer into account.  */
19435		branch_from_veneer = errnode->u.v.branch->vma
19436				     - errnode->vma - 12;
19437
19438		if ((signed) branch_from_veneer < -(1 << 25)
19439		    || (signed) branch_from_veneer >= (1 << 25))
19440		  _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
19441					"range"), output_bfd);
19442
19443		/* Original instruction.  */
19444		insn = errnode->u.v.branch->u.b.vfp_insn;
19445		contents[endianflip ^ target] = insn & 0xff;
19446		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
19447		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
19448		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
19449
19450		/* Branch back to insn after original insn.  */
19451		insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
19452		contents[endianflip ^ (target + 4)] = insn & 0xff;
19453		contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
19454		contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
19455		contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
19456	      }
19457	      break;
19458
19459	    default:
19460	      abort ();
19461	    }
19462	}
19463    }
19464
19465  if (arm_data->stm32l4xx_erratumcount != 0)
19466    {
19467      for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
19468	   stm32l4xx_errnode != 0;
19469	   stm32l4xx_errnode = stm32l4xx_errnode->next)
19470	{
19471	  bfd_vma target = stm32l4xx_errnode->vma - offset;
19472
19473	  switch (stm32l4xx_errnode->type)
19474	    {
19475	    case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
19476	      {
19477		unsigned int insn;
19478		bfd_vma branch_to_veneer =
19479		  stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;
19480
19481		if ((signed) branch_to_veneer < -(1 << 24)
19482		    || (signed) branch_to_veneer >= (1 << 24))
19483		  {
19484		    bfd_vma out_of_range =
19485		      ((signed) branch_to_veneer < -(1 << 24)) ?
19486		      - branch_to_veneer - (1 << 24) :
19487		      ((signed) branch_to_veneer >= (1 << 24)) ?
19488		      branch_to_veneer - (1 << 24) : 0;
19489
19490		    _bfd_error_handler
19491		      (_("%pB(%#" PRIx64 "): error: "
19492			 "cannot create STM32L4XX veneer; "
19493			 "jump out of range by %" PRId64 " bytes; "
19494			 "cannot encode branch instruction"),
19495		       output_bfd,
19496		       (uint64_t) (stm32l4xx_errnode->vma - 4),
19497		       (int64_t) out_of_range);
19498		    continue;
19499		  }
19500
19501		insn = create_instruction_branch_absolute
19502		  (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma);
19503
19504		/* The instruction is before the label.  */
19505		target -= 4;
19506
19507		put_thumb2_insn (globals, output_bfd,
19508				 (bfd_vma) insn, contents + target);
19509	      }
19510	      break;
19511
19512	    case STM32L4XX_ERRATUM_VENEER:
19513	      {
19514		bfd_byte * veneer;
19515		bfd_byte * veneer_r;
19516		unsigned int insn;
19517
19518		veneer = contents + target;
19519		veneer_r = veneer
19520		  + stm32l4xx_errnode->u.b.veneer->vma
19521		  - stm32l4xx_errnode->vma - 4;
19522
19523		if ((signed) (veneer_r - veneer -
19524			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
19525			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
19526			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
19527			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
19528		    || (signed) (veneer_r - veneer) >= (1 << 24))
19529		  {
19530		    _bfd_error_handler (_("%pB: error: cannot create STM32L4XX "
19531					  "veneer"), output_bfd);
19532		     continue;
19533		  }
19534
19535		/* Original instruction.  */
19536		insn = stm32l4xx_errnode->u.v.branch->u.b.insn;
19537
19538		stm32l4xx_create_replacing_stub
19539		  (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer);
19540	      }
19541	      break;
19542
19543	    default:
19544	      abort ();
19545	    }
19546	}
19547    }
19548
19549  if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
19550    {
19551      arm_unwind_table_edit *edit_node
19552	= arm_data->u.exidx.unwind_edit_list;
19553      /* Now, sec->size is the size of the section we will write.  The original
19554	 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
19555	 markers) was sec->rawsize.  (This isn't the case if we perform no
19556	 edits; then rawsize will be zero and we should use size.)  */
19557      bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
19558      unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
19559      unsigned int in_index, out_index;
19560      bfd_vma add_to_offsets = 0;
19561
19562      if (edited_contents == NULL)
19563	return false;
19564      for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
19565	{
19566	  if (edit_node)
19567	    {
19568	      unsigned int edit_index = edit_node->index;
19569
19570	      if (in_index < edit_index && in_index * 8 < input_size)
19571		{
19572		  copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
19573				    contents + in_index * 8, add_to_offsets);
19574		  out_index++;
19575		  in_index++;
19576		}
19577	      else if (in_index == edit_index
19578		       || (in_index * 8 >= input_size
19579			   && edit_index == UINT_MAX))
19580		{
19581		  switch (edit_node->type)
19582		    {
19583		    case DELETE_EXIDX_ENTRY:
19584		      in_index++;
19585		      add_to_offsets += 8;
19586		      break;
19587
19588		    case INSERT_EXIDX_CANTUNWIND_AT_END:
19589		      {
19590			asection *text_sec = edit_node->linked_section;
19591			bfd_vma text_offset = text_sec->output_section->vma
19592					      + text_sec->output_offset
19593					      + text_sec->size;
19594			bfd_vma exidx_offset = offset + out_index * 8;
19595			unsigned long prel31_offset;
19596
19597			/* Note: this is meant to be equivalent to an
19598			   R_ARM_PREL31 relocation.  These synthetic
19599			   EXIDX_CANTUNWIND markers are not relocated by the
19600			   usual BFD method.  */
19601			prel31_offset = (text_offset - exidx_offset)
19602					& 0x7ffffffful;
19603			if (bfd_link_relocatable (link_info))
19604			  {
19605			    /* A relocation for the new EXIDX_CANTUNWIND entry
19606			       is created here, so there is no need to adjust
19607			       the offset by hand.  */
19608			    prel31_offset = text_sec->output_offset
19609					    + text_sec->size;
19610			  }
19611
19612			/* First address we can't unwind.  */
19613			bfd_put_32 (output_bfd, prel31_offset,
19614				    &edited_contents[out_index * 8]);
19615
19616			/* Code for EXIDX_CANTUNWIND.  */
19617			bfd_put_32 (output_bfd, 0x1,
19618				    &edited_contents[out_index * 8 + 4]);
19619
19620			out_index++;
19621			add_to_offsets -= 8;
19622		      }
19623		      break;
19624		    }
19625
19626		  edit_node = edit_node->next;
19627		}
19628	    }
19629	  else
19630	    {
19631	      /* No more edits, copy remaining entries verbatim.  */
19632	      copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
19633				contents + in_index * 8, add_to_offsets);
19634	      out_index++;
19635	      in_index++;
19636	    }
19637	}
19638
19639      if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
19640	bfd_set_section_contents (output_bfd, sec->output_section,
19641				  edited_contents,
19642				  (file_ptr) sec->output_offset, sec->size);
19643
19644      return true;
19645    }
19646
19647  /* Fix code to point to Cortex-A8 erratum stubs.  */
19648  if (globals->fix_cortex_a8)
19649    {
19650      struct a8_branch_to_stub_data data;
19651
19652      data.writing_section = sec;
19653      data.contents = contents;
19654
19655      bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
19656			 & data);
19657    }
19658
19659  if (mapcount == 0)
19660    return false;
19661
19662  if (globals->byteswap_code)
19663    {
19664      qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
19665
19666      ptr = map[0].vma;
19667      for (i = 0; i < mapcount; i++)
19668	{
19669	  if (i == mapcount - 1)
19670	    end = sec->size;
19671	  else
19672	    end = map[i + 1].vma;
19673
19674	  switch (map[i].type)
19675	    {
19676	    case 'a':
19677	      /* Byte swap code words.  */
19678	      while (ptr + 3 < end)
19679		{
19680		  tmp = contents[ptr];
19681		  contents[ptr] = contents[ptr + 3];
19682		  contents[ptr + 3] = tmp;
19683		  tmp = contents[ptr + 1];
19684		  contents[ptr + 1] = contents[ptr + 2];
19685		  contents[ptr + 2] = tmp;
19686		  ptr += 4;
19687		}
19688	      break;
19689
19690	    case 't':
19691	      /* Byte swap code halfwords.  */
19692	      while (ptr + 1 < end)
19693		{
19694		  tmp = contents[ptr];
19695		  contents[ptr] = contents[ptr + 1];
19696		  contents[ptr + 1] = tmp;
19697		  ptr += 2;
19698		}
19699	      break;
19700
19701	    case 'd':
19702	      /* Leave data alone.  */
19703	      break;
19704	    }
19705	  ptr = end;
19706	}
19707    }
19708
19709  free (map);
19710  arm_data->mapcount = -1;
19711  arm_data->mapsize = 0;
19712  arm_data->map = NULL;
19713
19714  return false;
19715}
19716
19717/* Mangle thumb function symbols as we read them in.  */
19718
19719static bool
19720elf32_arm_swap_symbol_in (bfd * abfd,
19721			  const void *psrc,
19722			  const void *pshn,
19723			  Elf_Internal_Sym *dst)
19724{
19725  if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
19726    return false;
19727  dst->st_target_internal = 0;
19728
19729  /* New EABI objects mark thumb function symbols by setting the low bit of
19730     the address.  */
19731  if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
19732      || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
19733    {
19734      if (dst->st_value & 1)
19735	{
19736	  dst->st_value &= ~(bfd_vma) 1;
19737	  ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal,
19738				   ST_BRANCH_TO_THUMB);
19739	}
19740      else
19741	ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_ARM);
19742    }
19743  else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
19744    {
19745      dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
19746      ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_THUMB);
19747    }
19748  else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
19749    ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_LONG);
19750  else
19751    ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);
19752
19753  return true;
19754}
19755
19756
19757/* Mangle thumb function symbols as we write them out.  */
19758
19759static void
19760elf32_arm_swap_symbol_out (bfd *abfd,
19761			   const Elf_Internal_Sym *src,
19762			   void *cdst,
19763			   void *shndx)
19764{
19765  Elf_Internal_Sym newsym;
19766
19767  /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
19768     of the address set, as per the new EABI.  We do this unconditionally
19769     because objcopy does not set the elf header flags until after
19770     it writes out the symbol table.  */
19771  if (ARM_GET_SYM_BRANCH_TYPE (src->st_target_internal) == ST_BRANCH_TO_THUMB)
19772    {
19773      newsym = *src;
19774      if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
19775	newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
19776      if (newsym.st_shndx != SHN_UNDEF)
19777	{
19778	  /* Do this only for defined symbols.  At link time, the static
19779	     linker simulates the dynamic linker's symbol resolution and
19780	     carries over the thumbness of the symbols it finds to the
19781	     output symbol table.  It's not clear how that happens, but the
19782	     thumbness of an undefined symbol may well be different at
19783	     runtime, and writing '1' for it would be confusing for users
19784	     and possibly for the dynamic linker itself.  */
19786	  newsym.st_value |= 1;
19787	}
19788
19789      src = &newsym;
19790    }
19791  bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
19792}
19793
19794/* Add the PT_ARM_EXIDX program header.  */
19795
19796static bool
19797elf32_arm_modify_segment_map (bfd *abfd,
19798			      struct bfd_link_info *info ATTRIBUTE_UNUSED)
19799{
19800  struct elf_segment_map *m;
19801  asection *sec;
19802
19803  sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
19804  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
19805    {
19806      /* If there is already a PT_ARM_EXIDX header, then we do not
19807	 want to add another one.  This situation arises when running
19808	 "strip"; the input binary already has the header.  */
19809      m = elf_seg_map (abfd);
19810      while (m && m->p_type != PT_ARM_EXIDX)
19811	m = m->next;
19812      if (!m)
19813	{
19814	  m = (struct elf_segment_map *)
19815	      bfd_zalloc (abfd, sizeof (struct elf_segment_map));
19816	  if (m == NULL)
19817	    return false;
19818	  m->p_type = PT_ARM_EXIDX;
19819	  m->count = 1;
19820	  m->sections[0] = sec;
19821
19822	  m->next = elf_seg_map (abfd);
19823	  elf_seg_map (abfd) = m;
19824	}
19825    }
19826
19827  return true;
19828}
19829
19830/* We may add a PT_ARM_EXIDX program header.  */
19831
19832static int
19833elf32_arm_additional_program_headers (bfd *abfd,
19834				      struct bfd_link_info *info ATTRIBUTE_UNUSED)
19835{
19836  asection *sec;
19837
19838  sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
19839  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
19840    return 1;
19841  else
19842    return 0;
19843}
19844
19845/* Hook called by the linker routine which adds symbols from an object
19846   file.  */
19847
19848static bool
19849elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
19850			   Elf_Internal_Sym *sym, const char **namep,
19851			   flagword *flagsp, asection **secp, bfd_vma *valp)
19852{
19853  if (elf32_arm_hash_table (info) == NULL)
19854    return false;
19855
19856  if (elf32_arm_hash_table (info)->root.target_os == is_vxworks
19857      && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
19858				       flagsp, secp, valp))
19859    return false;
19860
19861  return true;
19862}
19863
19864/* We use this to override swap_symbol_in and swap_symbol_out.  */
19865const struct elf_size_info elf32_arm_size_info =
19866{
19867  sizeof (Elf32_External_Ehdr),
19868  sizeof (Elf32_External_Phdr),
19869  sizeof (Elf32_External_Shdr),
19870  sizeof (Elf32_External_Rel),
19871  sizeof (Elf32_External_Rela),
19872  sizeof (Elf32_External_Sym),
19873  sizeof (Elf32_External_Dyn),
19874  sizeof (Elf_External_Note),
19875  4,
19876  1,
19877  32, 2,
19878  ELFCLASS32, EV_CURRENT,
19879  bfd_elf32_write_out_phdrs,
19880  bfd_elf32_write_shdrs_and_ehdr,
19881  bfd_elf32_checksum_contents,
19882  bfd_elf32_write_relocs,
19883  elf32_arm_swap_symbol_in,
19884  elf32_arm_swap_symbol_out,
19885  bfd_elf32_slurp_reloc_table,
19886  bfd_elf32_slurp_symbol_table,
19887  bfd_elf32_swap_dyn_in,
19888  bfd_elf32_swap_dyn_out,
19889  bfd_elf32_swap_reloc_in,
19890  bfd_elf32_swap_reloc_out,
19891  bfd_elf32_swap_reloca_in,
19892  bfd_elf32_swap_reloca_out
19893};
19894
19895static bfd_vma
19896read_code32 (const bfd *abfd, const bfd_byte *addr)
19897{
19898  /* V7 BE8 code is always little endian.  */
19899  if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
19900    return bfd_getl32 (addr);
19901
19902  return bfd_get_32 (abfd, addr);
19903}
19904
19905static bfd_vma
19906read_code16 (const bfd *abfd, const bfd_byte *addr)
19907{
19908  /* V7 BE8 code is always little endian.  */
19909  if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
19910    return bfd_getl16 (addr);
19911
19912  return bfd_get_16 (abfd, addr);
19913}
19914
19915/* Return the size of the PLT0 entry starting at ADDR,
19916   or (bfd_vma) -1 if the size cannot be determined.  */
19917
19918static bfd_vma
19919elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr)
19920{
19921  bfd_vma first_word;
19922  bfd_vma plt0_size;
19923
19924  first_word = read_code32 (abfd, addr);
19925
19926  if (first_word == elf32_arm_plt0_entry[0])
19927    plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
19928  else if (first_word == elf32_thumb2_plt0_entry[0])
19929    plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
19930  else
19931    /* We don't yet handle this PLT format.  */
19932    return (bfd_vma) -1;
19933
19934  return plt0_size;
19935}
19936
19937/* Return the size of the PLT entry starting at offset OFFSET
19938   of the PLT section located at address START,
19939   or (bfd_vma) -1 if the size cannot be determined.  */
19940
19941static bfd_vma
19942elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset)
19943{
19944  bfd_vma first_insn;
19945  bfd_vma plt_size = 0;
19946  const bfd_byte *addr = start + offset;
19947
19948  /* The PLT entry size is fixed on Thumb-only platforms.  */
19949  if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
19950      return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
19951
19952  /* Account for the Thumb stub, if present.  */
19953  if (read_code16 (abfd, addr) == elf32_arm_plt_thumb_stub[0])
19954    {
19955      plt_size += 2 * ARRAY_SIZE (elf32_arm_plt_thumb_stub);
19956    }
19957
19958  /* Strip immediate from first add.  */
19959  first_insn = read_code32 (abfd, addr + plt_size) & 0xffffff00;
19960
19961#ifdef FOUR_WORD_PLT
19962  if (first_insn == elf32_arm_plt_entry[0])
19963    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
19964#else
19965  if (first_insn == elf32_arm_plt_entry_long[0])
19966    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
19967  else if (first_insn == elf32_arm_plt_entry_short[0])
19968    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
19969#endif
19970  else
19971    /* We don't yet handle this PLT format.  */
19972    return (bfd_vma) -1;
19973
19974  return plt_size;
19975}
19976
19977/* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab.  */
19978
19979static long
19980elf32_arm_get_synthetic_symtab (bfd *abfd,
19981			       long symcount ATTRIBUTE_UNUSED,
19982			       asymbol **syms ATTRIBUTE_UNUSED,
19983			       long dynsymcount,
19984			       asymbol **dynsyms,
19985			       asymbol **ret)
19986{
19987  asection *relplt;
19988  asymbol *s;
19989  arelent *p;
19990  long count, i, n;
19991  size_t size;
19992  Elf_Internal_Shdr *hdr;
19993  char *names;
19994  asection *plt;
19995  bfd_vma offset;
19996  bfd_byte *data;
19997
19998  *ret = NULL;
19999
20000  if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
20001    return 0;
20002
20003  if (dynsymcount <= 0)
20004    return 0;
20005
20006  relplt = bfd_get_section_by_name (abfd, ".rel.plt");
20007  if (relplt == NULL)
20008    return 0;
20009
20010  hdr = &elf_section_data (relplt)->this_hdr;
20011  if (hdr->sh_link != elf_dynsymtab (abfd)
20012      || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
20013    return 0;
20014
20015  plt = bfd_get_section_by_name (abfd, ".plt");
20016  if (plt == NULL)
20017    return 0;
20018
20019  if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, true))
20020    return -1;
20021
20022  data = plt->contents;
20023  if (data == NULL)
20024    {
20025      if (!bfd_get_full_section_contents (abfd, (asection *) plt, &data) || data == NULL)
20026	return -1;
20027      bfd_cache_section_contents ((asection *) plt, data);
20028    }
20029
20030  count = relplt->size / hdr->sh_entsize;
20031  size = count * sizeof (asymbol);
20032  p = relplt->relocation;
20033  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
20034    {
20035      size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
20036      if (p->addend != 0)
20037	size += sizeof ("+0x") - 1 + 8;
20038    }
20039
20040  s = *ret = (asymbol *) bfd_malloc (size);
20041  if (s == NULL)
20042    return -1;
20043
20044  offset = elf32_arm_plt0_size (abfd, data);
20045  if (offset == (bfd_vma) -1)
20046    return -1;
20047
20048  names = (char *) (s + count);
20049  p = relplt->relocation;
20050  n = 0;
20051  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
20052    {
20053      size_t len;
20054
20055      bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset);
20056      if (plt_size == (bfd_vma) -1)
20057	break;
20058
20059      *s = **p->sym_ptr_ptr;
20060      /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set.  Since
20061	 we are defining a symbol, ensure one of them is set.  */
20062      if ((s->flags & BSF_LOCAL) == 0)
20063	s->flags |= BSF_GLOBAL;
20064      s->flags |= BSF_SYNTHETIC;
20065      s->section = plt;
20066      s->value = offset;
20067      s->name = names;
20068      s->udata.p = NULL;
20069      len = strlen ((*p->sym_ptr_ptr)->name);
20070      memcpy (names, (*p->sym_ptr_ptr)->name, len);
20071      names += len;
20072      if (p->addend != 0)
20073	{
20074	  char buf[30], *a;
20075
20076	  memcpy (names, "+0x", sizeof ("+0x") - 1);
20077	  names += sizeof ("+0x") - 1;
20078	  bfd_sprintf_vma (abfd, buf, p->addend);
20079	  for (a = buf; *a == '0'; ++a)
20080	    ;
20081	  len = strlen (a);
20082	  memcpy (names, a, len);
20083	  names += len;
20084	}
20085      memcpy (names, "@plt", sizeof ("@plt"));
20086      names += sizeof ("@plt");
20087      ++s, ++n;
20088      offset += plt_size;
20089    }
20090
20091  return n;
20092}
20093
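/* Propagate the ARM specific SHF_ARM_PURECODE flag from the ELF section
   header HDR to the corresponding BFD section as SEC_ELF_PURECODE.  */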
20094static bool
20095elf32_arm_section_flags (const Elf_Internal_Shdr *hdr)
20096{
20097  if (hdr->sh_flags & SHF_ARM_PURECODE)
20098    hdr->bfd_section->flags |= SEC_ELF_PURECODE;
20099  return true;
20100}
20101
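/* Translate the section flag name FLAG_NAME into the corresponding ELF
   section flag value.  Only SHF_ARM_PURECODE is recognised here.  */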
20102static flagword
20103elf32_arm_lookup_section_flags (char *flag_name)
20104{
20105  if (!strcmp (flag_name, "SHF_ARM_PURECODE"))
20106    return SHF_ARM_PURECODE;
20107
20108  return SEC_NO_FLAGS;
20109}
20110
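/* Return the number of additional relocations recorded against SEC in its
   ARM specific section data, or zero if there is no such data.  */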
20111static unsigned int
20112elf32_arm_count_additional_relocs (asection *sec)
20113{
20114  struct _arm_elf_section_data *arm_data;
20115  arm_data = get_arm_elf_section_data (sec);
20116
20117  return arm_data == NULL ? 0 : arm_data->additional_reloc_count;
20118}
20119
20120/* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
20121   has a type >= SHT_LOOS.  Returns TRUE if these fields were initialised,
20122   FALSE otherwise.  ISECTION is the best guess matching section from the
20123   input bfd IBFD, but it might be NULL.  */
20124
20125static bool
20126elf32_arm_copy_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
20127				       bfd *obfd ATTRIBUTE_UNUSED,
20128				       const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
20129				       Elf_Internal_Shdr *osection)
20130{
20131  switch (osection->sh_type)
20132    {
20133    case SHT_ARM_EXIDX:
20134      {
20135	Elf_Internal_Shdr **oheaders = elf_elfsections (obfd);
20136	Elf_Internal_Shdr **iheaders = elf_elfsections (ibfd);
20137	unsigned i = 0;
20138
20139	osection->sh_flags = SHF_ALLOC | SHF_LINK_ORDER;
20140	osection->sh_info = 0;
20141
20142	/* The sh_link field must be set to the text section associated with
20143	   this index section.  Unfortunately the ARM EHABI does not specify
20144	   exactly how to determine this association.  Our caller does,
20145	   however, try to match up OSECTION with its corresponding input
20146	   section, so that is a good first guess.  */
20147	if (isection != NULL
20148	    && osection->bfd_section != NULL
20149	    && isection->bfd_section != NULL
20150	    && isection->bfd_section->output_section != NULL
20151	    && isection->bfd_section->output_section == osection->bfd_section
20152	    && iheaders != NULL
20153	    && isection->sh_link > 0
20154	    && isection->sh_link < elf_numsections (ibfd)
20155	    && iheaders[isection->sh_link]->bfd_section != NULL
20156	    && iheaders[isection->sh_link]->bfd_section->output_section != NULL
20157	    )
20158	  {
20159	    for (i = elf_numsections (obfd); i-- > 0;)
20160	      if (oheaders[i]->bfd_section
20161		  == iheaders[isection->sh_link]->bfd_section->output_section)
20162		break;
20163	  }
20164
20165	if (i == 0)
20166	  {
20167	    /* Failing that we have to find a matching section ourselves.  If
20168	       we had the output section name available we could compare that
20169	       with input section names.  Unfortunately we don't.  So instead
20170	       we use a simple heuristic and look for the nearest executable
20171	       section before this one.  */
20172	    for (i = elf_numsections (obfd); i-- > 0;)
20173	      if (oheaders[i] == osection)
20174		break;
20175	    if (i == 0)
20176	      break;
20177
20178	    while (i-- > 0)
20179	      if (oheaders[i]->sh_type == SHT_PROGBITS
20180		  && (oheaders[i]->sh_flags & (SHF_ALLOC | SHF_EXECINSTR))
20181		  == (SHF_ALLOC | SHF_EXECINSTR))
20182		break;
20183	  }
20184
20185	if (i)
20186	  {
20187	    osection->sh_link = i;
20188	    /* If the text section was part of a group
20189	       then the index section should be too.  */
20190	    if (oheaders[i]->sh_flags & SHF_GROUP)
20191	      osection->sh_flags |= SHF_GROUP;
20192	    return true;
20193	  }
20194      }
20195      break;
20196
20197    case SHT_ARM_PREEMPTMAP:
20198      osection->sh_flags = SHF_ALLOC;
20199      break;
20200
20201    case SHT_ARM_ATTRIBUTES:
20202    case SHT_ARM_DEBUGOVERLAY:
20203    case SHT_ARM_OVERLAYSECTION:
20204    default:
20205      break;
20206    }
20207
20208  return false;
20209}
20210
20211/* Returns TRUE if NAME is an ARM mapping symbol.
20212   Traditionally the symbols $a, $d and $t have been used.
20213   The ARM ELF standard also defines $x (for A64 code).  It also allows a
20214   period-initiated suffix to be added to the symbol: "$[adtx]\.[:sym_char]+".
20215   Other tools might also produce $b (Thumb BL), $f, $p, $m and $v, but we do
20216   not support them here.  $t.x indicates the start of ThumbEE instructions.  */
20217
20218static bool
20219is_arm_mapping_symbol (const char * name)
20220{
20221  return name != NULL /* Paranoia.  */
20222    && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
20223			 the mapping symbols could have acquired a prefix.
20224			 We do not support this here, since such symbols no
20225			 longer conform to the ARM ELF ABI.  */
20226    && (name[1] == 'a' || name[1] == 'd' || name[1] == 't' || name[1] == 'x')
20227    && (name[2] == 0 || name[2] == '.');
20228  /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
20229     any characters that follow the period are legal characters for the body
20230     of a symbol's name.  For now we just assume that this is the case.  */
20231}
20232
20233/* Make sure that mapping symbols in object files are not removed via the
20234   "strip --strip-unneeded" tool.  These symbols are needed in order to
20235   correctly generate interworking veneers, and for byte swapping code
20236   regions.  Once an object file has been linked, it is safe to remove the
20237   symbols as they will no longer be needed.  */
20238
20239static void
20240elf32_arm_backend_symbol_processing (bfd *abfd, asymbol *sym)
20241{
20242  if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
20243      && sym->section != bfd_abs_section_ptr
20244      && is_arm_mapping_symbol (sym->name))
20245    sym->flags |= BSF_KEEP;
20246}
20247
20248#undef  elf_backend_copy_special_section_fields
20249#define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields
20250
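/* Backend parameters and hooks shared by the standard ARM ELF targets.
   The NaCl, FDPIC and VxWorks variants below override a subset of these.  */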
20251#define ELF_ARCH			bfd_arch_arm
20252#define ELF_TARGET_ID			ARM_ELF_DATA
20253#define ELF_MACHINE_CODE		EM_ARM
20254#ifdef __QNXTARGET__
20255#define ELF_MAXPAGESIZE			0x1000
20256#else
20257#define ELF_MAXPAGESIZE			0x10000
20258#endif
20259#define ELF_COMMONPAGESIZE		0x1000
20260
20261#define bfd_elf32_mkobject			elf32_arm_mkobject
20262
20263#define bfd_elf32_bfd_copy_private_bfd_data	elf32_arm_copy_private_bfd_data
20264#define bfd_elf32_bfd_merge_private_bfd_data	elf32_arm_merge_private_bfd_data
20265#define bfd_elf32_bfd_set_private_flags		elf32_arm_set_private_flags
20266#define bfd_elf32_bfd_print_private_bfd_data	elf32_arm_print_private_bfd_data
20267#define bfd_elf32_bfd_link_hash_table_create	elf32_arm_link_hash_table_create
20268#define bfd_elf32_bfd_reloc_type_lookup		elf32_arm_reloc_type_lookup
20269#define bfd_elf32_bfd_reloc_name_lookup		elf32_arm_reloc_name_lookup
20270#define bfd_elf32_find_inliner_info		elf32_arm_find_inliner_info
20271#define bfd_elf32_new_section_hook		elf32_arm_new_section_hook
20272#define bfd_elf32_bfd_is_target_special_symbol	elf32_arm_is_target_special_symbol
20273#define bfd_elf32_bfd_final_link		elf32_arm_final_link
20274#define bfd_elf32_get_synthetic_symtab	elf32_arm_get_synthetic_symtab
20275
20276#define elf_backend_get_symbol_type		elf32_arm_get_symbol_type
20277#define elf_backend_maybe_function_sym		elf32_arm_maybe_function_sym
20278#define elf_backend_gc_mark_hook		elf32_arm_gc_mark_hook
20279#define elf_backend_gc_mark_extra_sections	elf32_arm_gc_mark_extra_sections
20280#define elf_backend_check_relocs		elf32_arm_check_relocs
20281#define elf_backend_update_relocs		elf32_arm_update_relocs
20282#define elf_backend_relocate_section		elf32_arm_relocate_section
20283#define elf_backend_write_section		elf32_arm_write_section
20284#define elf_backend_adjust_dynamic_symbol	elf32_arm_adjust_dynamic_symbol
20285#define elf_backend_create_dynamic_sections	elf32_arm_create_dynamic_sections
20286#define elf_backend_finish_dynamic_symbol	elf32_arm_finish_dynamic_symbol
20287#define elf_backend_finish_dynamic_sections	elf32_arm_finish_dynamic_sections
20288#define elf_backend_size_dynamic_sections	elf32_arm_size_dynamic_sections
20289#define elf_backend_always_size_sections	elf32_arm_always_size_sections
20290#define elf_backend_init_index_section		_bfd_elf_init_2_index_sections
20291#define elf_backend_init_file_header		elf32_arm_init_file_header
20292#define elf_backend_reloc_type_class		elf32_arm_reloc_type_class
20293#define elf_backend_object_p			elf32_arm_object_p
20294#define elf_backend_fake_sections		elf32_arm_fake_sections
20295#define elf_backend_section_from_shdr		elf32_arm_section_from_shdr
20296#define elf_backend_final_write_processing	elf32_arm_final_write_processing
20297#define elf_backend_copy_indirect_symbol	elf32_arm_copy_indirect_symbol
20298#define elf_backend_size_info			elf32_arm_size_info
20299#define elf_backend_modify_segment_map		elf32_arm_modify_segment_map
20300#define elf_backend_additional_program_headers	elf32_arm_additional_program_headers
20301#define elf_backend_output_arch_local_syms	elf32_arm_output_arch_local_syms
20302#define elf_backend_filter_implib_symbols	elf32_arm_filter_implib_symbols
20303#define elf_backend_begin_write_processing	elf32_arm_begin_write_processing
20304#define elf_backend_add_symbol_hook		elf32_arm_add_symbol_hook
20305#define elf_backend_count_additional_relocs	elf32_arm_count_additional_relocs
20306#define elf_backend_symbol_processing		elf32_arm_backend_symbol_processing
20307
20308#define elf_backend_can_refcount       1
20309#define elf_backend_can_gc_sections    1
20310#define elf_backend_plt_readonly       1
20311#define elf_backend_want_got_plt       1
20312#define elf_backend_want_plt_sym       0
20313#define elf_backend_want_dynrelro      1
20314#define elf_backend_may_use_rel_p      1
20315#define elf_backend_may_use_rela_p     0
20316#define elf_backend_default_use_rela_p 0
20317#define elf_backend_dtrel_excludes_plt 1
20318
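/* The GOT header is three words long: conventionally the first word holds
   the address of the dynamic section and the other two are reserved for
   the dynamic linker.  */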
20319#define elf_backend_got_header_size	12
20320#define elf_backend_extern_protected_data 0
20321
20322#undef	elf_backend_obj_attrs_vendor
20323#define elf_backend_obj_attrs_vendor		"aeabi"
20324#undef	elf_backend_obj_attrs_section
20325#define elf_backend_obj_attrs_section		".ARM.attributes"
20326#undef	elf_backend_obj_attrs_arg_type
20327#define elf_backend_obj_attrs_arg_type		elf32_arm_obj_attrs_arg_type
20328#undef	elf_backend_obj_attrs_section_type
20329#define elf_backend_obj_attrs_section_type	SHT_ARM_ATTRIBUTES
20330#define elf_backend_obj_attrs_order		elf32_arm_obj_attrs_order
20331#define elf_backend_obj_attrs_handle_unknown	elf32_arm_obj_attrs_handle_unknown
20332
20333#undef	elf_backend_section_flags
20334#define elf_backend_section_flags		elf32_arm_section_flags
20335#undef	elf_backend_lookup_section_flags_hook
20336#define elf_backend_lookup_section_flags_hook	elf32_arm_lookup_section_flags
20337
20338#define elf_backend_linux_prpsinfo32_ugid16	true
20339
20340#include "elf32-target.h"
20341
20342/* Native Client targets.  */
20343
20344#undef	TARGET_LITTLE_SYM
20345#define TARGET_LITTLE_SYM		arm_elf32_nacl_le_vec
20346#undef	TARGET_LITTLE_NAME
20347#define TARGET_LITTLE_NAME		"elf32-littlearm-nacl"
20348#undef	TARGET_BIG_SYM
20349#define TARGET_BIG_SYM			arm_elf32_nacl_be_vec
20350#undef	TARGET_BIG_NAME
20351#define TARGET_BIG_NAME			"elf32-bigarm-nacl"
20352
20353/* Like elf32_arm_link_hash_table_create -- but overrides
20354   appropriately for NaCl.  */
20355
20356static struct bfd_link_hash_table *
20357elf32_arm_nacl_link_hash_table_create (bfd *abfd)
20358{
20359  struct bfd_link_hash_table *ret;
20360
20361  ret = elf32_arm_link_hash_table_create (abfd);
20362  if (ret)
20363    {
20364      struct elf32_arm_link_hash_table *htab
20365	= (struct elf32_arm_link_hash_table *) ret;
20366
20367      htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
20368      htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
20369    }
20370  return ret;
20371}
20372
20373/* Since NaCl doesn't use the ARM-specific unwind format, we don't
20374   really need to use elf32_arm_modify_segment_map.  But we do it
20375   anyway just to reduce gratuitous differences with the stock ARM backend.  */
20376
20377static bool
20378elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
20379{
20380  return (elf32_arm_modify_segment_map (abfd, info)
20381	  && nacl_modify_segment_map (abfd, info));
20382}
20383
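/* Perform the ARM specific final write processing and then the generic
   NaCl processing.  */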
20384static bool
20385elf32_arm_nacl_final_write_processing (bfd *abfd)
20386{
20387  arm_final_write_processing (abfd);
20388  return nacl_final_write_processing (abfd);
20389}
20390
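/* Return the value of the synthetic symbol for NaCl PLT entry I: the start
   of the PLT plus the size of the PLT header plus I complete entries.  */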
20391static bfd_vma
20392elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt,
20393			    const arelent *rel ATTRIBUTE_UNUSED)
20394{
20395  return plt->vma
20396    + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry)
20397	   + i * ARRAY_SIZE (elf32_arm_nacl_plt_entry));
20398}
20399
20400#undef	elf32_bed
20401#define elf32_bed				elf32_arm_nacl_bed
20402#undef  bfd_elf32_bfd_link_hash_table_create
20403#define bfd_elf32_bfd_link_hash_table_create	\
20404  elf32_arm_nacl_link_hash_table_create
20405#undef	elf_backend_plt_alignment
20406#define elf_backend_plt_alignment		4
20407#undef	elf_backend_modify_segment_map
20408#define	elf_backend_modify_segment_map		elf32_arm_nacl_modify_segment_map
20409#undef	elf_backend_modify_headers
20410#define	elf_backend_modify_headers		nacl_modify_headers
20411#undef  elf_backend_final_write_processing
20412#define elf_backend_final_write_processing	elf32_arm_nacl_final_write_processing
20413#undef bfd_elf32_get_synthetic_symtab
20414#undef  elf_backend_plt_sym_val
20415#define elf_backend_plt_sym_val			elf32_arm_nacl_plt_sym_val
20416#undef  elf_backend_copy_special_section_fields
20417
20418#undef	ELF_MINPAGESIZE
20419#undef	ELF_COMMONPAGESIZE
20420
20421#undef ELF_TARGET_OS
20422#define ELF_TARGET_OS				is_nacl
20423
20424#include "elf32-target.h"
20425
20426/* Reset to defaults.  */
20427#undef	elf_backend_plt_alignment
20428#undef	elf_backend_modify_segment_map
20429#define elf_backend_modify_segment_map		elf32_arm_modify_segment_map
20430#undef	elf_backend_modify_headers
20431#undef  elf_backend_final_write_processing
20432#define elf_backend_final_write_processing	elf32_arm_final_write_processing
20433#undef	ELF_MINPAGESIZE
20434#undef	ELF_COMMONPAGESIZE
20435#define ELF_COMMONPAGESIZE		0x1000
20436
20437
20438/* FDPIC Targets.  */
20439
20440#undef  TARGET_LITTLE_SYM
20441#define TARGET_LITTLE_SYM		arm_elf32_fdpic_le_vec
20442#undef  TARGET_LITTLE_NAME
20443#define TARGET_LITTLE_NAME		"elf32-littlearm-fdpic"
20444#undef  TARGET_BIG_SYM
20445#define TARGET_BIG_SYM			arm_elf32_fdpic_be_vec
20446#undef  TARGET_BIG_NAME
20447#define TARGET_BIG_NAME			"elf32-bigarm-fdpic"
20448#undef elf_match_priority
20449#define elf_match_priority		128
20450#undef ELF_OSABI
20451#define ELF_OSABI		ELFOSABI_ARM_FDPIC
20452
20453/* Like elf32_arm_link_hash_table_create -- but overrides
20454   appropriately for FDPIC.  */
20455
20456static struct bfd_link_hash_table *
20457elf32_arm_fdpic_link_hash_table_create (bfd *abfd)
20458{
20459  struct bfd_link_hash_table *ret;
20460
20461  ret = elf32_arm_link_hash_table_create (abfd);
20462  if (ret)
20463    {
20464      struct elf32_arm_link_hash_table *htab = (struct elf32_arm_link_hash_table *) ret;
20465
20466      htab->fdpic_p = 1;
20467    }
20468  return ret;
20469}
20470
20471/* We need dynamic symbols for every section, since segments can
20472   relocate independently.  */
20473static bool
20474elf32_arm_fdpic_omit_section_dynsym (bfd *output_bfd ATTRIBUTE_UNUSED,
20475				    struct bfd_link_info *info
20476				    ATTRIBUTE_UNUSED,
20477				    asection *p ATTRIBUTE_UNUSED)
20478{
20479  switch (elf_section_data (p)->this_hdr.sh_type)
20480    {
20481    case SHT_PROGBITS:
20482    case SHT_NOBITS:
20483      /* If sh_type is yet undecided, assume it could be
20484	 SHT_PROGBITS/SHT_NOBITS.  */
20485    case SHT_NULL:
20486      return false;
20487
20488      /* There shouldn't be section relative relocations
20489	 against any other section.  */
20490    default:
20491      return true;
20492    }
20493}
20494
20495#undef  elf32_bed
20496#define elf32_bed				elf32_arm_fdpic_bed
20497
20498#undef  bfd_elf32_bfd_link_hash_table_create
20499#define bfd_elf32_bfd_link_hash_table_create	elf32_arm_fdpic_link_hash_table_create
20500
20501#undef elf_backend_omit_section_dynsym
20502#define elf_backend_omit_section_dynsym		elf32_arm_fdpic_omit_section_dynsym
20503
20504#undef ELF_TARGET_OS
20505
20506#include "elf32-target.h"
20507
20508#undef elf_match_priority
20509#undef ELF_OSABI
20510#undef elf_backend_omit_section_dynsym
20511
20512/* VxWorks Targets.  */
20513
20514#undef	TARGET_LITTLE_SYM
20515#define TARGET_LITTLE_SYM		arm_elf32_vxworks_le_vec
20516#undef	TARGET_LITTLE_NAME
20517#define TARGET_LITTLE_NAME		"elf32-littlearm-vxworks"
20518#undef	TARGET_BIG_SYM
20519#define TARGET_BIG_SYM			arm_elf32_vxworks_be_vec
20520#undef	TARGET_BIG_NAME
20521#define TARGET_BIG_NAME			"elf32-bigarm-vxworks"
20522
20523/* Like elf32_arm_link_hash_table_create -- but overrides
20524   appropriately for VxWorks.  */
20525
20526static struct bfd_link_hash_table *
20527elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
20528{
20529  struct bfd_link_hash_table *ret;
20530
20531  ret = elf32_arm_link_hash_table_create (abfd);
20532  if (ret)
20533    {
20534      struct elf32_arm_link_hash_table *htab
20535	= (struct elf32_arm_link_hash_table *) ret;
20536      htab->use_rel = 0;
20537    }
20538  return ret;
20539}
20540
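/* Perform the ARM specific final write processing and then the VxWorks
   specific processing.  */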
20541static bool
20542elf32_arm_vxworks_final_write_processing (bfd *abfd)
20543{
20544  arm_final_write_processing (abfd);
20545  return elf_vxworks_final_write_processing (abfd);
20546}
20547
20548#undef  elf32_bed
20549#define elf32_bed elf32_arm_vxworks_bed
20550
20551#undef  bfd_elf32_bfd_link_hash_table_create
20552#define bfd_elf32_bfd_link_hash_table_create	elf32_arm_vxworks_link_hash_table_create
20553#undef  elf_backend_final_write_processing
20554#define elf_backend_final_write_processing	elf32_arm_vxworks_final_write_processing
20555#undef  elf_backend_emit_relocs
20556#define elf_backend_emit_relocs			elf_vxworks_emit_relocs
20557
20558#undef  elf_backend_may_use_rel_p
20559#define elf_backend_may_use_rel_p	0
20560#undef  elf_backend_may_use_rela_p
20561#define elf_backend_may_use_rela_p	1
20562#undef  elf_backend_default_use_rela_p
20563#define elf_backend_default_use_rela_p	1
20564#undef  elf_backend_want_plt_sym
20565#define elf_backend_want_plt_sym	1
20566#undef  ELF_MAXPAGESIZE
20567#define ELF_MAXPAGESIZE			0x1000
20568#undef ELF_TARGET_OS
20569#define ELF_TARGET_OS			is_vxworks
20570
20571#include "elf32-target.h"
20572
20573
20574/* Merge backend specific data from an object file to the output
20575   object file when linking.  */
20576
20577static bool
20578elf32_arm_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
20579{
20580  bfd *obfd = info->output_bfd;
20581  flagword out_flags;
20582  flagword in_flags;
20583  bool flags_compatible = true;
20584  asection *sec;
20585
20586  /* Check if we have the same endianness.  */
20587  if (! _bfd_generic_verify_endian_match (ibfd, info))
20588    return false;
20589
20590  if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
20591    return true;
20592
20593  if (!elf32_arm_merge_eabi_attributes (ibfd, info))
20594    return false;
20595
20596  /* The input BFD must have had its flags initialised.  */
20597  /* The following seems bogus to me -- The flags are initialized in
20598     the assembler but I don't think an elf_flags_init field is
20599     written into the object.  */
20600  /* BFD_ASSERT (elf_flags_init (ibfd)); */
20601
20602  in_flags  = elf_elfheader (ibfd)->e_flags;
20603  out_flags = elf_elfheader (obfd)->e_flags;
20604
20605  /* In theory there is no reason why we couldn't handle this.  However,
20606     in practice it isn't even close to working and there is no real
20607     reason to want it.  */
20608  if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
20609      && !(ibfd->flags & DYNAMIC)
20610      && (in_flags & EF_ARM_BE8))
20611    {
20612      _bfd_error_handler (_("error: %pB is already in final BE8 format"),
20613			  ibfd);
20614      return false;
20615    }
20616
20617  if (!elf_flags_init (obfd))
20618    {
20619      /* If the input is the default architecture and had the default
20620	 flags then do not bother setting the flags for the output
20621	 architecture, instead allow future merges to do this.  If no
20622	 future merges ever set these flags then they will retain their
20623	 uninitialised values, which, unsurprisingly, correspond
20624	 to the default values.  */
20625      if (bfd_get_arch_info (ibfd)->the_default
20626	  && elf_elfheader (ibfd)->e_flags == 0)
20627	return true;
20628
20629      elf_flags_init (obfd) = true;
20630      elf_elfheader (obfd)->e_flags = in_flags;
20631
20632      if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
20633	  && bfd_get_arch_info (obfd)->the_default)
20634	return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
20635
20636      return true;
20637    }
20638
20639  /* Determine what should happen if the input ARM architecture
20640     does not match the output ARM architecture.  */
20641  if (! bfd_arm_merge_machines (ibfd, obfd))
20642    return false;
20643
20644  /* Identical flags must be compatible.  */
20645  if (in_flags == out_flags)
20646    return true;
20647
20648  /* Check to see if the input BFD actually contains any sections.  If
20649     not, its flags may not have been initialised either, but it
20650     cannot actually cause any incompatibility.  Do not short-circuit
20651     dynamic objects; their section list may be emptied by
20652     elf_link_add_object_symbols.
20653
20654     Also check to see if there are no code sections in the input.
20655     In this case there is no need to check for code-specific flags.
20656     XXX - do we need to worry about floating-point format compatibility
20657     in data sections?  */
20658  if (!(ibfd->flags & DYNAMIC))
20659    {
20660      bool null_input_bfd = true;
20661      bool only_data_sections = true;
20662
20663      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
20664	{
20665	  /* Ignore synthetic glue sections.  */
20666	  if (strcmp (sec->name, ".glue_7")
20667	      && strcmp (sec->name, ".glue_7t"))
20668	    {
20669	      if ((bfd_section_flags (sec)
20670		   & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
20671		  == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
20672		only_data_sections = false;
20673
20674	      null_input_bfd = false;
20675	      break;
20676	    }
20677	}
20678
20679      if (null_input_bfd || only_data_sections)
20680	return true;
20681    }
20682
20683  /* Complain about various flag mismatches.  */
20684  if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
20685				      EF_ARM_EABI_VERSION (out_flags)))
20686    {
20687      _bfd_error_handler
20688	(_("error: source object %pB has EABI version %d, but target %pB has EABI version %d"),
20689	 ibfd, (in_flags & EF_ARM_EABIMASK) >> 24,
20690	 obfd, (out_flags & EF_ARM_EABIMASK) >> 24);
20691      return false;
20692    }
20693
20694  /* Not sure what needs to be checked for EABI versions >= 1.  */
20695  /* VxWorks libraries do not use these flags.  */
20696  if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
20697      && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
20698      && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
20699    {
20700      if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
20701	{
20702	  _bfd_error_handler
20703	    (_("error: %pB is compiled for APCS-%d, whereas target %pB uses APCS-%d"),
20704	     ibfd, in_flags & EF_ARM_APCS_26 ? 26 : 32,
20705	     obfd, out_flags & EF_ARM_APCS_26 ? 26 : 32);
20706	  flags_compatible = false;
20707	}
20708
20709      if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
20710	{
20711	  if (in_flags & EF_ARM_APCS_FLOAT)
20712	    _bfd_error_handler
20713	      (_("error: %pB passes floats in float registers, whereas %pB passes them in integer registers"),
20714	       ibfd, obfd);
20715	  else
20716	    _bfd_error_handler
20717	      (_("error: %pB passes floats in integer registers, whereas %pB passes them in float registers"),
20718	       ibfd, obfd);
20719
20720	  flags_compatible = false;
20721	}
20722
20723      if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
20724	{
20725	  if (in_flags & EF_ARM_VFP_FLOAT)
20726	    _bfd_error_handler
20727	      (_("error: %pB uses %s instructions, whereas %pB does not"),
20728	       ibfd, "VFP", obfd);
20729	  else
20730	    _bfd_error_handler
20731	      (_("error: %pB uses %s instructions, whereas %pB does not"),
20732	       ibfd, "FPA", obfd);
20733
20734	  flags_compatible = false;
20735	}
20736
20737      if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
20738	{
20739	  if (in_flags & EF_ARM_MAVERICK_FLOAT)
20740	    _bfd_error_handler
20741	      (_("error: %pB uses %s instructions, whereas %pB does not"),
20742	       ibfd, "Maverick", obfd);
20743	  else
20744	    _bfd_error_handler
20745	      (_("error: %pB does not use %s instructions, whereas %pB does"),
20746	       ibfd, "Maverick", obfd);
20747
20748	  flags_compatible = false;
20749	}
20750
20751#ifdef EF_ARM_SOFT_FLOAT
20752      if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
20753	{
20754	  /* We can allow interworking between code that uses the VFP format
20755	     layout and either soft float or integer regs for
20756	     passing floating point arguments and results.  We already
20757	     know that the APCS_FLOAT flags match; similarly for VFP
20758	     flags.  */
20759	  if ((in_flags & EF_ARM_APCS_FLOAT) != 0
20760	      || (in_flags & EF_ARM_VFP_FLOAT) == 0)
20761	    {
20762	      if (in_flags & EF_ARM_SOFT_FLOAT)
20763		_bfd_error_handler
20764		  (_("error: %pB uses software FP, whereas %pB uses hardware FP"),
20765		   ibfd, obfd);
20766	      else
20767		_bfd_error_handler
20768		  (_("error: %pB uses hardware FP, whereas %pB uses software FP"),
20769		   ibfd, obfd);
20770
20771	      flags_compatible = false;
20772	    }
20773	}
20774#endif
20775
20776      /* Interworking mismatch is only a warning.  */
20777      if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
20778	{
20779	  if (in_flags & EF_ARM_INTERWORK)
20780	    {
20781	      _bfd_error_handler
20782		(_("warning: %pB supports interworking, whereas %pB does not"),
20783		 ibfd, obfd);
20784	    }
20785	  else
20786	    {
20787	      _bfd_error_handler
20788		(_("warning: %pB does not support interworking, whereas %pB does"),
20789		 ibfd, obfd);
20790	    }
20791	}
20792    }
20793
20794  return flags_compatible;
20795}
20796