//===- ARM.cpp ------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "InputFiles.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
#include "Thunks.h"
#include "lld/Common/ErrorHandler.h"
#include "llvm/Object/ELF.h"
#include "llvm/Support/Endian.h"

using namespace llvm;
using namespace llvm::support::endian;
using namespace llvm::ELF;
using namespace lld;
using namespace lld::elf;

namespace {
class ARM final : public TargetInfo {
public:
  ARM();
  uint32_t calcEFlags() const override;
  RelExpr getRelExpr(RelType type, const Symbol &s,
                     const uint8_t *loc) const override;
  RelType getDynRel(RelType type) const override;
  int64_t getImplicitAddend(const uint8_t *buf, RelType type) const override;
  void writeGotPlt(uint8_t *buf, const Symbol &s) const override;
  void writeIgotPlt(uint8_t *buf, const Symbol &s) const override;
  void writePltHeader(uint8_t *buf) const override;
  void writePlt(uint8_t *buf, const Symbol &sym,
                uint64_t pltEntryAddr) const override;
  void addPltSymbols(InputSection &isec, uint64_t off) const override;
  void addPltHeaderSymbols(InputSection &isd) const override;
  bool needsThunk(RelExpr expr, RelType type, const InputFile *file,
                  uint64_t branchAddr, const Symbol &s,
                  int64_t a) const override;
  uint32_t getThunkSectionSpacing() const override;
  bool inBranchRange(RelType type, uint64_t src, uint64_t dst) const override;
  void relocate(uint8_t *loc, const Relocation &rel,
                uint64_t val) const override;
};
} // namespace

ARM::ARM() {
  copyRel = R_ARM_COPY;
  relativeRel = R_ARM_RELATIVE;
  iRelativeRel = R_ARM_IRELATIVE;
  gotRel = R_ARM_GLOB_DAT;
  noneRel = R_ARM_NONE;
  pltRel = R_ARM_JUMP_SLOT;
  symbolicRel = R_ARM_ABS32;
  tlsGotRel = R_ARM_TLS_TPOFF32;
  tlsModuleIndexRel = R_ARM_TLS_DTPMOD32;
  tlsOffsetRel = R_ARM_TLS_DTPOFF32;
  gotBaseSymInGotPlt = false;
  pltHeaderSize = 32;
  pltEntrySize = 16;
  ipltEntrySize = 16;
  trapInstr = {0xd4, 0xd4, 0xd4, 0xd4};
  needsThunks = true;
  defaultMaxPageSize = 65536;
}

uint32_t ARM::calcEFlags() const {
  // The ABIFloatType is used by loaders to detect the floating point calling
  // convention.
  uint32_t abiFloatType = 0;
  if (config->armVFPArgs == ARMVFPArgKind::Base ||
      config->armVFPArgs == ARMVFPArgKind::Default)
    abiFloatType = EF_ARM_ABI_FLOAT_SOFT;
  else if (config->armVFPArgs == ARMVFPArgKind::VFP)
    abiFloatType = EF_ARM_ABI_FLOAT_HARD;

  // We don't currently use any features incompatible with EF_ARM_EABI_VER5,
  // but we don't have any firm guarantees of conformance. Linux AArch64
  // kernels (as of 2016) require an EABI version to be set.
  return EF_ARM_EABI_VER5 | abiFloatType;
}

RelExpr ARM::getRelExpr(RelType type, const Symbol &s,
                        const uint8_t *loc) const {
  switch (type) {
  case R_ARM_THM_JUMP11:
    return R_PC;
  case R_ARM_CALL:
  case R_ARM_JUMP24:
  case R_ARM_PC24:
  case R_ARM_PLT32:
  case R_ARM_PREL31:
  case R_ARM_THM_JUMP19:
  case R_ARM_THM_JUMP24:
  case R_ARM_THM_CALL:
    return R_PLT_PC;
  case R_ARM_GOTOFF32:
    // (S + A) - GOT_ORG
    return R_GOTREL;
  case R_ARM_GOT_BREL:
    // GOT(S) + A - GOT_ORG
    return R_GOT_OFF;
  case R_ARM_GOT_PREL:
  case R_ARM_TLS_IE32:
    // GOT(S) + A - P
    return R_GOT_PC;
  case R_ARM_SBREL32:
    return R_ARM_SBREL;
  case R_ARM_TARGET1:
    return config->target1Rel ? R_PC : R_ABS;
  case R_ARM_TARGET2:
    if (config->target2 == Target2Policy::Rel)
      return R_PC;
    if (config->target2 == Target2Policy::Abs)
      return R_ABS;
    return R_GOT_PC;
  case R_ARM_TLS_GD32:
    return R_TLSGD_PC;
  case R_ARM_TLS_LDM32:
    return R_TLSLD_PC;
  case R_ARM_TLS_LDO32:
    return R_DTPREL;
  case R_ARM_BASE_PREL:
    // B(S) + A - P
    // FIXME: currently B(S) is assumed to be .got; this may not hold for all
    // platforms.
    return R_GOTONLY_PC;
  case R_ARM_MOVW_PREL_NC:
  case R_ARM_MOVT_PREL:
  case R_ARM_REL32:
  case R_ARM_THM_MOVW_PREL_NC:
  case R_ARM_THM_MOVT_PREL:
    return R_PC;
  case R_ARM_ALU_PC_G0:
  case R_ARM_LDR_PC_G0:
  case R_ARM_THM_ALU_PREL_11_0:
  case R_ARM_THM_PC8:
  case R_ARM_THM_PC12:
    return R_ARM_PCA;
  case R_ARM_MOVW_BREL_NC:
  case R_ARM_MOVW_BREL:
  case R_ARM_MOVT_BREL:
  case R_ARM_THM_MOVW_BREL_NC:
  case R_ARM_THM_MOVW_BREL:
  case R_ARM_THM_MOVT_BREL:
    return R_ARM_SBREL;
  case R_ARM_NONE:
    return R_NONE;
  case R_ARM_TLS_LE32:
    return R_TLS;
  case R_ARM_V4BX:
    // V4BX is just a marker to indicate there's a "bx rN" instruction at the
    // given address. It can be used to implement a special linker mode which
    // rewrites ARMv4T inputs to ARMv4. Since we support only ARMv4T input and
    // not ARMv4 output, we can just ignore it.
    return R_NONE;
  default:
    return R_ABS;
  }
}

RelType ARM::getDynRel(RelType type) const {
  if ((type == R_ARM_ABS32) || (type == R_ARM_TARGET1 && !config->target1Rel))
    return R_ARM_ABS32;
  return R_ARM_NONE;
}

void ARM::writeGotPlt(uint8_t *buf, const Symbol &) const {
  write32le(buf, in.plt->getVA());
}

void ARM::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
  // An ARM entry is the address of the ifunc resolver function.
  write32le(buf, s.getVA());
}

// Long form PLT Header that does not have any restrictions on the displacement
// of the .plt from the .got.plt.
static void writePltHeaderLong(uint8_t *buf) {
  const uint8_t pltData[] = {
      0x04, 0xe0, 0x2d, 0xe5, //     str lr, [sp,#-4]!
      0x04, 0xe0, 0x9f, 0xe5, //     ldr lr, L2
      0x0e, 0xe0, 0x8f, 0xe0, // L1: add lr, pc, lr
      0x08, 0xf0, 0xbe, 0xe5, //     ldr pc, [lr, #8]
      0x00, 0x00, 0x00, 0x00, // L2: .word   &(.got.plt) - L1 - 8
      0xd4, 0xd4, 0xd4, 0xd4, //     Pad to 32-byte boundary
      0xd4, 0xd4, 0xd4, 0xd4, //     Pad to 32-byte boundary
      0xd4, 0xd4, 0xd4, 0xd4};
  memcpy(buf, pltData, sizeof(pltData));
  uint64_t gotPlt = in.gotPlt->getVA();
  uint64_t l1 = in.plt->getVA() + 8;
  write32le(buf + 16, gotPlt - l1 - 8);
}

// The default PLT header requires the .got.plt to be within 128 Mb of the
// .plt in the positive direction.
void ARM::writePltHeader(uint8_t *buf) const {
  // Use a similar sequence to that in writePlt(); the difference is that the
  // calling convention means we use lr instead of ip. The PLT entry is
  // responsible for saving lr on the stack; the dynamic loader is responsible
  // for reloading it.
  const uint32_t pltData[] = {
      0xe52de004, // L1: str lr, [sp,#-4]!
      0xe28fe600, //     add lr, pc,  #0x0NN00000 &(.got.plt) - L1 - 4
      0xe28eea00, //     add lr, lr,  #0x000NN000 &(.got.plt) - L1 - 4
      0xe5bef000, //     ldr pc, [lr, #0x00000NNN] &(.got.plt) - L1 - 4
  };

  uint64_t offset = in.gotPlt->getVA() - in.plt->getVA() - 4;
  if (!llvm::isUInt<27>(offset)) {
    // We cannot encode the offset, so use the long form.
    writePltHeaderLong(buf);
    return;
  }
  write32le(buf + 0, pltData[0]);
  write32le(buf + 4, pltData[1] | ((offset >> 20) & 0xff));
  write32le(buf + 8, pltData[2] | ((offset >> 12) & 0xff));
  write32le(buf + 12, pltData[3] | (offset & 0xfff));
  memcpy(buf + 16, trapInstr.data(), 4); // Pad to 32-byte boundary
  memcpy(buf + 20, trapInstr.data(), 4);
  memcpy(buf + 24, trapInstr.data(), 4);
  memcpy(buf + 28, trapInstr.data(), 4);
}

void ARM::addPltHeaderSymbols(InputSection &isec) const {
  addSyntheticLocal("$a", STT_NOTYPE, 0, 0, isec);
  addSyntheticLocal("$d", STT_NOTYPE, 16, 0, isec);
}

// Long form PLT entries that do not have any restrictions on the displacement
// of the .plt from the .got.plt.
static void writePltLong(uint8_t *buf, uint64_t gotPltEntryAddr,
                         uint64_t pltEntryAddr) {
  const uint8_t pltData[] = {
      0x04, 0xc0, 0x9f, 0xe5, //     ldr ip, L2
      0x0f, 0xc0, 0x8c, 0xe0, // L1: add ip, ip, pc
      0x00, 0xf0, 0x9c, 0xe5, //     ldr pc, [ip]
      0x00, 0x00, 0x00, 0x00, // L2: .word   Offset(&(.got.plt) - L1 - 8)
  };
  memcpy(buf, pltData, sizeof(pltData));
  uint64_t l1 = pltEntryAddr + 4;
  write32le(buf + 12, gotPltEntryAddr - l1 - 8);
}

// The default PLT entries require the .got.plt to be within 128 Mb of the
// .plt in the positive direction.
void ARM::writePlt(uint8_t *buf, const Symbol &sym,
                   uint64_t pltEntryAddr) const {
  // The PLT entry is similar to the example given in Appendix A of ELF for
  // the Arm Architecture. Instead of using the Group Relocations to find the
  // optimal rotation for the 8-bit immediate used in the add instructions, we
  // hard code the most compact rotations for simplicity. This saves a load
  // instruction over the long plt sequences.
  const uint32_t pltData[] = {
      0xe28fc600, // L1: add ip, pc,  #0x0NN00000  Offset(&(.got.plt) - L1 - 8)
      0xe28cca00, //     add ip, ip,  #0x000NN000  Offset(&(.got.plt) - L1 - 8)
      0xe5bcf000, //     ldr pc, [ip, #0x00000NNN] Offset(&(.got.plt) - L1 - 8)
  };

  uint64_t offset = sym.getGotPltVA() - pltEntryAddr - 8;
  if (!llvm::isUInt<27>(offset)) {
    // We cannot encode the offset, so use the long form.
    writePltLong(buf, sym.getGotPltVA(), pltEntryAddr);
    return;
  }
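  // The three instructions encode bits [27:20], [19:12] and [11:0] of the
  // offset respectively. As an illustration (not a real layout), an offset of
  // 0x01234567 would be written as add #0x01200000, add #0x00034000 and
  // ldr pc, [ip, #0x567].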
  write32le(buf + 0, pltData[0] | ((offset >> 20) & 0xff));
  write32le(buf + 4, pltData[1] | ((offset >> 12) & 0xff));
  write32le(buf + 8, pltData[2] | (offset & 0xfff));
  memcpy(buf + 12, trapInstr.data(), 4); // Pad to 16-byte boundary
}

void ARM::addPltSymbols(InputSection &isec, uint64_t off) const {
  addSyntheticLocal("$a", STT_NOTYPE, off, 0, isec);
  addSyntheticLocal("$d", STT_NOTYPE, off + 12, 0, isec);
}

bool ARM::needsThunk(RelExpr expr, RelType type, const InputFile *file,
                     uint64_t branchAddr, const Symbol &s,
                     int64_t /*a*/) const {
  // If S is an undefined weak symbol and does not have a PLT entry then it
  // will be resolved as a branch to the next instruction.
  if (s.isUndefWeak() && !s.isInPlt())
    return false;
  // A state change from ARM to Thumb and vice versa must go through an
  // interworking thunk if the relocation type is not R_ARM_CALL or
  // R_ARM_THM_CALL.
  switch (type) {
  case R_ARM_PC24:
  case R_ARM_PLT32:
  case R_ARM_JUMP24:
    // Source is ARM, all PLT entries are ARM so no interworking required.
    // Otherwise we need to interwork if STT_FUNC Symbol has bit 0 set (Thumb).
    if (s.isFunc() && expr == R_PC && (s.getVA() & 1))
      return true;
    LLVM_FALLTHROUGH;
  case R_ARM_CALL: {
    uint64_t dst = (expr == R_PLT_PC) ? s.getPltVA() : s.getVA();
    return !inBranchRange(type, branchAddr, dst);
  }
  case R_ARM_THM_JUMP19:
  case R_ARM_THM_JUMP24:
    // Source is Thumb, all PLT entries are ARM so interworking is required.
    // Otherwise we need to interwork if STT_FUNC Symbol has bit 0 clear (ARM).
    if (expr == R_PLT_PC || (s.isFunc() && (s.getVA() & 1) == 0))
      return true;
    LLVM_FALLTHROUGH;
  case R_ARM_THM_CALL: {
    uint64_t dst = (expr == R_PLT_PC) ? s.getPltVA() : s.getVA();
    return !inBranchRange(type, branchAddr, dst);
  }
  }
  return false;
}

uint32_t ARM::getThunkSectionSpacing() const {
  // The placing of pre-created ThunkSections is controlled by the value
  // thunkSectionSpacing returned by getThunkSectionSpacing(). The aim is to
  // place the ThunkSection such that all branches from the InputSections
  // prior to the ThunkSection can reach a Thunk placed at the end of the
  // ThunkSection. Graphically:
  // | up to thunkSectionSpacing .text input sections |
  // | ThunkSection                                   |
  // | up to thunkSectionSpacing .text input sections |
  // | ThunkSection                                   |

  // Pre-created ThunkSections are spaced roughly 16MiB apart on ARMv7. This
  // is to match the most common expected case of a Thumb 2 encoded BL, BLX or
  // B.W:
  // ARM B, BL, BLX range +/- 32MiB
  // Thumb B.W, BL, BLX range +/- 16MiB
  // Thumb B<cc>.W range +/- 1MiB
  // If a branch cannot reach a pre-created ThunkSection a new one will be
  // created so we can handle the rare cases of a Thumb 2 conditional branch.
  // We intentionally use a lower size for thunkSectionSpacing than the maximum
  // branch range so the end of the ThunkSection is more likely to be within
  // range of the branch instruction that is furthest away. The value we
  // shorten thunkSectionSpacing by is set conservatively to allow us to create
  // 16,384 12-byte Thunks at any offset in a ThunkSection without risk of a
  // branch to one of the Thunks going out of range.

  // On Arm the thunkSectionSpacing depends on the range of the Thumb branch
  // instructions. On earlier architectures such as ARMv4, ARMv5 and ARMv6
  // (except ARMv6T2) the range is +/- 4MiB.

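  // (As a note on the constants below: 0x30000 is 16,384 * 12 bytes, i.e. the
  // headroom reserved for 16,384 12-byte Thunks, subtracted from the 16MiB
  // (0x1000000) Thumb 2 branch range.)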
  return (config->armJ1J2BranchEncoding) ? 0x1000000 - 0x30000
                                         : 0x400000 - 0x7500;
}

bool ARM::inBranchRange(RelType type, uint64_t src, uint64_t dst) const {
  uint64_t range;
  uint64_t instrSize;

  switch (type) {
  case R_ARM_PC24:
  case R_ARM_PLT32:
  case R_ARM_JUMP24:
  case R_ARM_CALL:
    range = 0x2000000;
    instrSize = 4;
    break;
  case R_ARM_THM_JUMP19:
    range = 0x100000;
    instrSize = 2;
    break;
  case R_ARM_THM_JUMP24:
  case R_ARM_THM_CALL:
    range = config->armJ1J2BranchEncoding ? 0x1000000 : 0x400000;
    instrSize = 2;
    break;
  default:
    return true;
  }
  // The PC at Src is 2 instructions ahead; the immediate of the branch is
  // signed.
  if (src > dst)
    range -= 2 * instrSize;
  else
    range += instrSize;
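  // For example, with the adjustment above an ARM-state BL (range 0x2000000)
  // can reach up to 0x2000000 + 4 bytes forwards but only 0x2000000 - 8 bytes
  // backwards, because the offset is relative to Src + 8.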

  if ((dst & 0x1) == 0)
    // Destination is ARM; if the caller is ARM then Src is already 4-byte
    // aligned. If the caller is Thumb (BLX) then the Src address has its
    // bottom 2 bits cleared to ensure the destination will be 4-byte aligned.
    src &= ~0x3;
  else
    // Bit 0 == 1 denotes Thumb state; it is not part of the range.
    dst &= ~0x1;

  uint64_t distance = (src > dst) ? src - dst : dst - src;
  return distance <= range;
}

// Helper to produce message text when LLD detects a CALL relocation to a
// non STT_FUNC symbol that may result in incorrect interworking between ARM
// and Thumb.
static void stateChangeWarning(uint8_t *loc, RelType relt, const Symbol &s) {
  assert(!s.isFunc());
  if (s.isSection()) {
    // Section symbols must be defined and in a section. Users cannot change
    // the type. Use the section name as getName() returns an empty string.
    warn(getErrorLocation(loc) + "branch and link relocation: " +
         toString(relt) + " to STT_SECTION symbol " +
         cast<Defined>(s).section->name + " ; interworking not performed");
  } else {
    // Warn with hint on how to alter the symbol type.
    warn(getErrorLocation(loc) + "branch and link relocation: " +
         toString(relt) + " to non STT_FUNC symbol: " + s.getName() +
         " interworking not performed; consider using directive '.type " +
         s.getName() +
         ", %function' to give symbol type STT_FUNC if"
         " interworking between ARM and Thumb is required");
  }
}

// Utility functions taken from ARMAddressingModes.h; the only changes are for
// LLD coding style.

// Rotate a 32-bit unsigned value right by a specified number of bits.
static uint32_t rotr32(uint32_t val, uint32_t amt) {
  assert(amt < 32 && "Invalid rotate amount");
  return (val >> amt) | (val << ((32 - amt) & 31));
}

// Rotate a 32-bit unsigned value left by a specified number of bits.
static uint32_t rotl32(uint32_t val, uint32_t amt) {
  assert(amt < 32 && "Invalid rotate amount");
  return (val << amt) | (val >> ((32 - amt) & 31));
}

// Try to encode a 32-bit unsigned immediate imm with an immediate shifter
// operand; this form is an 8-bit immediate rotated right by an even number of
// bits. We compute the rotate amount to use. If this immediate value cannot be
// handled with a single shifter-op, determine a good rotate amount that will
// take a maximal chunk of bits out of the immediate.
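// For example (illustrative only): imm = 0x0001a000 has 13 trailing zeros, so
// rotAmt = 12; rotr32(0x0001a000, 12) == 0x1a fits in 8 bits, and we return
// (32 - 12) & 31 == 20, i.e. imm is encodable as the 8-bit constant 0x1a
// rotated right by 20.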
static uint32_t getSOImmValRotate(uint32_t imm) {
  // 8-bit (or less) immediates are trivially shifter_operands with a rotate
  // of zero.
  if ((imm & ~255U) == 0)
    return 0;

  // Use CTZ to compute the rotate amount.
  unsigned tz = llvm::countTrailingZeros(imm);

  // Rotate amount must be even. Something like 0x200 must be rotated 8 bits,
  // not 9.
  unsigned rotAmt = tz & ~1;

  // If we can handle this spread, return it.
  if ((rotr32(imm, rotAmt) & ~255U) == 0)
    return (32 - rotAmt) & 31; // HW rotates right, not left.

  // For values like 0xF000000F, we should ignore the low 6 bits, then
  // retry the hunt.
  if (imm & 63U) {
    unsigned tz2 = countTrailingZeros(imm & ~63U);
    unsigned rotAmt2 = tz2 & ~1;
    if ((rotr32(imm, rotAmt2) & ~255U) == 0)
      return (32 - rotAmt2) & 31; // HW rotates right, not left.
  }

  // Otherwise, we have no way to cover this span of bits with a single
  // shifter_op immediate. Return a chunk of bits that will be useful to
  // handle.
  return (32 - rotAmt) & 31; // HW rotates right, not left.
}

void ARM::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
  switch (rel.type) {
  case R_ARM_ABS32:
  case R_ARM_BASE_PREL:
  case R_ARM_GOTOFF32:
  case R_ARM_GOT_BREL:
  case R_ARM_GOT_PREL:
  case R_ARM_REL32:
  case R_ARM_RELATIVE:
  case R_ARM_SBREL32:
  case R_ARM_TARGET1:
  case R_ARM_TARGET2:
  case R_ARM_TLS_GD32:
  case R_ARM_TLS_IE32:
  case R_ARM_TLS_LDM32:
  case R_ARM_TLS_LDO32:
  case R_ARM_TLS_LE32:
  case R_ARM_TLS_TPOFF32:
  case R_ARM_TLS_DTPOFF32:
    write32le(loc, val);
    break;
  case R_ARM_PREL31:
    checkInt(loc, val, 31, rel);
    write32le(loc, (read32le(loc) & 0x80000000) | (val & ~0x80000000));
    break;
  case R_ARM_CALL: {
    // R_ARM_CALL is used for BL and BLX instructions. For symbols of type
    // STT_FUNC we choose whether to write a BL or BLX depending on the value
    // of bit 0 of Val, with bit 0 == 1 denoting Thumb. If the symbol is not of
    // type STT_FUNC then we must preserve the original instruction. PLT
    // entries are always ARM state so we know we don't need to interwork.
    assert(rel.sym); // R_ARM_CALL is always reached via relocate().
    bool bit0Thumb = val & 1;
    bool isBlx = (read32le(loc) & 0xfe000000) == 0xfa000000;
    // lld 10.0 and before always used bit0Thumb when deciding to write a BLX
    // even when the symbol type was not STT_FUNC.
    if (!rel.sym->isFunc() && isBlx != bit0Thumb)
      stateChangeWarning(loc, rel.type, *rel.sym);
    if (rel.sym->isFunc() ? bit0Thumb : isBlx) {
      // The BLX encoding is 0xfa:H:imm24 where Val = imm24:H:'1'
      checkInt(loc, val, 26, rel);
      write32le(loc, 0xfa000000 |                    // opcode
                         ((val & 2) << 23) |         // H
                         ((val >> 2) & 0x00ffffff)); // imm24
      break;
    }
    // BLX (always unconditional) instruction to an ARM Target: select an
    // unconditional BL.
    write32le(loc, 0xeb000000 | (read32le(loc) & 0x00ffffff));
    // fall through as BL encoding is shared with B
  }
    LLVM_FALLTHROUGH;
  case R_ARM_JUMP24:
  case R_ARM_PC24:
  case R_ARM_PLT32:
    checkInt(loc, val, 26, rel);
    write32le(loc, (read32le(loc) & ~0x00ffffff) | ((val >> 2) & 0x00ffffff));
    break;
  case R_ARM_THM_JUMP11:
    checkInt(loc, val, 12, rel);
    write16le(loc, (read32le(loc) & 0xf800) | ((val >> 1) & 0x07ff));
    break;
  case R_ARM_THM_JUMP19:
    // Encoding T3: Val = S:J2:J1:imm6:imm11:0
    checkInt(loc, val, 21, rel);
    write16le(loc,
              (read16le(loc) & 0xfbc0) |   // opcode cond
                  ((val >> 10) & 0x0400) | // S
                  ((val >> 12) & 0x003f)); // imm6
    write16le(loc + 2,
              0x8000 |                    // opcode
                  ((val >> 8) & 0x0800) | // J2
                  ((val >> 5) & 0x2000) | // J1
                  ((val >> 1) & 0x07ff)); // imm11
    break;
  case R_ARM_THM_CALL: {
    // R_ARM_THM_CALL is used for BL and BLX instructions. For symbols of type
    // STT_FUNC we choose whether to write a BL or BLX depending on the value
    // of bit 0 of Val, with bit 0 == 0 denoting ARM. If the symbol is not of
    // type STT_FUNC then we must preserve the original instruction. PLT
    // entries are always ARM state so we know we need to interwork.
    assert(rel.sym); // R_ARM_THM_CALL is always reached via relocate().
    bool bit0Thumb = val & 1;
    bool isBlx = (read16le(loc + 2) & 0x1000) == 0;
    // lld 10.0 and before always used bit0Thumb when deciding to write a BLX
    // even when the symbol type was not STT_FUNC. PLT entries generated by
    // LLD are always ARM.
    if (!rel.sym->isFunc() && !rel.sym->isInPlt() && isBlx == bit0Thumb)
      stateChangeWarning(loc, rel.type, *rel.sym);
    if (rel.sym->isFunc() || rel.sym->isInPlt() ? !bit0Thumb : isBlx) {
      // We are writing a BLX. Ensure the BLX destination is 4-byte aligned, as
      // the BLX instruction may be only 2-byte aligned. This must be done
      // before the overflow check.
      val = alignTo(val, 4);
      write16le(loc + 2, read16le(loc + 2) & ~0x1000);
    } else {
      write16le(loc + 2, (read16le(loc + 2) & ~0x1000) | 1 << 12);
    }
    if (!config->armJ1J2BranchEncoding) {
      // Older Arm architectures do not support R_ARM_THM_JUMP24 and have
      // different encoding rules and range due to J1 and J2 always being 1.
      checkInt(loc, val, 23, rel);
      write16le(loc,
                0xf000 |                     // opcode
                    ((val >> 12) & 0x07ff)); // imm11
      write16le(loc + 2,
                (read16le(loc + 2) & 0xd000) | // opcode
                    0x2800 |                   // J1 == J2 == 1
                    ((val >> 1) & 0x07ff));    // imm11
      break;
    }
  }
    // Fall through as rest of encoding is the same as B.W
    LLVM_FALLTHROUGH;
  case R_ARM_THM_JUMP24:
    // Encoding B  T4, BL T1, BLX T2: Val = S:I1:I2:imm10:imm11:0
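    // (The two shifted-XOR terms below compute J1 = NOT(I1) EOR S and
    // J2 = NOT(I2) EOR S: S is bit 24 of Val and I1/I2 are bits 23/22, so the
    // shifts line S up with the complemented I bits at the J1/J2 positions,
    // bits 13 and 11 of the second half-word.)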
    checkInt(loc, val, 25, rel);
    write16le(loc,
              0xf000 |                     // opcode
                  ((val >> 14) & 0x0400) | // S
                  ((val >> 12) & 0x03ff)); // imm10
    write16le(loc + 2,
              (read16le(loc + 2) & 0xd000) |                  // opcode
                  (((~(val >> 10)) ^ (val >> 11)) & 0x2000) | // J1
                  (((~(val >> 11)) ^ (val >> 13)) & 0x0800) | // J2
                  ((val >> 1) & 0x07ff));                     // imm11
    break;
  case R_ARM_MOVW_ABS_NC:
  case R_ARM_MOVW_PREL_NC:
  case R_ARM_MOVW_BREL_NC:
    write32le(loc, (read32le(loc) & ~0x000f0fff) | ((val & 0xf000) << 4) |
                       (val & 0x0fff));
    break;
  case R_ARM_MOVT_ABS:
  case R_ARM_MOVT_PREL:
  case R_ARM_MOVT_BREL:
    write32le(loc, (read32le(loc) & ~0x000f0fff) |
                       (((val >> 16) & 0xf000) << 4) | ((val >> 16) & 0xfff));
    break;
  case R_ARM_THM_MOVT_ABS:
  case R_ARM_THM_MOVT_PREL:
  case R_ARM_THM_MOVT_BREL:
    // Encoding T1: A = imm4:i:imm3:imm8
    write16le(loc,
              0xf2c0 |                     // opcode
                  ((val >> 17) & 0x0400) | // i
                  ((val >> 28) & 0x000f)); // imm4
    write16le(loc + 2,
              (read16le(loc + 2) & 0x8f00) | // opcode
                  ((val >> 12) & 0x7000) |   // imm3
                  ((val >> 16) & 0x00ff));   // imm8
    break;
  case R_ARM_THM_MOVW_ABS_NC:
  case R_ARM_THM_MOVW_PREL_NC:
  case R_ARM_THM_MOVW_BREL_NC:
    // Encoding T3: A = imm4:i:imm3:imm8
    write16le(loc,
              0xf240 |                     // opcode
                  ((val >> 1) & 0x0400) |  // i
                  ((val >> 12) & 0x000f)); // imm4
    write16le(loc + 2,
              (read16le(loc + 2) & 0x8f00) | // opcode
                  ((val << 4) & 0x7000) |    // imm3
                  (val & 0x00ff));           // imm8
    break;
  case R_ARM_ALU_PC_G0: {
    // ADR (literal) add = bit23, sub = bit22
    // literal is a 12-bit modified immediate, made up of a 4-bit even rotate
    // right and an 8-bit immediate. The code-sequence here is derived from
    // ARMAddressingModes.h in llvm/Target/ARM/MCTargetDesc. In our case we
    // want to give an error if we cannot encode the constant.
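    // (Below, the 12-bit modified immediate is assembled as rot:imm8, where
    // rot is half the rotate-right amount chosen by getSOImmValRotate and
    // imm8 is val rotated back left by that amount.)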
    uint32_t opcode = 0x00800000;
    if (val >> 63) {
      opcode = 0x00400000;
      val = ~val + 1;
    }
    if ((val & ~255U) != 0) {
      uint32_t rotAmt = getSOImmValRotate(val);
      // Error if we cannot encode this with a single shift
      if (rotr32(~255U, rotAmt) & val)
        error(getErrorLocation(loc) + "unencodeable immediate " +
              Twine(val).str() + " for relocation " + toString(rel.type));
      val = rotl32(val, rotAmt) | ((rotAmt >> 1) << 8);
    }
    write32le(loc, (read32le(loc) & 0xff0ff000) | opcode | val);
    break;
  }
  case R_ARM_LDR_PC_G0: {
    // R_ARM_LDR_PC_G0 is S + A - P; we have ((S + A) | T) - P. If S is a
    // function then the address is 0 (modulo 2) and P is 0 (modulo 4), so we
    // can clear the bottom bit to recover S + A - P.
    if (rel.sym->isFunc())
      val &= ~0x1;
    // LDR (literal) u = bit23
    int64_t imm = val;
    uint32_t u = 0x00800000;
    if (imm < 0) {
      imm = -imm;
      u = 0;
    }
    checkUInt(loc, imm, 12, rel);
    write32le(loc, (read32le(loc) & 0xff7ff000) | u | imm);
    break;
  }
  case R_ARM_THM_ALU_PREL_11_0: {
    // ADR encoding T2 (sub), T3 (add) i:imm3:imm8
    int64_t imm = val;
    uint16_t sub = 0;
    if (imm < 0) {
      imm = -imm;
      sub = 0x00a0;
    }
    checkUInt(loc, imm, 12, rel);
    write16le(loc, (read16le(loc) & 0xfb0f) | sub | (imm & 0x800) >> 1);
    write16le(loc + 2,
              (read16le(loc + 2) & 0x8f00) | (imm & 0x700) << 4 | (imm & 0xff));
    break;
  }
  case R_ARM_THM_PC8:
    // ADR and LDR (literal) encoding T1: positive offset only, imm8:00.
    // R_ARM_THM_PC8 is S + A - Pa; we have ((S + A) | T) - Pa. If S is a
    // function then the address is 0 (modulo 2) and Pa is 0 (modulo 4), so we
    // can clear the bottom bit to recover S + A - Pa.
    if (rel.sym->isFunc())
      val &= ~0x1;
    checkUInt(loc, val, 10, rel);
    checkAlignment(loc, val, 4, rel);
    write16le(loc, (read16le(loc) & 0xff00) | (val & 0x3fc) >> 2);
    break;
  case R_ARM_THM_PC12: {
    // LDR (literal) encoding T2, add = (U == '1'), imm12 is unsigned.
    // R_ARM_THM_PC12 is S + A - Pa; we have ((S + A) | T) - Pa. If S is a
    // function then the address is 0 (modulo 2) and Pa is 0 (modulo 4), so we
    // can clear the bottom bit to recover S + A - Pa.
    if (rel.sym->isFunc())
      val &= ~0x1;
    int64_t imm12 = val;
    uint16_t u = 0x0080;
    if (imm12 < 0) {
      imm12 = -imm12;
      u = 0;
    }
    checkUInt(loc, imm12, 12, rel);
    write16le(loc, read16le(loc) | u);
    write16le(loc + 2, (read16le(loc + 2) & 0xf000) | imm12);
    break;
  }
  default:
    error(getErrorLocation(loc) + "unrecognized relocation " +
          toString(rel.type));
  }
}

int64_t ARM::getImplicitAddend(const uint8_t *buf, RelType type) const {
  switch (type) {
  default:
    return 0;
  case R_ARM_ABS32:
  case R_ARM_BASE_PREL:
  case R_ARM_GOTOFF32:
  case R_ARM_GOT_BREL:
  case R_ARM_GOT_PREL:
  case R_ARM_REL32:
  case R_ARM_TARGET1:
  case R_ARM_TARGET2:
  case R_ARM_TLS_GD32:
  case R_ARM_TLS_LDM32:
  case R_ARM_TLS_LDO32:
  case R_ARM_TLS_IE32:
  case R_ARM_TLS_LE32:
    return SignExtend64<32>(read32le(buf));
  case R_ARM_PREL31:
    return SignExtend64<31>(read32le(buf));
  case R_ARM_CALL:
  case R_ARM_JUMP24:
  case R_ARM_PC24:
  case R_ARM_PLT32:
    return SignExtend64<26>(read32le(buf) << 2);
  case R_ARM_THM_JUMP11:
    return SignExtend64<12>(read16le(buf) << 1);
  case R_ARM_THM_JUMP19: {
    // Encoding T3: A = S:J2:J1:imm6:imm11:0
    uint16_t hi = read16le(buf);
    uint16_t lo = read16le(buf + 2);
    return SignExtend64<20>(((hi & 0x0400) << 10) | // S
                            ((lo & 0x0800) << 8) |  // J2
                            ((lo & 0x2000) << 5) |  // J1
                            ((hi & 0x003f) << 12) | // imm6
                            ((lo & 0x07ff) << 1));  // imm11:0
  }
  case R_ARM_THM_CALL:
    if (!config->armJ1J2BranchEncoding) {
      // Older Arm architectures do not support R_ARM_THM_JUMP24 and have
      // different encoding rules and range due to J1 and J2 always being 1.
      uint16_t hi = read16le(buf);
      uint16_t lo = read16le(buf + 2);
      return SignExtend64<22>(((hi & 0x7ff) << 12) | // imm11
                              ((lo & 0x7ff) << 1));  // imm11:0
      break;
    }
    LLVM_FALLTHROUGH;
  case R_ARM_THM_JUMP24: {
    // Encoding B T4, BL T1, BLX T2: A = S:I1:I2:imm10:imm11:0
    // I1 = NOT(J1 EOR S), I2 = NOT(J2 EOR S)
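    // (In the expression below J1 is bit 13 of lo and S is bit 10 of hi, so
    // hi << 3 lines S up with J1 and the XOR plus complement compute I1
    // directly at bit 23; likewise hi << 1 lines S up with J2 at bit 11,
    // giving I2 at bit 22.)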
    uint16_t hi = read16le(buf);
    uint16_t lo = read16le(buf + 2);
    return SignExtend64<24>(((hi & 0x0400) << 14) |                    // S
                            (~((lo ^ (hi << 3)) << 10) & 0x00800000) | // I1
                            (~((lo ^ (hi << 1)) << 11) & 0x00400000) | // I2
                            ((hi & 0x003ff) << 12) |                   // imm10
                            ((lo & 0x007ff) << 1)); // imm11:0
  }
  // Per ELF for the ARM Architecture, section 4.6.1.1, the implicit addend
  // for MOVW and MOVT is in the range -32768 <= A < 32768.
  case R_ARM_MOVW_ABS_NC:
  case R_ARM_MOVT_ABS:
  case R_ARM_MOVW_PREL_NC:
  case R_ARM_MOVT_PREL:
  case R_ARM_MOVW_BREL_NC:
  case R_ARM_MOVT_BREL: {
    uint64_t val = read32le(buf) & 0x000f0fff;
    return SignExtend64<16>(((val & 0x000f0000) >> 4) | (val & 0x00fff));
  }
  case R_ARM_THM_MOVW_ABS_NC:
  case R_ARM_THM_MOVT_ABS:
  case R_ARM_THM_MOVW_PREL_NC:
  case R_ARM_THM_MOVT_PREL:
  case R_ARM_THM_MOVW_BREL_NC:
  case R_ARM_THM_MOVT_BREL: {
    // Encoding T3: A = imm4:i:imm3:imm8
    uint16_t hi = read16le(buf);
    uint16_t lo = read16le(buf + 2);
    return SignExtend64<16>(((hi & 0x000f) << 12) | // imm4
                            ((hi & 0x0400) << 1) |  // i
                            ((lo & 0x7000) >> 4) |  // imm3
                            (lo & 0x00ff));         // imm8
  }
  case R_ARM_ALU_PC_G0: {
    // The 12-bit immediate is a modified immediate made up of a 4-bit even
    // right rotation and an 8-bit constant. After the rotation the value is
    // zero-extended. When bit 23 is set the instruction is an add, when
    // bit 22 is set it is a sub.
    uint32_t instr = read32le(buf);
    uint32_t val = rotr32(instr & 0xff, ((instr & 0xf00) >> 8) * 2);
    // Cast before negating so the sub form yields a negative 64-bit addend
    // rather than a wrapped-around unsigned value.
    return (instr & 0x00400000) ? -static_cast<int64_t>(val) : val;
  }
  case R_ARM_LDR_PC_G0: {
    // ADR (literal) add = bit23, sub = bit22
    // LDR (literal) u = bit23, unsigned imm12
    bool u = read32le(buf) & 0x00800000;
    uint32_t imm12 = read32le(buf) & 0xfff;
    // Cast before negating so U == '0' yields a negative 64-bit addend.
    return u ? imm12 : -static_cast<int64_t>(imm12);
  }
  case R_ARM_THM_ALU_PREL_11_0: {
    // Thumb2 ADR, which is an alias for a sub or add instruction with an
    // unsigned immediate.
    // ADR encoding T2 (sub), T3 (add) i:imm3:imm8
    uint16_t hi = read16le(buf);
    uint16_t lo = read16le(buf + 2);
    uint64_t imm = (hi & 0x0400) << 1 | // i
                   (lo & 0x7000) >> 4 | // imm3
                   (lo & 0x00ff);       // imm8
    // For sub the addend is negative; for add it is positive.
    return (hi & 0x00f0) ? -imm : imm;
  }
  case R_ARM_THM_PC8:
    // ADR and LDR (literal) encoding T1
    // From ELF for the ARM Architecture the initial signed addend is formed
    // from an unsigned field using the expression (((imm8:00 + 4) & 0x3ff) - 4);
    // this trick permits the PC bias of -4 to be encoded using imm8 = 0xff.
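    // For example, imm8 = 0xff gives (((0x3fc + 4) & 0x3ff) - 4) == -4.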
    return ((((read16le(buf) & 0xff) << 2) + 4) & 0x3ff) - 4;
  case R_ARM_THM_PC12: {
    // LDR (literal) encoding T2, add = (U == '1') imm12
    bool u = read16le(buf) & 0x0080;
    uint64_t imm12 = read16le(buf + 2) & 0x0fff;
    return u ? imm12 : -imm12;
  }
  }
}

TargetInfo *elf::getARMTargetInfo() {
  static ARM target;
  return &target;
}
