//===-- ARMAsmBackend.cpp - ARM Assembler Backend -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/ARMAsmBackend.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMAsmBackendDarwin.h"
#include "MCTargetDesc/ARMAsmBackendELF.h"
#include "MCTargetDesc/ARMAsmBackendWinCOFF.h"
#include "MCTargetDesc/ARMFixupKinds.h"
#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

namespace {
class ARMELFObjectWriter : public MCELFObjectTargetWriter {
public:
  ARMELFObjectWriter(uint8_t OSABI)
      : MCELFObjectTargetWriter(/*Is64Bit*/ false, OSABI, ELF::EM_ARM,
                                /*HasRelocationAddend*/ false) {}
};
} // end anonymous namespace

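// Map a relocation name from a .reloc directive (e.g. "R_ARM_NONE") to a
// fixup kind in the literal relocation range. Only ELF targets support
// explicit relocation names here.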
Optional<MCFixupKind> ARMAsmBackend::getFixupKind(StringRef Name) const {
  if (!STI.getTargetTriple().isOSBinFormatELF())
    return None;

  unsigned Type = llvm::StringSwitch<unsigned>(Name)
#define ELF_RELOC(X, Y) .Case(#X, Y)
#include "llvm/BinaryFormat/ELFRelocs/ARM.def"
#undef ELF_RELOC
                      .Default(-1u);
  if (Type == -1u)
    return None;
  return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
}

const MCFixupKindInfo &ARMAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
  unsigned IsPCRelConstant =
      MCFixupKindInfo::FKF_IsPCRel | MCFixupKindInfo::FKF_Constant;
  const static MCFixupKindInfo InfosLE[ARM::NumTargetFixupKinds] = {
      // This table *must* be in the order that the fixup_* kinds are defined in
      // ARMFixupKinds.h.
      //
      // Name                      Offset (bits) Size (bits)     Flags
      {"fixup_arm_ldst_pcrel_12", 0, 32, IsPCRelConstant},
      {"fixup_t2_ldst_pcrel_12", 0, 32,
       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_10_unscaled", 0, 32, IsPCRelConstant},
      {"fixup_arm_pcrel_10", 0, 32, IsPCRelConstant},
      {"fixup_t2_pcrel_10", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_pcrel_9", 0, 32,
       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_thumb_adr_pcrel_10", 0, 8,
       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_adr_pcrel_12", 0, 32, IsPCRelConstant},
      {"fixup_t2_adr_pcrel_12", 0, 32,
       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_condbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_condbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_blx", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_blx", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_cp", 0, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_bcc", 0, 8, MCFixupKindInfo::FKF_IsPCRel},
      // movw / movt: 16-bit immediate, but scattered into two chunks, bits
      // 0 - 11 and 16 - 19 of the instruction.
      {"fixup_arm_movt_hi16", 0, 20, 0},
      {"fixup_arm_movw_lo16", 0, 20, 0},
      {"fixup_t2_movt_hi16", 0, 20, 0},
      {"fixup_t2_movw_lo16", 0, 20, 0},
      {"fixup_arm_mod_imm", 0, 12, 0},
      {"fixup_t2_so_imm", 0, 26, 0},
      {"fixup_bf_branch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bf_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfl_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfc_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfcsel_else_target", 0, 32, 0},
      {"fixup_wls", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_le", 0, 32, MCFixupKindInfo::FKF_IsPCRel}
  };
  const static MCFixupKindInfo InfosBE[ARM::NumTargetFixupKinds] = {
      // This table *must* be in the order that the fixup_* kinds are defined in
      // ARMFixupKinds.h.
      //
      // Name                      Offset (bits) Size (bits)     Flags
      {"fixup_arm_ldst_pcrel_12", 0, 32, IsPCRelConstant},
      {"fixup_t2_ldst_pcrel_12", 0, 32,
       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_10_unscaled", 0, 32, IsPCRelConstant},
      {"fixup_arm_pcrel_10", 0, 32, IsPCRelConstant},
      {"fixup_t2_pcrel_10", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_pcrel_9", 0, 32,
       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_thumb_adr_pcrel_10", 8, 8,
       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_adr_pcrel_12", 0, 32, IsPCRelConstant},
      {"fixup_t2_adr_pcrel_12", 0, 32,
       IsPCRelConstant | MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_condbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_condbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_blx", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_blx", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_cp", 8, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_bcc", 8, 8, MCFixupKindInfo::FKF_IsPCRel},
      // movw / movt: 16-bit immediate, but scattered into two chunks, bits
      // 0 - 11 and 16 - 19 of the instruction.
      {"fixup_arm_movt_hi16", 12, 20, 0},
      {"fixup_arm_movw_lo16", 12, 20, 0},
      {"fixup_t2_movt_hi16", 12, 20, 0},
      {"fixup_t2_movw_lo16", 12, 20, 0},
      {"fixup_arm_mod_imm", 20, 12, 0},
      {"fixup_t2_so_imm", 26, 6, 0},
      {"fixup_bf_branch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bf_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfl_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfc_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfcsel_else_target", 0, 32, 0},
      {"fixup_wls", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_le", 0, 32, MCFixupKindInfo::FKF_IsPCRel}
  };

  // Fixup kinds from .reloc directive are like R_ARM_NONE. They do not require
  // any extra processing.
  if (Kind >= FirstLiteralRelocationKind)
    return MCAsmBackend::getFixupKindInfo(FK_NONE);

  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

  assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
         "Invalid kind!");
  return (Endian == support::little ? InfosLE
                                    : InfosBE)[Kind - FirstTargetFixupKind];
}

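// Track the .code 16 / .code 32 directives so that NOP padding (see
// writeNopData below) is emitted in the matching instruction set.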
void ARMAsmBackend::handleAssemblerFlag(MCAssemblerFlag Flag) {
  switch (Flag) {
  default:
    break;
  case MCAF_Code16:
    setIsThumb(true);
    break;
  case MCAF_Code32:
    setIsThumb(false);
    break;
  }
}

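// Return the opcode that Op should be rewritten to when its fixup turns out
// to be out of range, or Op itself if no relaxation is possible on this
// subtarget. Note that tCBZ/tCBNZ relax to tHINT (a NOP) rather than to a
// wider branch.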
unsigned ARMAsmBackend::getRelaxedOpcode(unsigned Op,
                                         const MCSubtargetInfo &STI) const {
  bool HasThumb2 = STI.getFeatureBits()[ARM::FeatureThumb2];
  bool HasV8MBaselineOps = STI.getFeatureBits()[ARM::HasV8MBaselineOps];

  switch (Op) {
  default:
    return Op;
  case ARM::tBcc:
    return HasThumb2 ? (unsigned)ARM::t2Bcc : Op;
  case ARM::tLDRpci:
    return HasThumb2 ? (unsigned)ARM::t2LDRpci : Op;
  case ARM::tADR:
    return HasThumb2 ? (unsigned)ARM::t2ADR : Op;
  case ARM::tB:
    return HasV8MBaselineOps ? (unsigned)ARM::t2B : Op;
  case ARM::tCBZ:
    return ARM::tHINT;
  case ARM::tCBNZ:
    return ARM::tHINT;
  }
}

bool ARMAsmBackend::mayNeedRelaxation(const MCInst &Inst,
                                      const MCSubtargetInfo &STI) const {
  if (getRelaxedOpcode(Inst.getOpcode(), STI) != Inst.getOpcode())
    return true;
  return false;
}

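// Check that a PC-relative fixup value, once the implicit +4 PC read bias is
// removed, lies within [Min, Max]. Returns a diagnostic string if it does
// not, or nullptr if the value is in range.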
static const char *checkPCRelOffset(uint64_t Value, int64_t Min, int64_t Max) {
  int64_t Offset = int64_t(Value) - 4;
  if (Offset < Min || Offset > Max)
    return "out of range pc-relative fixup value";
  return nullptr;
}

const char *ARMAsmBackend::reasonForFixupRelaxation(const MCFixup &Fixup,
                                                    uint64_t Value) const {
  switch (Fixup.getTargetKind()) {
  case ARM::fixup_arm_thumb_br: {
    // Relaxing tB to t2B. tB has a signed 12-bit displacement with the
    // low bit being an implied zero. There's an implied +4 offset for the
    // branch, so we adjust the other way here to determine what's
    // encodable.
    //
    // Relax if the adjusted offset is outside the [-2048, 2046] range of tB.
    int64_t Offset = int64_t(Value) - 4;
    if (Offset > 2046 || Offset < -2048)
      return "out of range pc-relative fixup value";
    break;
  }
  case ARM::fixup_arm_thumb_bcc: {
    // Relaxing tBcc to t2Bcc. tBcc has a signed 9-bit displacement with the
    // low bit being an implied zero. There's an implied +4 offset for the
    // branch, so we adjust the other way here to determine what's
    // encodable.
    //
    // Relax if the value is too big for a (signed) i8.
    int64_t Offset = int64_t(Value) - 4;
    if (Offset > 254 || Offset < -256)
      return "out of range pc-relative fixup value";
    break;
  }
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_cp: {
    // If the immediate is negative, greater than 1020, or not a multiple
    // of four, the wide version of the instruction must be used.
    int64_t Offset = int64_t(Value) - 4;
    if (Offset & 3)
      return "misaligned pc-relative fixup value";
    else if (Offset > 1020 || Offset < 0)
      return "out of range pc-relative fixup value";
    break;
  }
  case ARM::fixup_arm_thumb_cb: {
    // A Thumb CBZ/CBNZ whose target is the immediately following instruction
    // is out of range for the instruction's encoding; it will be relaxed to
    // a NOP instead.
    int64_t Offset = (Value & ~1);
    if (Offset == 2)
      return "will be converted to nop";
    break;
  }
  case ARM::fixup_bf_branch:
    return checkPCRelOffset(Value, 0, 30);
  case ARM::fixup_bf_target:
    return checkPCRelOffset(Value, -0x10000, +0xfffe);
  case ARM::fixup_bfl_target:
    return checkPCRelOffset(Value, -0x40000, +0x3fffe);
  case ARM::fixup_bfc_target:
    return checkPCRelOffset(Value, -0x1000, +0xffe);
  case ARM::fixup_wls:
    return checkPCRelOffset(Value, 0, +0xffe);
  case ARM::fixup_le:
    // The offset field in the LE and LETP instructions is an 11-bit
    // value shifted left by 2 (i.e. 0,2,4,...,4094), and it is
    // interpreted as a negative offset from the value read from pc,
    // i.e. from instruction_address+4.
    //
    // So an LE instruction can in principle address the instruction
    // immediately after itself, or (not very usefully) the address
    // half way through the 4-byte LE.
    return checkPCRelOffset(Value, -0xffe, 0);
  case ARM::fixup_bfcsel_else_target: {
    if (Value != 2 && Value != 4)
      return "out of range label-relative fixup value";
    break;
  }

  default:
    llvm_unreachable("Unexpected fixup kind in reasonForFixupRelaxation()!");
  }
  return nullptr;
}

bool ARMAsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                                         const MCRelaxableFragment *DF,
                                         const MCAsmLayout &Layout) const {
  return reasonForFixupRelaxation(Fixup, Value);
}

void ARMAsmBackend::relaxInstruction(MCInst &Inst,
                                     const MCSubtargetInfo &STI) const {
  unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode(), STI);

  // Sanity check w/ diagnostic if we get here w/ a bogus instruction.
  if (RelaxedOp == Inst.getOpcode()) {
    SmallString<256> Tmp;
    raw_svector_ostream OS(Tmp);
    Inst.dump_pretty(OS);
    OS << "\n";
    report_fatal_error("unexpected instruction to relax: " + OS.str());
  }

  // If we are changing Thumb CBZ or CBNZ instruction to a NOP, aka tHINT, we
  // have to change the operands too.
  if ((Inst.getOpcode() == ARM::tCBZ || Inst.getOpcode() == ARM::tCBNZ) &&
      RelaxedOp == ARM::tHINT) {
    MCInst Res;
    Res.setOpcode(RelaxedOp);
    Res.addOperand(MCOperand::createImm(0));
    Res.addOperand(MCOperand::createImm(14));
    Res.addOperand(MCOperand::createReg(0));
    Inst = std::move(Res);
    return;
  }

  // The rest of instructions we're relaxing have the same operands.
  // We just need to update to the proper opcode.
  Inst.setOpcode(RelaxedOp);
}

bool ARMAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count) const {
  const uint16_t Thumb1_16bitNopEncoding = 0x46c0; // using MOV r8,r8
  const uint16_t Thumb2_16bitNopEncoding = 0xbf00; // NOP
  const uint32_t ARMv4_NopEncoding = 0xe1a00000;   // using MOV r0,r0
  const uint32_t ARMv6T2_NopEncoding = 0xe320f000; // NOP
  if (isThumb()) {
    const uint16_t nopEncoding =
        hasNOP() ? Thumb2_16bitNopEncoding : Thumb1_16bitNopEncoding;
    uint64_t NumNops = Count / 2;
    for (uint64_t i = 0; i != NumNops; ++i)
      support::endian::write(OS, nopEncoding, Endian);
    if (Count & 1)
      OS << '\0';
    return true;
  }
  // ARM mode
  const uint32_t nopEncoding =
      hasNOP() ? ARMv6T2_NopEncoding : ARMv4_NopEncoding;
  uint64_t NumNops = Count / 4;
  for (uint64_t i = 0; i != NumNops; ++i)
    support::endian::write(OS, nopEncoding, Endian);
  // FIXME: should this function return false when unable to write exactly
  // 'Count' bytes with NOP encodings?
  switch (Count % 4) {
  default:
    break; // No leftover bytes to write
  case 1:
    OS << '\0';
    break;
  case 2:
    OS.write("\0\0", 2);
    break;
  case 3:
    OS.write("\0\0\xa0", 3);
    break;
  }

  return true;
}

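// Thumb2 fixup values are assembled below with the bits of the first
// instruction halfword in the upper 16 bits; on little-endian targets the two
// halves must be swapped so that applyFixup writes the first halfword at the
// lower address. For example, swapHalfWords(0xAAAABBBB, true) is 0xBBBBAAAA;
// on big-endian targets the value is returned unchanged.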
static uint32_t swapHalfWords(uint32_t Value, bool IsLittleEndian) {
  if (IsLittleEndian) {
    // Note that the halfwords are stored high first and low second in thumb;
    // so we need to swap the fixup value here to map properly.
    uint32_t Swapped = (Value & 0xFFFF0000) >> 16;
    Swapped |= (Value & 0x0000FFFF) << 16;
    return Swapped;
  } else
    return Value;
}

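// Combine separately computed fixup bits for the first and second halfwords
// of a 32-bit Thumb instruction into one value laid out for the target
// endianness, e.g. joinHalfWords(0xF123, 0xD456, true) is 0xD456F123.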
static uint32_t joinHalfWords(uint32_t FirstHalf, uint32_t SecondHalf,
                              bool IsLittleEndian) {
  uint32_t Value;

  if (IsLittleEndian) {
    Value = (SecondHalf & 0xFFFF) << 16;
    Value |= (FirstHalf & 0xFFFF);
  } else {
    Value = (SecondHalf & 0xFFFF);
    Value |= (FirstHalf & 0xFFFF) << 16;
  }

  return Value;
}

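// Compute the raw bits that applyFixup will patch into the instruction (or
// data) for this fixup, reporting out-of-range and misaligned values through
// Ctx.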
unsigned ARMAsmBackend::adjustFixupValue(const MCAssembler &Asm,
                                         const MCFixup &Fixup,
                                         const MCValue &Target, uint64_t Value,
                                         bool IsResolved, MCContext &Ctx,
                                         const MCSubtargetInfo* STI) const {
  unsigned Kind = Fixup.getKind();

  // MachO tries to make .o files that look vaguely pre-linked, so for MOVW/MOVT
  // and .word relocations they put the Thumb bit into the addend if possible.
  // Other relocation types don't want this bit though (branches couldn't encode
  // it if it *was* present, and no other relocations exist) and it can
  // interfere with checking valid expressions.
  if (const MCSymbolRefExpr *A = Target.getSymA()) {
    if (A->hasSubsectionsViaSymbols() && Asm.isThumbFunc(&A->getSymbol()) &&
        A->getSymbol().isExternal() &&
        (Kind == FK_Data_4 || Kind == ARM::fixup_arm_movw_lo16 ||
         Kind == ARM::fixup_arm_movt_hi16 || Kind == ARM::fixup_t2_movw_lo16 ||
         Kind == ARM::fixup_t2_movt_hi16))
      Value |= 1;
  }

  switch (Kind) {
  default:
    Ctx.reportError(Fixup.getLoc(), "bad relocation fixup type");
    return 0;
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
    return Value;
  case FK_SecRel_2:
    return Value;
  case FK_SecRel_4:
    return Value;
  case ARM::fixup_arm_movt_hi16:
    assert(STI != nullptr);
    if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
      Value >>= 16;
    LLVM_FALLTHROUGH;
  case ARM::fixup_arm_movw_lo16: {
    unsigned Hi4 = (Value & 0xF000) >> 12;
    unsigned Lo12 = Value & 0x0FFF;
    // inst{19-16} = Hi4;
    // inst{11-0} = Lo12;
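    // e.g. an adjusted Value of 0x1234 gives Hi4 = 0x1 and Lo12 = 0x234, so
    // the returned fixup bits are 0x10234.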
    Value = (Hi4 << 16) | (Lo12);
    return Value;
  }
  case ARM::fixup_t2_movt_hi16:
    assert(STI != nullptr);
    if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
      Value >>= 16;
    LLVM_FALLTHROUGH;
  case ARM::fixup_t2_movw_lo16: {
    unsigned Hi4 = (Value & 0xF000) >> 12;
    unsigned i = (Value & 0x800) >> 11;
    unsigned Mid3 = (Value & 0x700) >> 8;
    unsigned Lo8 = Value & 0x0FF;
    // inst{19-16} = Hi4;
    // inst{26} = i;
    // inst{14-12} = Mid3;
    // inst{7-0} = Lo8;
    Value = (Hi4 << 16) | (i << 26) | (Mid3 << 12) | (Lo8);
    return swapHalfWords(Value, Endian == support::little);
  }
  case ARM::fixup_arm_ldst_pcrel_12:
    // ARM PC-relative values are offset by 8: subtract 4 here and another 4
    // in the shared Thumb2 code below via the fallthrough.
    Value -= 4;
    LLVM_FALLTHROUGH;
  case ARM::fixup_t2_ldst_pcrel_12: {
    // Offset by 4, adjusted by two due to the half-word ordering of thumb.
    Value -= 4;
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    if (Value >= 4096) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value |= isAdd << 23;

    // Same addressing mode as fixup_arm_ldst_pcrel_12, but with the 16-bit
    // halfwords swapped.
    if (Kind == ARM::fixup_t2_ldst_pcrel_12)
      return swapHalfWords(Value, Endian == support::little);

    return Value;
  }
  case ARM::fixup_arm_adr_pcrel_12: {
    // ARM PC-relative values are offset by 8.
    Value -= 8;
    unsigned opc = 4; // bits {24-21}. Default to add: 0b0100
    if ((int64_t)Value < 0) {
      Value = -Value;
      opc = 2; // 0b0010
    }
    if (ARM_AM::getSOImmVal(Value) == -1) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    // Encode the immediate and shift the opcode into place.
    return ARM_AM::getSOImmVal(Value) | (opc << 21);
  }

  case ARM::fixup_t2_adr_pcrel_12: {
    Value -= 4;
    unsigned opc = 0;
    if ((int64_t)Value < 0) {
      Value = -Value;
      opc = 5;
    }

    uint32_t out = (opc << 21);
    out |= (Value & 0x800) << 15;
    out |= (Value & 0x700) << 4;
    out |= (Value & 0x0FF);

    return swapHalfWords(out, Endian == support::little);
  }

  case ARM::fixup_arm_condbranch:
  case ARM::fixup_arm_uncondbranch:
  case ARM::fixup_arm_uncondbl:
  case ARM::fixup_arm_condbl:
  case ARM::fixup_arm_blx:
    // These values don't encode the low two bits since they're always zero.
    // Offset by 8 just as above.
    if (const MCSymbolRefExpr *SRE =
            dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
      if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
        return 0;
    return 0xffffff & ((Value - 8) >> 2);
  case ARM::fixup_t2_uncondbranch: {
    Value = Value - 4;
    if (!isInt<25>(Value)) {
      Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
      return 0;
    }

    Value >>= 1; // Low bit is not encoded.

    uint32_t out = 0;
    bool I = Value & 0x800000;
    bool J1 = Value & 0x400000;
    bool J2 = Value & 0x200000;
    J1 ^= I;
    J2 ^= I;

    out |= I << 26;                 // S bit
    out |= !J1 << 13;               // J1 bit
    out |= !J2 << 11;               // J2 bit
    out |= (Value & 0x1FF800) << 5; // imm10 field
    out |= (Value & 0x0007FF);      // imm11 field

    return swapHalfWords(out, Endian == support::little);
  }
  case ARM::fixup_t2_condbranch: {
    Value = Value - 4;
    if (!isInt<21>(Value)) {
      Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
      return 0;
    }

    Value >>= 1; // Low bit is not encoded.

    uint64_t out = 0;
    out |= (Value & 0x80000) << 7; // S bit
    out |= (Value & 0x40000) >> 7; // J2 bit
    out |= (Value & 0x20000) >> 4; // J1 bit
    out |= (Value & 0x1F800) << 5; // imm6 field
    out |= (Value & 0x007FF);      // imm11 field

    return swapHalfWords(out, Endian == support::little);
  }
  case ARM::fixup_arm_thumb_bl: {
    if (!isInt<25>(Value - 4) ||
        (!STI->getFeatureBits()[ARM::FeatureThumb2] &&
         !STI->getFeatureBits()[ARM::HasV8MBaselineOps] &&
         !STI->getFeatureBits()[ARM::HasV6MOps] &&
         !isInt<23>(Value - 4))) {
      Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
      return 0;
    }

    // The value doesn't encode the low bit (always zero) and is offset by
    // four. The 32-bit immediate value is encoded as
    //   imm32 = SignExtend(S:I1:I2:imm10:imm11:0)
    // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
    // The value is encoded into disjoint bit positions in the destination
    // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
    // J = either J1 or J2 bit
    //
    //   BL:  xxxxxSIIIIIIIIII xxJxJIIIIIIIIIII
    //
    // Note that the halfwords are stored high first, low second; so we need
    // to transpose the fixup value here to map properly.
    uint32_t offset = (Value - 4) >> 1;
    uint32_t signBit = (offset & 0x800000) >> 23;
    uint32_t I1Bit = (offset & 0x400000) >> 22;
    uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
    uint32_t I2Bit = (offset & 0x200000) >> 21;
    uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
    uint32_t imm10Bits = (offset & 0x1FF800) >> 11;
    uint32_t imm11Bits = (offset & 0x000007FF);

    uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10Bits);
    uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
                           (uint16_t)imm11Bits);
    return joinHalfWords(FirstHalf, SecondHalf, Endian == support::little);
  }
  case ARM::fixup_arm_thumb_blx: {
    // The value doesn't encode the low two bits (always zero) and is offset by
    // four (see fixup_arm_thumb_cp). The 32-bit immediate value is encoded as
    //   imm32 = SignExtend(S:I1:I2:imm10H:imm10L:00)
    // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
    // The value is encoded into disjoint bit positions in the destination
    // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
    // J = either J1 or J2 bit, 0 = zero.
    //
    //   BLX: xxxxxSIIIIIIIIII xxJxJIIIIIIIIII0
    //
    // Note that the halfwords are stored high first, low second; so we need
    // to transpose the fixup value here to map properly.
    if (Value % 4 != 0) {
      Ctx.reportError(Fixup.getLoc(), "misaligned ARM call destination");
      return 0;
    }

    uint32_t offset = (Value - 4) >> 2;
    if (const MCSymbolRefExpr *SRE =
            dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
      if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
        offset = 0;
    uint32_t signBit = (offset & 0x400000) >> 22;
    uint32_t I1Bit = (offset & 0x200000) >> 21;
    uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
    uint32_t I2Bit = (offset & 0x100000) >> 20;
    uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
    uint32_t imm10HBits = (offset & 0xFFC00) >> 10;
    uint32_t imm10LBits = (offset & 0x3FF);

    uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10HBits);
    uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
                           ((uint16_t)imm10LBits) << 1);
    return joinHalfWords(FirstHalf, SecondHalf, Endian == support::little);
  }
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_cp:
    // On CPUs supporting Thumb2, this will be relaxed to an ldr.w, otherwise we
    // could have an error on our hands.
    assert(STI != nullptr);
    if (!STI->getFeatureBits()[ARM::FeatureThumb2] && IsResolved) {
      const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
      if (FixupDiagnostic) {
        Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
        return 0;
      }
    }
    // Offset by 4, and don't encode the low two bits.
    return ((Value - 4) >> 2) & 0xff;
  case ARM::fixup_arm_thumb_cb: {
    // CB instructions can only branch to offsets in [4, 126] in multiples of 2
    // so ensure that the raw value LSB is zero and it lies in [2, 130].
    // An offset of 2 will be relaxed to a NOP.
    if ((int64_t)Value < 2 || Value > 0x82 || Value & 1) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    // Offset by 4 and don't encode the lower bit, which is always 0.
    // FIXME: diagnose if no Thumb2
    uint32_t Binary = (Value - 4) >> 1;
    return ((Binary & 0x20) << 4) | ((Binary & 0x1f) << 3);
  }
  case ARM::fixup_arm_thumb_br:
    // Offset by 4 and don't encode the lower bit, which is always 0.
    assert(STI != nullptr);
    if (!STI->getFeatureBits()[ARM::FeatureThumb2] &&
        !STI->getFeatureBits()[ARM::HasV8MBaselineOps]) {
      const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
      if (FixupDiagnostic) {
        Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
        return 0;
      }
    }
    return ((Value - 4) >> 1) & 0x7ff;
  case ARM::fixup_arm_thumb_bcc:
    // Offset by 4 and don't encode the lower bit, which is always 0.
    assert(STI != nullptr);
    if (!STI->getFeatureBits()[ARM::FeatureThumb2]) {
      const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
      if (FixupDiagnostic) {
        Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
        return 0;
      }
    }
    return ((Value - 4) >> 1) & 0xff;
  case ARM::fixup_arm_pcrel_10_unscaled: {
    Value = Value - 8; // ARM fixups offset by an additional word and don't
                       // need to adjust for the half-word ordering.
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    // The value has the low 4 bits encoded in [3:0] and the high 4 in [11:8].
    if (Value >= 256) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value = (Value & 0xf) | ((Value & 0xf0) << 4);
    return Value | (isAdd << 23);
  }
  case ARM::fixup_arm_pcrel_10:
    Value = Value - 4; // ARM fixups offset by an additional word and don't
                       // need to adjust for the half-word ordering.
    LLVM_FALLTHROUGH;
  case ARM::fixup_t2_pcrel_10: {
    // Offset by 4, adjusted by two due to the half-word ordering of thumb.
    Value = Value - 4;
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    // These values don't encode the low two bits since they're always zero.
    Value >>= 2;
    if (Value >= 256) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value |= isAdd << 23;

    // Same addressing mode as fixup_arm_pcrel_10, but with 16-bit halfwords
    // swapped.
    if (Kind == ARM::fixup_t2_pcrel_10)
      return swapHalfWords(Value, Endian == support::little);

    return Value;
  }
  case ARM::fixup_arm_pcrel_9:
    Value = Value - 4; // ARM fixups offset by an additional word and don't
                       // need to adjust for the half-word ordering.
    LLVM_FALLTHROUGH;
  case ARM::fixup_t2_pcrel_9: {
    // Offset by 4, adjusted by two due to the half-word ordering of thumb.
    Value = Value - 4;
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    // These values don't encode the low bit since it's always zero.
    if (Value & 1) {
      Ctx.reportError(Fixup.getLoc(), "invalid value for this fixup");
      return 0;
    }
    Value >>= 1;
    if (Value >= 256) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value |= isAdd << 23;

    // Same addressing mode as fixup_arm_pcrel_9, but with 16-bit halfwords
    // swapped.
    if (Kind == ARM::fixup_t2_pcrel_9)
      return swapHalfWords(Value, Endian == support::little);

    return Value;
  }
  case ARM::fixup_arm_mod_imm:
    Value = ARM_AM::getSOImmVal(Value);
    if (Value >> 12) {
      Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value");
      return 0;
    }
    return Value;
  case ARM::fixup_t2_so_imm: {
    Value = ARM_AM::getT2SOImmVal(Value);
    if ((int64_t)Value < 0) {
      Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value");
      return 0;
    }
    // Value will contain a 12-bit value broken up into a 4-bit shift in bits
    // 11:8 and the 8-bit immediate in 0:7. The instruction has the immediate
    // in 0:7. The 4-bit shift is split up into i:imm3 where i is placed at bit
    // 10 of the upper half-word and imm3 is placed at 14:12 of the lower
    // half-word.
    uint64_t EncValue = 0;
    EncValue |= (Value & 0x800) << 15;
    EncValue |= (Value & 0x700) << 4;
    EncValue |= (Value & 0xff);
    return swapHalfWords(EncValue, Endian == support::little);
  }
  case ARM::fixup_bf_branch: {
    const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
    if (FixupDiagnostic) {
      Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
      return 0;
    }
    uint32_t out = (((Value - 4) >> 1) & 0xf) << 23;
    return swapHalfWords(out, Endian == support::little);
  }
  case ARM::fixup_bf_target:
  case ARM::fixup_bfl_target:
  case ARM::fixup_bfc_target: {
    const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
    if (FixupDiagnostic) {
      Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
      return 0;
    }
    uint32_t out = 0;
    uint32_t HighBitMask = (Kind == ARM::fixup_bf_target ? 0xf800 :
                            Kind == ARM::fixup_bfl_target ? 0x3f800 : 0x800);
    out |= (((Value - 4) >> 1) & 0x1) << 11;
    out |= (((Value - 4) >> 1) & 0x7fe);
    out |= (((Value - 4) >> 1) & HighBitMask) << 5;
    return swapHalfWords(out, Endian == support::little);
  }
  case ARM::fixup_bfcsel_else_target: {
    // If this is a fixup of a branch future's else target then it should be a
    // constant MCExpr representing the distance between the branch's target
    // and the instruction after that same branch.
    Value = Target.getConstant();

    const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
    if (FixupDiagnostic) {
      Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
      return 0;
    }
    uint32_t out = ((Value >> 2) & 1) << 17;
    return swapHalfWords(out, Endian == support::little);
  }
  case ARM::fixup_wls:
  case ARM::fixup_le: {
    const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
    if (FixupDiagnostic) {
      Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
      return 0;
    }
    uint64_t real_value = Value - 4;
    uint32_t out = 0;
    if (Kind == ARM::fixup_le)
      real_value = -real_value;
    out |= ((real_value >> 1) & 0x1) << 11;
    out |= ((real_value >> 1) & 0x7fe);
    return swapHalfWords(out, Endian == support::little);
  }
  }
}

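// Decide whether a fixup must always be lowered to a relocation, even if its
// value could be resolved at assembly time; e.g. BL/BLX against a symbol must
// stay relocatable so the linker can see the callee's Thumb-ness for
// interworking.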
bool ARMAsmBackend::shouldForceRelocation(const MCAssembler &Asm,
                                          const MCFixup &Fixup,
                                          const MCValue &Target) {
  const MCSymbolRefExpr *A = Target.getSymA();
  const MCSymbol *Sym = A ? &A->getSymbol() : nullptr;
  const unsigned FixupKind = Fixup.getKind();
  if (FixupKind >= FirstLiteralRelocationKind)
    return true;
  if (FixupKind == ARM::fixup_arm_thumb_bl) {
    assert(Sym && "How did we resolve this?");

    // If the symbol is external the linker will handle it.
    // FIXME: Should we handle it as an optimization?

    // If the symbol is out of range, produce a relocation and hope the
    // linker can handle it. GNU AS produces an error in this case.
    if (Sym->isExternal())
      return true;
  }
  // Create relocations for unconditional branches to function symbols with
  // different execution mode in ELF binaries.
  if (Sym && Sym->isELF()) {
    unsigned Type = cast<MCSymbolELF>(Sym)->getType();
    if ((Type == ELF::STT_FUNC || Type == ELF::STT_GNU_IFUNC)) {
      if (Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_uncondbranch))
        return true;
      if (!Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_thumb_br ||
                                    FixupKind == ARM::fixup_arm_thumb_bl ||
                                    FixupKind == ARM::fixup_t2_condbranch ||
                                    FixupKind == ARM::fixup_t2_uncondbranch))
        return true;
    }
  }
  // We must always generate a relocation for BL/BLX instructions if we have
  // a symbol to reference, as the linker relies on knowing the destination
  // symbol's thumb-ness to get interworking right.
  if (A && (FixupKind == ARM::fixup_arm_thumb_blx ||
            FixupKind == ARM::fixup_arm_blx ||
            FixupKind == ARM::fixup_arm_uncondbl ||
            FixupKind == ARM::fixup_arm_condbl))
    return true;
  return false;
}

/// getFixupKindNumBytes - The number of bytes the fixup may change.
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
  case ARM::fixup_arm_thumb_bcc:
  case ARM::fixup_arm_thumb_cp:
  case ARM::fixup_thumb_adr_pcrel_10:
    return 1;

  case FK_Data_2:
  case ARM::fixup_arm_thumb_br:
  case ARM::fixup_arm_thumb_cb:
  case ARM::fixup_arm_mod_imm:
    return 2;

  case ARM::fixup_arm_pcrel_10_unscaled:
  case ARM::fixup_arm_ldst_pcrel_12:
  case ARM::fixup_arm_pcrel_10:
  case ARM::fixup_arm_pcrel_9:
  case ARM::fixup_arm_adr_pcrel_12:
  case ARM::fixup_arm_uncondbl:
  case ARM::fixup_arm_condbl:
  case ARM::fixup_arm_blx:
  case ARM::fixup_arm_condbranch:
  case ARM::fixup_arm_uncondbranch:
    return 3;

  case FK_Data_4:
  case ARM::fixup_t2_ldst_pcrel_12:
  case ARM::fixup_t2_condbranch:
  case ARM::fixup_t2_uncondbranch:
  case ARM::fixup_t2_pcrel_10:
  case ARM::fixup_t2_pcrel_9:
  case ARM::fixup_t2_adr_pcrel_12:
  case ARM::fixup_arm_thumb_bl:
  case ARM::fixup_arm_thumb_blx:
  case ARM::fixup_arm_movt_hi16:
  case ARM::fixup_arm_movw_lo16:
  case ARM::fixup_t2_movt_hi16:
  case ARM::fixup_t2_movw_lo16:
  case ARM::fixup_t2_so_imm:
  case ARM::fixup_bf_branch:
  case ARM::fixup_bf_target:
  case ARM::fixup_bfl_target:
  case ARM::fixup_bfc_target:
  case ARM::fixup_bfcsel_else_target:
  case ARM::fixup_wls:
  case ARM::fixup_le:
    return 4;

  case FK_SecRel_2:
    return 2;
  case FK_SecRel_4:
    return 4;
  }
}

/// getFixupKindContainerSizeBytes - The number of bytes of the
/// container involved in big endian.
static unsigned getFixupKindContainerSizeBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
    return 1;
  case FK_Data_2:
    return 2;
  case FK_Data_4:
    return 4;

  case ARM::fixup_arm_thumb_bcc:
  case ARM::fixup_arm_thumb_cp:
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_br:
  case ARM::fixup_arm_thumb_cb:
    // Instruction size is 2 bytes.
    return 2;

  case ARM::fixup_arm_pcrel_10_unscaled:
  case ARM::fixup_arm_ldst_pcrel_12:
  case ARM::fixup_arm_pcrel_10:
  case ARM::fixup_arm_pcrel_9:
  case ARM::fixup_arm_adr_pcrel_12:
  case ARM::fixup_arm_uncondbl:
  case ARM::fixup_arm_condbl:
  case ARM::fixup_arm_blx:
  case ARM::fixup_arm_condbranch:
  case ARM::fixup_arm_uncondbranch:
  case ARM::fixup_t2_ldst_pcrel_12:
  case ARM::fixup_t2_condbranch:
  case ARM::fixup_t2_uncondbranch:
  case ARM::fixup_t2_pcrel_10:
  case ARM::fixup_t2_adr_pcrel_12:
  case ARM::fixup_arm_thumb_bl:
  case ARM::fixup_arm_thumb_blx:
  case ARM::fixup_arm_movt_hi16:
  case ARM::fixup_arm_movw_lo16:
  case ARM::fixup_t2_movt_hi16:
  case ARM::fixup_t2_movw_lo16:
  case ARM::fixup_arm_mod_imm:
  case ARM::fixup_t2_so_imm:
  case ARM::fixup_bf_branch:
  case ARM::fixup_bf_target:
  case ARM::fixup_bfl_target:
  case ARM::fixup_bfc_target:
  case ARM::fixup_bfcsel_else_target:
  case ARM::fixup_wls:
  case ARM::fixup_le:
    // Instruction size is 4 bytes.
    return 4;
  }
}

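// OR the adjusted fixup bits into the instruction (or data) bytes, taking the
// container size into account for big-endian targets.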
void ARMAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                               const MCValue &Target,
                               MutableArrayRef<char> Data, uint64_t Value,
                               bool IsResolved,
                               const MCSubtargetInfo* STI) const {
  unsigned Kind = Fixup.getKind();
  if (Kind >= FirstLiteralRelocationKind)
    return;
  unsigned NumBytes = getFixupKindNumBytes(Kind);
  MCContext &Ctx = Asm.getContext();
  Value = adjustFixupValue(Asm, Fixup, Target, Value, IsResolved, Ctx, STI);
  if (!Value)
    return; // Doesn't change encoding.

  unsigned Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // Used to point to big endian bytes.
  unsigned FullSizeBytes;
  if (Endian == support::big) {
    FullSizeBytes = getFixupKindContainerSizeBytes(Kind);
    assert((Offset + FullSizeBytes) <= Data.size() && "Invalid fixup size!");
    assert(NumBytes <= FullSizeBytes && "Invalid fixup size!");
  }

  // For each byte of the fragment that the fixup touches, mask in the bits from
  // the fixup value. The Value has been "split up" into the appropriate
  // bitfields above.
  for (unsigned i = 0; i != NumBytes; ++i) {
    unsigned Idx = Endian == support::little ? i : (FullSizeBytes - 1 - i);
    Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
  }
}

namespace CU {

/// Compact unwind encoding values.
enum CompactUnwindEncodings {
  UNWIND_ARM_MODE_MASK                         = 0x0F000000,
  UNWIND_ARM_MODE_FRAME                        = 0x01000000,
  UNWIND_ARM_MODE_FRAME_D                      = 0x02000000,
  UNWIND_ARM_MODE_DWARF                        = 0x04000000,

  UNWIND_ARM_FRAME_STACK_ADJUST_MASK           = 0x00C00000,

  UNWIND_ARM_FRAME_FIRST_PUSH_R4               = 0x00000001,
  UNWIND_ARM_FRAME_FIRST_PUSH_R5               = 0x00000002,
  UNWIND_ARM_FRAME_FIRST_PUSH_R6               = 0x00000004,

  UNWIND_ARM_FRAME_SECOND_PUSH_R8              = 0x00000008,
  UNWIND_ARM_FRAME_SECOND_PUSH_R9              = 0x00000010,
  UNWIND_ARM_FRAME_SECOND_PUSH_R10             = 0x00000020,
  UNWIND_ARM_FRAME_SECOND_PUSH_R11             = 0x00000040,
  UNWIND_ARM_FRAME_SECOND_PUSH_R12             = 0x00000080,

  UNWIND_ARM_FRAME_D_REG_COUNT_MASK            = 0x00000F00,

  UNWIND_ARM_DWARF_SECTION_OFFSET              = 0x00FFFFFF
};

} // end CU namespace

/// Generate compact unwind encoding for the function based on the CFI
/// instructions. If the CFI instructions describe a frame that cannot be
/// encoded in compact unwind, the method returns UNWIND_ARM_MODE_DWARF, which
/// tells the runtime to fall back to unwinding the frame using DWARF.
uint32_t ARMAsmBackendDarwin::generateCompactUnwindEncoding(
    ArrayRef<MCCFIInstruction> Instrs) const {
  DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "generateCU()\n");
  // Only armv7k uses CFI based unwinding.
  if (Subtype != MachO::CPU_SUBTYPE_ARM_V7K)
    return 0;
  // No .cfi directives means no frame.
  if (Instrs.empty())
    return 0;
  // Start off assuming CFA is at SP+0.
  unsigned CFARegister = ARM::SP;
  int CFARegisterOffset = 0;
  // Mark savable registers as initially unsaved
  DenseMap<unsigned, int> RegOffsets;
  int FloatRegCount = 0;
  // Process each .cfi directive and build up compact unwind info.
  for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
    unsigned Reg;
    const MCCFIInstruction &Inst = Instrs[i];
    switch (Inst.getOperation()) {
    case MCCFIInstruction::OpDefCfa: // DW_CFA_def_cfa
      CFARegisterOffset = Inst.getOffset();
      CFARegister = *MRI.getLLVMRegNum(Inst.getRegister(), true);
      break;
    case MCCFIInstruction::OpDefCfaOffset: // DW_CFA_def_cfa_offset
      CFARegisterOffset = Inst.getOffset();
      break;
    case MCCFIInstruction::OpDefCfaRegister: // DW_CFA_def_cfa_register
      CFARegister = *MRI.getLLVMRegNum(Inst.getRegister(), true);
      break;
    case MCCFIInstruction::OpOffset: // DW_CFA_offset
      Reg = *MRI.getLLVMRegNum(Inst.getRegister(), true);
      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
        RegOffsets[Reg] = Inst.getOffset();
      else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
        RegOffsets[Reg] = Inst.getOffset();
        ++FloatRegCount;
      } else {
        DEBUG_WITH_TYPE("compact-unwind",
                        llvm::dbgs() << ".cfi_offset on unknown register="
                                     << Inst.getRegister() << "\n");
        return CU::UNWIND_ARM_MODE_DWARF;
      }
      break;
    case MCCFIInstruction::OpRelOffset: // DW_CFA_advance_loc
      // Ignore
      break;
    default:
      // Directive not convertible to compact unwind, bail out.
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs()
                          << "CFI directive not compatible with compact "
                             "unwind encoding, opcode=" << Inst.getOperation()
                          << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
      break;
    }
  }

  // If no frame set up, return no unwind info.
  if ((CFARegister == ARM::SP) && (CFARegisterOffset == 0))
    return 0;

  // Verify standard frame (lr/r7) was used.
  if (CFARegister != ARM::R7) {
    DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "frame register is "
                                                   << CFARegister
                                                   << " instead of r7\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  int StackAdjust = CFARegisterOffset - 8;
  if (RegOffsets.lookup(ARM::LR) != (-4 - StackAdjust)) {
    DEBUG_WITH_TYPE("compact-unwind",
                    llvm::dbgs()
                        << "LR not saved as standard frame, StackAdjust="
                        << StackAdjust
                        << ", CFARegisterOffset=" << CFARegisterOffset
                        << ", lr save at offset=" << RegOffsets[14] << "\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  if (RegOffsets.lookup(ARM::R7) != (-8 - StackAdjust)) {
    DEBUG_WITH_TYPE("compact-unwind",
                    llvm::dbgs() << "r7 not saved as standard frame\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  uint32_t CompactUnwindEncoding = CU::UNWIND_ARM_MODE_FRAME;

  // If var-args are used, there may be a stack adjust required.
  switch (StackAdjust) {
  case 0:
    break;
  case 4:
    CompactUnwindEncoding |= 0x00400000;
    break;
  case 8:
    CompactUnwindEncoding |= 0x00800000;
    break;
  case 12:
    CompactUnwindEncoding |= 0x00C00000;
    break;
  default:
    DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs()
                                          << ".cfi_def_cfa stack adjust ("
                                          << StackAdjust << ") out of range\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }

  // If r6 is saved, it must be right below r7.
  static struct {
    unsigned Reg;
    unsigned Encoding;
  } GPRCSRegs[] = {{ARM::R6, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R6},
                   {ARM::R5, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R5},
                   {ARM::R4, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R4},
                   {ARM::R12, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R12},
                   {ARM::R11, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R11},
                   {ARM::R10, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R10},
                   {ARM::R9, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R9},
                   {ARM::R8, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R8}};

  int CurOffset = -8 - StackAdjust;
  for (auto CSReg : GPRCSRegs) {
    auto Offset = RegOffsets.find(CSReg.Reg);
    if (Offset == RegOffsets.end())
      continue;

    int RegOffset = Offset->second;
    if (RegOffset != CurOffset - 4) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << MRI.getName(CSReg.Reg) << " saved at "
                                   << RegOffset << " but only supported at "
                                   << CurOffset << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    }
    CompactUnwindEncoding |= CSReg.Encoding;
    CurOffset -= 4;
  }

  // If no floats saved, we are done.
  if (FloatRegCount == 0)
    return CompactUnwindEncoding;

  // Switch mode to include D register saving.
  CompactUnwindEncoding &= ~CU::UNWIND_ARM_MODE_MASK;
  CompactUnwindEncoding |= CU::UNWIND_ARM_MODE_FRAME_D;

  // FIXME: supporting more than 4 saved D-registers compactly would be trivial,
  // but needs coordination with the linker and libunwind.
  if (FloatRegCount > 4) {
    DEBUG_WITH_TYPE("compact-unwind",
                    llvm::dbgs() << "unsupported number of D registers saved ("
                                 << FloatRegCount << ")\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }

  // Floating point registers must either be saved sequentially, or we defer to
  // DWARF. No gaps allowed here so check that each saved d-register is
  // precisely where it should be.
  static unsigned FPRCSRegs[] = { ARM::D8, ARM::D10, ARM::D12, ARM::D14 };
  for (int Idx = FloatRegCount - 1; Idx >= 0; --Idx) {
    auto Offset = RegOffsets.find(FPRCSRegs[Idx]);
    if (Offset == RegOffsets.end()) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << FloatRegCount << " D-regs saved, but "
                                   << MRI.getName(FPRCSRegs[Idx])
                                   << " not saved\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    } else if (Offset->second != CurOffset - 8) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << FloatRegCount << " D-regs saved, but "
                                   << MRI.getName(FPRCSRegs[Idx])
                                   << " saved at " << Offset->second
                                   << ", expected at " << CurOffset - 8
                                   << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    }
    CurOffset -= 8;
  }

  return CompactUnwindEncoding | ((FloatRegCount - 1) << 8);
}

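// Create the object-format specific asm backend (Mach-O, COFF or ELF) for the
// target triple; the requested endianness only applies to the ELF backend.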
static MCAsmBackend *createARMAsmBackend(const Target &T,
                                         const MCSubtargetInfo &STI,
                                         const MCRegisterInfo &MRI,
                                         const MCTargetOptions &Options,
                                         support::endianness Endian) {
  const Triple &TheTriple = STI.getTargetTriple();
  switch (TheTriple.getObjectFormat()) {
  default:
    llvm_unreachable("unsupported object format");
  case Triple::MachO:
    return new ARMAsmBackendDarwin(T, STI, MRI);
  case Triple::COFF:
    assert(TheTriple.isOSWindows() && "non-Windows ARM COFF is not supported");
    return new ARMAsmBackendWinCOFF(T, STI);
  case Triple::ELF:
    assert(TheTriple.isOSBinFormatELF() && "using ELF for non-ELF target");
    uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
    return new ARMAsmBackendELF(T, STI, OSABI, Endian);
  }
}

MCAsmBackend *llvm::createARMLEAsmBackend(const Target &T,
                                          const MCSubtargetInfo &STI,
                                          const MCRegisterInfo &MRI,
                                          const MCTargetOptions &Options) {
  return createARMAsmBackend(T, STI, MRI, Options, support::little);
}

MCAsmBackend *llvm::createARMBEAsmBackend(const Target &T,
                                          const MCSubtargetInfo &STI,
                                          const MCRegisterInfo &MRI,
                                          const MCTargetOptions &Options) {
  return createARMAsmBackend(T, STI, MRI, Options, support::big);
}