RuntimeDyldMachOAArch64.h revision 296417
//===-- RuntimeDyldMachOAArch64.h -- MachO/AArch64 specific code. -*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOAARCH64_H
#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOAARCH64_H

#include "../RuntimeDyldMachO.h"
#include "llvm/Support/Endian.h"

#define DEBUG_TYPE "dyld"

namespace llvm {

class RuntimeDyldMachOAArch64
    : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOAArch64> {
public:

  typedef uint64_t TargetPtrT;

  RuntimeDyldMachOAArch64(RuntimeDyld::MemoryManager &MM,
                          RuntimeDyld::SymbolResolver &Resolver)
      : RuntimeDyldMachOCRTPBase(MM, Resolver) {}

  unsigned getMaxStubSize() override { return 8; }

  unsigned getStubAlignment() override { return 8; }
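
  // Note: each stub emitted by this class is a single GOT entry: an 8-byte,
  // 8-byte-aligned slot holding the target's absolute address (see
  // processGOTRelocation below), hence both values above are 8.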

  /// Extract the addend encoded in the instruction / memory location.
  int64_t decodeAddend(const RelocationEntry &RE) const {
    const SectionEntry &Section = Sections[RE.SectionID];
    uint8_t *LocalAddress = Section.getAddressWithOffset(RE.Offset);
    unsigned NumBytes = 1 << RE.Size;
    int64_t Addend = 0;
    // Verify that the relocation has the correct size and alignment.
    switch (RE.RelType) {
    default:
      llvm_unreachable("Unsupported relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED:
      assert((NumBytes == 4 || NumBytes == 8) && "Invalid relocation size.");
      break;
    case MachO::ARM64_RELOC_BRANCH26:
    case MachO::ARM64_RELOC_PAGE21:
    case MachO::ARM64_RELOC_PAGEOFF12:
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
      assert(NumBytes == 4 && "Invalid relocation size.");
      assert((((uintptr_t)LocalAddress & 0x3) == 0) &&
             "Instruction address is not aligned to 4 bytes.");
      break;
    }

    switch (RE.RelType) {
    default:
      llvm_unreachable("Unsupported relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED:
      // This could be an unaligned memory location.
      if (NumBytes == 4)
        Addend = *reinterpret_cast<support::ulittle32_t *>(LocalAddress);
      else
        Addend = *reinterpret_cast<support::ulittle64_t *>(LocalAddress);
      break;
    case MachO::ARM64_RELOC_BRANCH26: {
      // Verify that the relocation points to the expected branch instruction.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((*p & 0xFC000000) == 0x14000000 && "Expected branch instruction.");

      // Get the 26 bit addend encoded in the branch instruction and sign-extend
      // to 64 bit. The lower 2 bits are always zeros and are therefore implicit
      // (<< 2).
      Addend = (*p & 0x03FFFFFF) << 2;
      Addend = SignExtend64(Addend, 28);
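
      // For example, the instruction word 0x14000010 (b #+64) carries
      // imm26 == 0x10, so the decoded addend is 0x10 << 2 == 64.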
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_PAGE21: {
      // Verify that the relocation points to the expected adrp instruction.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((*p & 0x9F000000) == 0x90000000 && "Expected adrp instruction.");

      // Get the 21 bit addend encoded in the adrp instruction and sign-extend
      // to 64 bit. The lower 12 bits (4096 byte page) are always zeros and are
      // therefore implicit (<< 12). Assemble the 21-bit page index first, then
      // sign-extend and shift so no bits are lost to 32-bit truncation.
      Addend = ((*p & 0x60000000) >> 29) | ((*p & 0x01FFFFE0) >> 3);
      Addend = SignExtend64(Addend, 21) << 12;
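
      // For example, 0xB0000040 is an adrp with immlo == 1 and immhi == 2,
      // so the decoded addend is ((2 << 2) | 1) << 12 == 0x9000 (nine
      // 4096-byte pages).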
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: {
      // Verify that the relocation points to one of the expected load / store
      // instructions.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      (void)p;
      assert((*p & 0x3B000000) == 0x39000000 &&
             "Only expected load / store instructions.");
    } // fall-through
    case MachO::ARM64_RELOC_PAGEOFF12: {
      // Verify that the relocation points to one of the expected load / store
      // or add / sub instructions.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((((*p & 0x3B000000) == 0x39000000) ||
              ((*p & 0x11C00000) == 0x11000000)) &&
             "Expected load / store or add / sub instruction.");

      // Get the 12 bit addend encoded in the instruction.
      Addend = (*p & 0x003FFC00) >> 10;

      // Check which instruction we are decoding to obtain the implicit shift
      // factor of the instruction.
      int ImplicitShift = 0;
      if ((*p & 0x3B000000) == 0x39000000) { // << load / store
        // For load / store instructions the size is encoded in bits 31:30.
        ImplicitShift = ((*p >> 30) & 0x3);
        if (ImplicitShift == 0) {
          // Check if this is a vector op to get the correct shift value.
          if ((*p & 0x04800000) == 0x04800000)
            ImplicitShift = 4;
        }
      }
      // Compensate for implicit shift.
      Addend <<= ImplicitShift;
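
      // For example, 0xF9400841 (ldr x1, [x2, #16]) encodes imm12 == 2 with
      // a 64-bit operand size (ImplicitShift == 3), so the decoded addend is
      // 2 << 3 == 16.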
      break;
    }
    }
    return Addend;
  }

  /// Encode the addend into the instruction / memory location.
  void encodeAddend(uint8_t *LocalAddress, unsigned NumBytes,
                    MachO::RelocationInfoType RelType, int64_t Addend) const {
    // Verify that the relocation has the correct alignment.
    switch (RelType) {
    default:
      llvm_unreachable("Unsupported relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED:
      assert((NumBytes == 4 || NumBytes == 8) && "Invalid relocation size.");
      break;
    case MachO::ARM64_RELOC_BRANCH26:
    case MachO::ARM64_RELOC_PAGE21:
    case MachO::ARM64_RELOC_PAGEOFF12:
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
      assert(NumBytes == 4 && "Invalid relocation size.");
      assert((((uintptr_t)LocalAddress & 0x3) == 0) &&
             "Instruction address is not aligned to 4 bytes.");
      break;
    }

    switch (RelType) {
    default:
      llvm_unreachable("Unsupported relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED:
      // This could be an unaligned memory location.
      if (NumBytes == 4)
        *reinterpret_cast<support::ulittle32_t *>(LocalAddress) = Addend;
      else
        *reinterpret_cast<support::ulittle64_t *>(LocalAddress) = Addend;
      break;
    case MachO::ARM64_RELOC_BRANCH26: {
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      // Verify that the relocation points to the expected branch instruction.
      assert((*p & 0xFC000000) == 0x14000000 && "Expected branch instruction.");

      // Verify addend value.
      assert((Addend & 0x3) == 0 && "Branch target is not aligned");
      assert(isInt<28>(Addend) && "Branch target is out of range.");

      // Encode the addend as 26 bit immediate in the branch instruction.
      *p = (*p & 0xFC000000) | ((uint32_t)(Addend >> 2) & 0x03FFFFFF);
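
      // For example, re-encoding an addend of 64 into 0x14000000 (b #+0)
      // yields 0x14000010 (b #+64), the inverse of decodeAddend above.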
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_PAGE21: {
      // Verify that the relocation points to the expected adrp instruction.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((*p & 0x9F000000) == 0x90000000 && "Expected adrp instruction.");

      // Check that the addend fits into 21 bits (+ 12 lower bits).
      assert((Addend & 0xFFF) == 0 && "ADRP target is not page aligned.");
      assert(isInt<33>(Addend) && "Invalid page reloc value.");

      // Encode the addend into the instruction.
      uint32_t ImmLoValue = ((uint64_t)Addend << 17) & 0x60000000;
      uint32_t ImmHiValue = ((uint64_t)Addend >> 9) & 0x00FFFFE0;
      *p = (*p & 0x9F00001F) | ImmHiValue | ImmLoValue;
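
      // For example, an addend of 0x9000 (nine pages) gives ImmLoValue ==
      // 0x20000000 and ImmHiValue == 0x40, rewriting 0x90000000 (adrp x0)
      // into 0xB0000040, the inverse of decodeAddend above.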
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: {
      // Verify that the relocation points to one of the expected load / store
      // instructions.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((*p & 0x3B000000) == 0x39000000 &&
             "Only expected load / store instructions.");
      (void)p;
    } // fall-through
    case MachO::ARM64_RELOC_PAGEOFF12: {
      // Verify that the relocation points to one of the expected load / store
      // or add / sub instructions.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((((*p & 0x3B000000) == 0x39000000) ||
              ((*p & 0x11C00000) == 0x11000000)) &&
             "Expected load / store or add / sub instruction.");

      // Check which instruction we are decoding to obtain the implicit shift
      // factor of the instruction and verify alignment.
      int ImplicitShift = 0;
      if ((*p & 0x3B000000) == 0x39000000) { // << load / store
        // For load / store instructions the size is encoded in bits 31:30.
        ImplicitShift = ((*p >> 30) & 0x3);
        switch (ImplicitShift) {
        case 0:
          // Check if this is a vector op to get the correct shift value.
          if ((*p & 0x04800000) == 0x04800000) {
            ImplicitShift = 4;
            assert(((Addend & 0xF) == 0) &&
                   "128-bit LDR/STR not 16-byte aligned.");
          }
          break;
        case 1:
          assert(((Addend & 0x1) == 0) && "16-bit LDR/STR not 2-byte aligned.");
          break;
        case 2:
          assert(((Addend & 0x3) == 0) && "32-bit LDR/STR not 4-byte aligned.");
          break;
        case 3:
          assert(((Addend & 0x7) == 0) && "64-bit LDR/STR not 8-byte aligned.");
          break;
        }
      }
      // Compensate for implicit shift.
      Addend >>= ImplicitShift;
      assert(isUInt<12>(Addend) && "Addend cannot be encoded.");

      // Encode the addend into the instruction.
      *p = (*p & 0xFFC003FF) | ((uint32_t)(Addend << 10) & 0x003FFC00);
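
      // For example, an addend of 16 for the 64-bit load 0xF9400041
      // (ldr x1, [x2]) is shifted down to imm12 == 2, yielding 0xF9400841
      // (ldr x1, [x2, #16]).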
      break;
    }
    }
  }

  relocation_iterator
  processRelocationRef(unsigned SectionID, relocation_iterator RelI,
                       const ObjectFile &BaseObjT,
                       ObjSectionToIDMap &ObjSectionToID,
                       StubMap &Stubs) override {
    const MachOObjectFile &Obj =
      static_cast<const MachOObjectFile &>(BaseObjT);
    MachO::any_relocation_info RelInfo =
        Obj.getRelocation(RelI->getRawDataRefImpl());

    assert(!Obj.isRelocationScattered(RelInfo) &&
           "Scattered relocations are not supported on AArch64.");

    // ARM64 has an ARM64_RELOC_ADDEND relocation type that carries an explicit
    // addend for the following relocation. If found: (1) store the associated
    // addend, (2) consume the next relocation, and (3) use the stored addend to
    // override the addend.
    int64_t ExplicitAddend = 0;
    if (Obj.getAnyRelocationType(RelInfo) == MachO::ARM64_RELOC_ADDEND) {
      assert(!Obj.getPlainRelocationExternal(RelInfo));
      assert(!Obj.getAnyRelocationPCRel(RelInfo));
      assert(Obj.getAnyRelocationLength(RelInfo) == 2);
      int64_t RawAddend = Obj.getPlainRelocationSymbolNum(RelInfo);
      // Sign-extend the 24-bit addend to 64 bits.
      ExplicitAddend = SignExtend64(RawAddend, 24);
      ++RelI;
      RelInfo = Obj.getRelocation(RelI->getRawDataRefImpl());
    }
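
    // For example, a branch such as `b _foo+16` is emitted as an
    // ARM64_RELOC_ADDEND carrying 16 in its r_symbolnum field, immediately
    // followed by the ARM64_RELOC_BRANCH26 against _foo.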

    RelocationEntry RE(getRelocationEntry(SectionID, Obj, RelI));
    RE.Addend = decodeAddend(RE);

    assert((ExplicitAddend == 0 || RE.Addend == 0) &&
           "Relocation has ARM64_RELOC_ADDEND and embedded addend in the "
           "instruction.");
    if (ExplicitAddend)
      RE.Addend = ExplicitAddend;

    RelocationValueRef Value(
        getRelocationValueRef(Obj, RelI, RE, ObjSectionToID));

    bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
    if (!IsExtern && RE.IsPCRel)
      makeValueAddendPCRel(Value, RelI, 1 << RE.Size);

    RE.Addend = Value.Offset;

    if (RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGE21 ||
        RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12)
      processGOTRelocation(RE, Value, Stubs);
    else {
      if (Value.SymbolName)
        addRelocationForSymbol(RE, Value.SymbolName);
      else
        addRelocationForSection(RE, Value.SectionID);
    }

    return ++RelI;
  }

  void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
    DEBUG(dumpRelocationToResolve(RE, Value));

    const SectionEntry &Section = Sections[RE.SectionID];
    uint8_t *LocalAddress = Section.getAddressWithOffset(RE.Offset);
    MachO::RelocationInfoType RelType =
      static_cast<MachO::RelocationInfoType>(RE.RelType);

    switch (RelType) {
    default:
      llvm_unreachable("Invalid relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED: {
      assert(!RE.IsPCRel &&
             "PC-relative ARM64_RELOC_UNSIGNED is not supported.");
      // The target location may be unaligned; encodeAddend writes the value
      // through little-endian helpers that tolerate unaligned addresses.
      if (RE.Size < 2)
        llvm_unreachable("Invalid size for ARM64_RELOC_UNSIGNED");

      encodeAddend(LocalAddress, 1 << RE.Size, RelType, Value + RE.Addend);
      break;
    }
    case MachO::ARM64_RELOC_BRANCH26: {
      assert(RE.IsPCRel &&
             "Non-PC-relative ARM64_RELOC_BRANCH26 is not supported.");
      // Compute the branch displacement; encodeAddend verifies that it is in
      // range.
      uint64_t FinalAddress = Section.getLoadAddressWithOffset(RE.Offset);
      int64_t PCRelVal = Value - FinalAddress + RE.Addend;
      encodeAddend(LocalAddress, /*Size=*/4, RelType, PCRelVal);
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_PAGE21: {
      assert(RE.IsPCRel &&
             "Non-PC-relative ARM64_RELOC_PAGE21 is not supported.");
      // Adjust for PC-relative relocation and offset.
      uint64_t FinalAddress = Section.getLoadAddressWithOffset(RE.Offset);
      int64_t PCRelVal =
        ((Value + RE.Addend) & (-4096)) - (FinalAddress & (-4096));
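      // For example, with Value + RE.Addend == 0x200123 and FinalAddress ==
      // 0x104567, the page delta is 0x200000 - 0x104000 == 0xFC000.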
      encodeAddend(LocalAddress, /*Size=*/4, RelType, PCRelVal);
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
    case MachO::ARM64_RELOC_PAGEOFF12: {
      assert(!RE.IsPCRel &&
             "PC-relative ARM64_RELOC_PAGEOFF12 is not supported.");
      // Add the offset from the symbol.
      Value += RE.Addend;
      // Mask out the page address and only use the lower 12 bits.
      Value &= 0xFFF;
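      // For example, if Value + RE.Addend is 0x200128, only the page offset
      // 0x128 is encoded here; the page itself comes from the matching
      // PAGE21 relocation.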
      encodeAddend(LocalAddress, /*Size=*/4, RelType, Value);
      break;
    }
    case MachO::ARM64_RELOC_SUBTRACTOR:
    case MachO::ARM64_RELOC_POINTER_TO_GOT:
    case MachO::ARM64_RELOC_TLVP_LOAD_PAGE21:
    case MachO::ARM64_RELOC_TLVP_LOAD_PAGEOFF12:
      llvm_unreachable("Relocation type not yet implemented!");
    case MachO::ARM64_RELOC_ADDEND:
      llvm_unreachable("ARM64_RELOC_ADDEND should have been handled by "
                       "processRelocationRef!");
    }
  }

  void finalizeSection(const ObjectFile &Obj, unsigned SectionID,
                       const SectionRef &Section) {}

private:
  void processGOTRelocation(const RelocationEntry &RE,
                            RelocationValueRef &Value, StubMap &Stubs) {
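    // Indirect the relocation through a GOT stub: the stub is an 8-byte slot
    // in the section's stub area that receives the target's absolute address
    // via an ARM64_RELOC_UNSIGNED entry, and the original ADRP / LDR pair is
    // redirected to address that slot instead.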
    assert(RE.Size == 2);
    SectionEntry &Section = Sections[RE.SectionID];
    StubMap::const_iterator i = Stubs.find(Value);
    int64_t Offset;
    if (i != Stubs.end())
      Offset = static_cast<int64_t>(i->second);
    else {
      // FIXME: There must be a better way to do this than to check and fix the
      // alignment every time!!!
      uintptr_t BaseAddress = uintptr_t(Section.getAddress());
      uintptr_t StubAlignment = getStubAlignment();
      uintptr_t StubAddress =
          (BaseAddress + Section.getStubOffset() + StubAlignment - 1) &
          -StubAlignment;
      unsigned StubOffset = StubAddress - BaseAddress;
      Stubs[Value] = StubOffset;
      assert(((StubAddress % getStubAlignment()) == 0) &&
             "GOT entry not aligned");
      RelocationEntry GOTRE(RE.SectionID, StubOffset,
                            MachO::ARM64_RELOC_UNSIGNED, Value.Offset,
                            /*IsPCRel=*/false, /*Size=*/3);
      if (Value.SymbolName)
        addRelocationForSymbol(GOTRE, Value.SymbolName);
      else
        addRelocationForSection(GOTRE, Value.SectionID);
      Section.advanceStubOffset(getMaxStubSize());
      Offset = static_cast<int64_t>(StubOffset);
    }
    RelocationEntry TargetRE(RE.SectionID, RE.Offset, RE.RelType, Offset,
                             RE.IsPCRel, RE.Size);
    addRelocationForSection(TargetRE, RE.SectionID);
  }
};
}

#undef DEBUG_TYPE

#endif