//===- ARM64.cpp ----------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "Arch/ARM64Common.h"
#include "InputFiles.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"

#include "lld/Common/ErrorHandler.h"
#include "mach-o/compact_unwind_encoding.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/MathExtras.h"

using namespace llvm;
using namespace llvm::MachO;
using namespace llvm::support::endian;
using namespace lld;
using namespace lld::macho;

namespace {

struct ARM64 : ARM64Common {
  ARM64();
  void writeStub(uint8_t *buf, const Symbol &, uint64_t) const override;
  void writeStubHelperHeader(uint8_t *buf) const override;
  void writeStubHelperEntry(uint8_t *buf, const Symbol &,
                            uint64_t entryAddr) const override;

  void writeObjCMsgSendStub(uint8_t *buf, Symbol *sym, uint64_t stubsAddr,
                            uint64_t &stubOffset, uint64_t selrefsVA,
                            uint64_t selectorIndex,
                            Symbol *objcMsgSend) const override;
  void populateThunk(InputSection *thunk, Symbol *funcSym) override;
  void applyOptimizationHints(uint8_t *, const ObjFile &) const override;
};

} // namespace

// Random notes on reloc types:
// ADDEND always pairs with BRANCH26, PAGE21, or PAGEOFF12
// POINTER_TO_GOT: ld64 supports a 4-byte pc-relative form as well as an 8-byte
// absolute version of this relocation. The semantics of the absolute relocation
// are weird -- it results in the value of the GOT slot being written, instead
// of the address. Let's not support it unless we find a real-world use case.
static constexpr std::array<RelocAttrs, 11> relocAttrsArray{{
#define B(x) RelocAttrBits::x
    {"UNSIGNED",
     B(UNSIGNED) | B(ABSOLUTE) | B(EXTERN) | B(LOCAL) | B(BYTE4) | B(BYTE8)},
    {"SUBTRACTOR", B(SUBTRAHEND) | B(EXTERN) | B(BYTE4) | B(BYTE8)},
    {"BRANCH26", B(PCREL) | B(EXTERN) | B(BRANCH) | B(BYTE4)},
    {"PAGE21", B(PCREL) | B(EXTERN) | B(BYTE4)},
    {"PAGEOFF12", B(ABSOLUTE) | B(EXTERN) | B(BYTE4)},
    {"GOT_LOAD_PAGE21", B(PCREL) | B(EXTERN) | B(GOT) | B(BYTE4)},
    {"GOT_LOAD_PAGEOFF12",
     B(ABSOLUTE) | B(EXTERN) | B(GOT) | B(LOAD) | B(BYTE4)},
    {"POINTER_TO_GOT", B(PCREL) | B(EXTERN) | B(GOT) | B(POINTER) | B(BYTE4)},
    {"TLVP_LOAD_PAGE21", B(PCREL) | B(EXTERN) | B(TLV) | B(BYTE4)},
    {"TLVP_LOAD_PAGEOFF12",
     B(ABSOLUTE) | B(EXTERN) | B(TLV) | B(LOAD) | B(BYTE4)},
    {"ADDEND", B(ADDEND)},
#undef B
}};

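// In the stub templates below, the @page/@pageoff (and branch) immediate
// fields are left as zero; the ::write* helpers declared in ARM64Common.h
// fill in the actual operand values when the synthetic sections are written.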
static constexpr uint32_t stubCode[] = {
    0x90000010, // 00: adrp  x16, __la_symbol_ptr@page
    0xf9400210, // 04: ldr   x16, [x16, __la_symbol_ptr@pageoff]
    0xd61f0200, // 08: br    x16
};

void ARM64::writeStub(uint8_t *buf8, const Symbol &sym,
                      uint64_t pointerVA) const {
  ::writeStub(buf8, stubCode, sym, pointerVA);
}

static constexpr uint32_t stubHelperHeaderCode[] = {
    0x90000011, // 00: adrp  x17, _dyld_private@page
    0x91000231, // 04: add   x17, x17, _dyld_private@pageoff
    0xa9bf47f0, // 08: stp   x16, x17, [sp, #-16]!
    0x90000010, // 0c: adrp  x16, dyld_stub_binder@page
    0xf9400210, // 10: ldr   x16, [x16, dyld_stub_binder@pageoff]
    0xd61f0200, // 14: br    x16
};

void ARM64::writeStubHelperHeader(uint8_t *buf8) const {
  ::writeStubHelperHeader<LP64>(buf8, stubHelperHeaderCode);
}

static constexpr uint32_t stubHelperEntryCode[] = {
    0x18000050, // 00: ldr  w16, l0
    0x14000000, // 04: b    stubHelperHeader
    0x00000000, // 08: l0: .long 0
};

void ARM64::writeStubHelperEntry(uint8_t *buf8, const Symbol &sym,
                                 uint64_t entryVA) const {
  ::writeStubHelperEntry(buf8, stubHelperEntryCode, sym, entryVA);
}

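// The fast stub jumps to _objc_msgSend through its GOT entry; it is padded
// with brk instructions to 32 bytes, matching objcStubsFastAlignment set in
// the ARM64 constructor below.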
static constexpr uint32_t objcStubsFastCode[] = {
    0x90000001, // adrp  x1, __objc_selrefs@page
    0xf9400021, // ldr   x1, [x1, @selector("foo")@pageoff]
    0x90000010, // adrp  x16, _got@page
    0xf9400210, // ldr   x16, [x16, _objc_msgSend@pageoff]
    0xd61f0200, // br    x16
    0xd4200020, // brk   #0x1
    0xd4200020, // brk   #0x1
    0xd4200020, // brk   #0x1
};

static constexpr uint32_t objcStubsSmallCode[] = {
    0x90000001, // adrp  x1, __objc_selrefs@page
    0xf9400021, // ldr   x1, [x1, @selector("foo")@pageoff]
    0x14000000, // b     _objc_msgSend
};

void ARM64::writeObjCMsgSendStub(uint8_t *buf, Symbol *sym, uint64_t stubsAddr,
                                 uint64_t &stubOffset, uint64_t selrefsVA,
                                 uint64_t selectorIndex,
                                 Symbol *objcMsgSend) const {
  uint64_t objcMsgSendAddr;
  uint64_t objcStubSize;
  uint64_t objcMsgSendIndex;

  if (config->objcStubsMode == ObjCStubsMode::fast) {
    objcStubSize = target->objcStubsFastSize;
    objcMsgSendAddr = in.got->addr;
    objcMsgSendIndex = objcMsgSend->gotIndex;
    ::writeObjCMsgSendFastStub<LP64>(buf, objcStubsFastCode, sym, stubsAddr,
                                     stubOffset, selrefsVA, selectorIndex,
                                     objcMsgSendAddr, objcMsgSendIndex);
  } else {
    assert(config->objcStubsMode == ObjCStubsMode::small);
    objcStubSize = target->objcStubsSmallSize;
    if (auto *d = dyn_cast<Defined>(objcMsgSend)) {
      objcMsgSendAddr = d->getVA();
      objcMsgSendIndex = 0;
    } else {
      objcMsgSendAddr = in.stubs->addr;
      objcMsgSendIndex = objcMsgSend->stubsIndex;
    }
    ::writeObjCMsgSendSmallStub<LP64>(buf, objcStubsSmallCode, sym, stubsAddr,
                                      stubOffset, selrefsVA, selectorIndex,
                                      objcMsgSendAddr, objcMsgSendIndex);
  }
  stubOffset += objcStubSize;
}

// A thunk is the relaxed variation of stubCode. We don't need the
// extra indirection through a lazy pointer because the target address
// is known at link time.
static constexpr uint32_t thunkCode[] = {
    0x90000010, // 00: adrp  x16, <thunk.ptr>@page
    0x91000210, // 04: add   x16, x16, <thunk.ptr>@pageoff
    0xd61f0200, // 08: br    x16
};

void ARM64::populateThunk(InputSection *thunk, Symbol *funcSym) {
  thunk->align = 4;
  thunk->data = {reinterpret_cast<const uint8_t *>(thunkCode),
                 sizeof(thunkCode)};
  thunk->relocs.emplace_back(/*type=*/ARM64_RELOC_PAGEOFF12,
                             /*pcrel=*/false, /*length=*/2,
                             /*offset=*/4, /*addend=*/0,
                             /*referent=*/funcSym);
  thunk->relocs.emplace_back(/*type=*/ARM64_RELOC_PAGE21,
                             /*pcrel=*/true, /*length=*/2,
                             /*offset=*/0, /*addend=*/0,
                             /*referent=*/funcSym);
}

ARM64::ARM64() : ARM64Common(LP64()) {
  cpuType = CPU_TYPE_ARM64;
  cpuSubtype = CPU_SUBTYPE_ARM64_ALL;

  stubSize = sizeof(stubCode);
  thunkSize = sizeof(thunkCode);

  objcStubsFastSize = sizeof(objcStubsFastCode);
  objcStubsFastAlignment = 32;
  objcStubsSmallSize = sizeof(objcStubsSmallCode);
  objcStubsSmallAlignment = 4;

  // The branch immediate is a two's complement 26-bit value that is implicitly
  // multiplied by 4 (since branch targets are 4-byte aligned), so the branch
  // range is -4*(2**25) .. 4*(2**25 - 1).
  backwardBranchRange = 128 * 1024 * 1024;
  forwardBranchRange = backwardBranchRange - 4;

  modeDwarfEncoding = UNWIND_ARM64_MODE_DWARF;
  subtractorRelocType = ARM64_RELOC_SUBTRACTOR;
  unsignedRelocType = ARM64_RELOC_UNSIGNED;

  stubHelperHeaderSize = sizeof(stubHelperHeaderCode);
  stubHelperEntrySize = sizeof(stubHelperEntryCode);

  relocAttrs = {relocAttrsArray.data(), relocAttrsArray.size()};
}

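// Parsed forms of the instructions inspected by the linker-optimization-hint
// (LOH) transformations below.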
namespace {
struct Adrp {
  uint32_t destRegister;
  int64_t addend;
};

struct Add {
  uint8_t destRegister;
  uint8_t srcRegister;
  uint32_t addend;
};

enum ExtendType { ZeroExtend = 1, Sign64 = 2, Sign32 = 3 };

struct Ldr {
  uint8_t destRegister;
  uint8_t baseRegister;
  uint8_t p2Size;
  bool isFloat;
  ExtendType extendType;
  int64_t offset;
};
} // namespace

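// ADRP computes PAGE(PC) + imm * 4096, where imm is the 21-bit signed value
// immhi:immlo stored in bits 23:5 and 30:29 of the instruction.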
static bool parseAdrp(uint32_t insn, Adrp &adrp) {
  if ((insn & 0x9f000000) != 0x90000000)
    return false;
  adrp.destRegister = insn & 0x1f;
  uint64_t immHi = (insn >> 5) & 0x7ffff;
  uint64_t immLo = (insn >> 29) & 0x3;
  adrp.addend = SignExtend64<21>(immLo | (immHi << 2)) * 4096;
  return true;
}

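// Matches only the 64-bit ADD (immediate) form with LSL #0; the 12-bit
// immediate sits in bits 21:10.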
static bool parseAdd(uint32_t insn, Add &add) {
  if ((insn & 0xffc00000) != 0x91000000)
    return false;
  add.destRegister = insn & 0x1f;
  add.srcRegister = (insn >> 5) & 0x1f;
  add.addend = (insn >> 10) & 0xfff;
  return true;
}

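// Recognizes the unsigned-offset load forms: LDR/LDRB/LDRH, the sign-extending
// LDRS* variants, and LDR (SIMD&FP). p2Size is log2 of the access size in
// bytes; the 12-bit immediate is scaled by that size to produce the byte
// offset.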
static bool parseLdr(uint32_t insn, Ldr &ldr) {
  ldr.destRegister = insn & 0x1f;
  ldr.baseRegister = (insn >> 5) & 0x1f;
  uint8_t size = insn >> 30;
  uint8_t opc = (insn >> 22) & 3;

  if ((insn & 0x3fc00000) == 0x39400000) {
    // LDR (immediate), LDRB (immediate), LDRH (immediate)
    ldr.p2Size = size;
    ldr.extendType = ZeroExtend;
    ldr.isFloat = false;
  } else if ((insn & 0x3f800000) == 0x39800000) {
    // LDRSB (immediate), LDRSH (immediate), LDRSW (immediate)
    ldr.p2Size = size;
    ldr.extendType = static_cast<ExtendType>(opc);
    ldr.isFloat = false;
  } else if ((insn & 0x3f400000) == 0x3d400000) {
    // LDR (immediate, SIMD&FP)
    ldr.extendType = ZeroExtend;
    ldr.isFloat = true;
    if (opc == 1)
      ldr.p2Size = size;
    else if (size == 0 && opc == 3)
      ldr.p2Size = 4;
    else
      return false;
  } else {
    return false;
  }
  ldr.offset = ((insn >> 10) & 0xfff) << ldr.p2Size;
  return true;
}

static bool isValidAdrOffset(int32_t delta) { return isInt<21>(delta); }

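// ADR encoding: opcode 0x10000000, immlo in bits 30:29, immhi in bits 23:5,
// destination register in bits 4:0. Unlike adrp, the delta is a plain byte
// offset (no page scaling).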
static void writeAdr(void *loc, uint32_t dest, int32_t delta) {
  assert(isValidAdrOffset(delta));
  uint32_t opcode = 0x10000000;
  uint32_t immHi = (delta & 0x001ffffc) << 3;
  uint32_t immLo = (delta & 0x00000003) << 29;
  write32le(loc, opcode | immHi | immLo | dest);
}

static void writeNop(void *loc) { write32le(loc, 0xd503201f); }

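// LDR (literal) encodes a signed 19-bit word offset, i.e. a 4-byte-aligned
// offset within +/- 1 MiB. There is no literal form for byte or halfword
// loads, hence the p2Size > 1 requirement.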
static bool isLiteralLdrEligible(const Ldr &ldr) {
  return ldr.p2Size > 1 && isShiftedInt<19, 2>(ldr.offset);
}

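// Literal-load opcodes: 0x18/0x58/0x98 are the 32-bit, 64-bit, and LDRSW
// variants; 0x1c/0x5c/0x9c are the S-, D-, and Q-register SIMD&FP variants.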
static void writeLiteralLdr(void *loc, const Ldr &ldr) {
  assert(isLiteralLdrEligible(ldr));
  uint32_t imm19 = (ldr.offset / 4 & maskTrailingOnes<uint32_t>(19)) << 5;
  uint32_t opcode;
  switch (ldr.p2Size) {
  case 2:
    if (ldr.isFloat)
      opcode = 0x1c000000;
    else
      opcode = ldr.extendType == Sign64 ? 0x98000000 : 0x18000000;
    break;
  case 3:
    opcode = ldr.isFloat ? 0x5c000000 : 0x58000000;
    break;
  case 4:
    opcode = 0x9c000000;
    break;
  default:
    llvm_unreachable("Invalid literal ldr size");
  }
  write32le(loc, opcode | imm19 | ldr.destRegister);
}

static bool isImmediateLdrEligible(const Ldr &ldr) {
  // Note: We deviate from ld64's behavior, which converts to immediate loads
  // only if ldr.offset < 4096, even though the offset is divided by the load's
  // size in the 12-bit immediate operand. Only the unsigned offset variant is
  // supported.

  uint32_t size = 1 << ldr.p2Size;
  return ldr.offset >= 0 && (ldr.offset % size) == 0 &&
         isUInt<12>(ldr.offset >> ldr.p2Size);
}

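// LDR (unsigned immediate) encoding: base opcode 0x39000000, V (bit 26) set
// for SIMD&FP, size in bits 31:30, opc in bits 23:22, and the scaled imm12 in
// bits 21:10. For example, an 8-byte load at byte offset 16 encodes imm12 = 2;
// Q-register loads use size = 0 with opc = 3.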
static void writeImmediateLdr(void *loc, const Ldr &ldr) {
  assert(isImmediateLdrEligible(ldr));
  uint32_t opcode = 0x39000000;
  if (ldr.isFloat) {
    opcode |= 0x04000000;
    assert(ldr.extendType == ZeroExtend);
  }
  opcode |= ldr.destRegister;
  opcode |= ldr.baseRegister << 5;
  uint8_t size, opc;
  if (ldr.p2Size == 4) {
    size = 0;
    opc = 3;
  } else {
    opc = ldr.extendType;
    size = ldr.p2Size;
  }
  uint32_t immBits = ldr.offset >> ldr.p2Size;
  write32le(loc, opcode | (immBits << 10) | (opc << 22) | (size << 30));
}

// Transforms a pair of adrp+add instructions into an adr instruction if the
// target is within the +/- 1 MiB range allowed by the adr's 21 bit signed
// immediate offset.
//
//   adrp xN, _foo@PAGE
//   add  xM, xN, _foo@PAGEOFF
// ->
//   adr  xM, _foo
//   nop
static void applyAdrpAdd(uint8_t *buf, const ConcatInputSection *isec,
                         uint64_t offset1, uint64_t offset2) {
  uint32_t ins1 = read32le(buf + offset1);
  uint32_t ins2 = read32le(buf + offset2);
  Adrp adrp;
  Add add;
  if (!parseAdrp(ins1, adrp) || !parseAdd(ins2, add))
    return;
  if (adrp.destRegister != add.srcRegister)
    return;

  uint64_t addr1 = isec->getVA() + offset1;
  uint64_t referent = pageBits(addr1) + adrp.addend + add.addend;
  int64_t delta = referent - addr1;
  if (!isValidAdrOffset(delta))
    return;

  writeAdr(buf + offset1, add.destRegister, delta);
  writeNop(buf + offset2);
}

// Transforms two adrp instructions into a single adrp if their referent
// addresses are located on the same 4096-byte page.
//
//   adrp xN, _foo@PAGE
//   adrp xN, _bar@PAGE
// ->
//   adrp xN, _foo@PAGE
//   nop
static void applyAdrpAdrp(uint8_t *buf, const ConcatInputSection *isec,
                          uint64_t offset1, uint64_t offset2) {
  uint32_t ins1 = read32le(buf + offset1);
  uint32_t ins2 = read32le(buf + offset2);
  Adrp adrp1, adrp2;
  if (!parseAdrp(ins1, adrp1) || !parseAdrp(ins2, adrp2))
    return;
  if (adrp1.destRegister != adrp2.destRegister)
    return;

  uint64_t page1 = pageBits(offset1 + isec->getVA()) + adrp1.addend;
  uint64_t page2 = pageBits(offset2 + isec->getVA()) + adrp2.addend;
  if (page1 != page2)
    return;

  writeNop(buf + offset2);
}

// Transforms a pair of adrp+ldr (immediate) instructions into an ldr (literal)
// load from a PC-relative address if it is 4-byte aligned and within +/- 1 MiB,
// as ldr can encode a signed 19-bit offset that gets multiplied by 4.
//
//   adrp xN, _foo@PAGE
//   ldr  xM, [xN, _foo@PAGEOFF]
// ->
//   nop
//   ldr  xM, _foo
static void applyAdrpLdr(uint8_t *buf, const ConcatInputSection *isec,
                         uint64_t offset1, uint64_t offset2) {
  uint32_t ins1 = read32le(buf + offset1);
  uint32_t ins2 = read32le(buf + offset2);
  Adrp adrp;
  Ldr ldr;
  if (!parseAdrp(ins1, adrp) || !parseLdr(ins2, ldr))
    return;
  if (adrp.destRegister != ldr.baseRegister)
    return;

  uint64_t addr1 = isec->getVA() + offset1;
  uint64_t addr2 = isec->getVA() + offset2;
  uint64_t referent = pageBits(addr1) + adrp.addend + ldr.offset;
  ldr.offset = referent - addr2;
  if (!isLiteralLdrEligible(ldr))
    return;

  writeNop(buf + offset1);
  writeLiteralLdr(buf + offset2, ldr);
}

// GOT loads are emitted by the compiler as a pair of adrp and ldr instructions,
// but they may be changed to adrp+add by relaxGotLoad(). This hint performs
// the AdrpLdr or AdrpAdd transformation depending on whether it was relaxed.
static void applyAdrpLdrGot(uint8_t *buf, const ConcatInputSection *isec,
                            uint64_t offset1, uint64_t offset2) {
  uint32_t ins2 = read32le(buf + offset2);
  Add add;
  Ldr ldr;
  if (parseAdd(ins2, add))
    applyAdrpAdd(buf, isec, offset1, offset2);
  else if (parseLdr(ins2, ldr))
    applyAdrpLdr(buf, isec, offset1, offset2);
}

// Optimizes an adrp+add+ldr sequence used for loading from a local symbol's
// address: if the target is close enough, we load from it directly with a
// literal ldr; otherwise we relax the sequence to adr+ldr or adrp+ldr.
//
//   adrp x0, _foo@PAGE
//   add  x1, x0, _foo@PAGEOFF
//   ldr  x2, [x1, #off]
static void applyAdrpAddLdr(uint8_t *buf, const ConcatInputSection *isec,
                            uint64_t offset1, uint64_t offset2,
                            uint64_t offset3) {
  uint32_t ins1 = read32le(buf + offset1);
  Adrp adrp;
  if (!parseAdrp(ins1, adrp))
    return;
  uint32_t ins2 = read32le(buf + offset2);
  Add add;
  if (!parseAdd(ins2, add))
    return;
  uint32_t ins3 = read32le(buf + offset3);
  Ldr ldr;
  if (!parseLdr(ins3, ldr))
    return;
  if (adrp.destRegister != add.srcRegister)
    return;
  if (add.destRegister != ldr.baseRegister)
    return;

  // Load from the target address directly.
  //   nop
  //   nop
  //   ldr x2, [_foo + #off]
  uint64_t addr1 = isec->getVA() + offset1;
  uint64_t addr3 = isec->getVA() + offset3;
  uint64_t referent = pageBits(addr1) + adrp.addend + add.addend;
  Ldr literalLdr = ldr;
  literalLdr.offset += referent - addr3;
  if (isLiteralLdrEligible(literalLdr)) {
    writeNop(buf + offset1);
    writeNop(buf + offset2);
    writeLiteralLdr(buf + offset3, literalLdr);
    return;
  }

  // Load the target address into a register and load from there indirectly.
  //   adr x1, _foo
  //   nop
  //   ldr x2, [x1, #off]
  int64_t adrOffset = referent - addr1;
  if (isValidAdrOffset(adrOffset)) {
    writeAdr(buf + offset1, ldr.baseRegister, adrOffset);
    // Note: ld64 moves the offset into the adr instruction for AdrpAddLdr, but
    // not for AdrpLdrGotLdr. Its effect is the same either way.
    writeNop(buf + offset2);
    return;
  }

  // Move the target's page offset into the ldr's immediate offset.
  //   adrp x0, _foo@PAGE
  //   nop
  //   ldr x2, [x0, _foo@PAGEOFF + #off]
  Ldr immediateLdr = ldr;
  immediateLdr.baseRegister = adrp.destRegister;
  immediateLdr.offset += add.addend;
  if (isImmediateLdrEligible(immediateLdr)) {
    writeNop(buf + offset2);
    writeImmediateLdr(buf + offset3, immediateLdr);
    return;
  }
}

// Relaxes a GOT-indirect load.
// If the referenced symbol is external and its GOT entry is within +/- 1 MiB,
// the GOT entry can be loaded with a single literal ldr instruction.
// If the referenced symbol is local and thus has been relaxed to adrp+add+ldr,
// we perform the AdrpAddLdr transformation.
static void applyAdrpLdrGotLdr(uint8_t *buf, const ConcatInputSection *isec,
                               uint64_t offset1, uint64_t offset2,
                               uint64_t offset3) {
  uint32_t ins2 = read32le(buf + offset2);
  Add add;
  Ldr ldr2;

  if (parseAdd(ins2, add)) {
    applyAdrpAddLdr(buf, isec, offset1, offset2, offset3);
  } else if (parseLdr(ins2, ldr2)) {
    // adrp x1, _foo@GOTPAGE
    // ldr  x2, [x1, _foo@GOTPAGEOFF]
    // ldr  x3, [x2, #off]

    uint32_t ins1 = read32le(buf + offset1);
    Adrp adrp;
    if (!parseAdrp(ins1, adrp))
      return;
    uint32_t ins3 = read32le(buf + offset3);
    Ldr ldr3;
    if (!parseLdr(ins3, ldr3))
      return;

    if (ldr2.baseRegister != adrp.destRegister)
      return;
    if (ldr3.baseRegister != ldr2.destRegister)
      return;
    // Loads from the GOT must be pointer sized.
    if (ldr2.p2Size != 3 || ldr2.isFloat)
      return;

    uint64_t addr1 = isec->getVA() + offset1;
    uint64_t addr2 = isec->getVA() + offset2;
    uint64_t referent = pageBits(addr1) + adrp.addend + ldr2.offset;
    // Load the GOT entry's address directly.
    //   nop
    //   ldr x2, _foo@GOTPAGE + _foo@GOTPAGEOFF
    //   ldr x3, [x2, #off]
    Ldr literalLdr = ldr2;
    literalLdr.offset = referent - addr2;
    if (isLiteralLdrEligible(literalLdr)) {
      writeNop(buf + offset1);
      writeLiteralLdr(buf + offset2, literalLdr);
    }
  }
}

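// Decodes a single ULEB128 value and advances ptr past it.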
static uint64_t readValue(const uint8_t *&ptr, const uint8_t *end) {
  unsigned int n = 0;
  uint64_t value = decodeULEB128(ptr, &n, end);
  ptr += n;
  return value;
}

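// Walks the LOH stream: each entry is a ULEB128-encoded kind, followed by an
// argument count and that many ULEB128 addresses. A kind of 0 terminates the
// stream; entries with more than 3 arguments are skipped.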
template <typename Callback>
static void forEachHint(ArrayRef<uint8_t> data, Callback callback) {
  std::array<uint64_t, 3> args;

  for (const uint8_t *p = data.begin(), *end = data.end(); p < end;) {
    uint64_t type = readValue(p, end);
    if (type == 0)
      break;

    uint64_t argCount = readValue(p, end);
    // All known LOH types as of 2022-09 have 3 or fewer arguments; skip others.
    if (argCount > 3) {
      for (unsigned i = 0; i < argCount; ++i)
        readValue(p, end);
      continue;
    }

    for (unsigned i = 0; i < argCount; ++i)
      args[i] = readValue(p, end);
    callback(type, ArrayRef<uint64_t>(args.data(), argCount));
  }
}

// On RISC architectures like arm64, materializing a memory address generally
// takes multiple instructions. If the referenced symbol is located close enough
// in memory, fewer instructions are needed.
//
// Linker optimization hints record where addresses are computed. After
// addresses have been assigned, if possible, we change them to a shorter
// sequence of instructions. The size of the binary is not modified; the
// eliminated instructions are replaced with NOPs. This still leads to faster
// code as the CPU can skip over NOPs quickly.
//
// LOHs are specified by the LC_LINKER_OPTIMIZATION_HINTS load command, which
// points to a sequence of ULEB128-encoded numbers. Each entry specifies a
// transformation kind, and 2 or 3 addresses where the instructions are located.
void ARM64::applyOptimizationHints(uint8_t *outBuf, const ObjFile &obj) const {
  ArrayRef<uint8_t> data = obj.getOptimizationHints();
  if (data.empty())
    return;

  const ConcatInputSection *section = nullptr;
  uint64_t sectionAddr = 0;
  uint8_t *buf = nullptr;

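  // Returns true if addr falls within a live ConcatInputSection and sets
  // `section`, `sectionAddr`, and `buf` (the section's data in the output
  // buffer) accordingly. The previous match is cached since consecutive hints
  // usually target the same section.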
  auto findSection = [&](uint64_t addr) {
    if (section && addr >= sectionAddr &&
        addr < sectionAddr + section->getSize())
      return true;

    if (obj.sections.empty())
      return false;
    auto secIt = std::prev(llvm::upper_bound(
        obj.sections, addr,
        [](uint64_t off, const Section *sec) { return off < sec->addr; }));
    const Section *sec = *secIt;

    if (sec->subsections.empty())
      return false;
    auto subsecIt = std::prev(llvm::upper_bound(
        sec->subsections, addr - sec->addr,
        [](uint64_t off, Subsection subsec) { return off < subsec.offset; }));
    const Subsection &subsec = *subsecIt;
    const ConcatInputSection *isec =
        dyn_cast_or_null<ConcatInputSection>(subsec.isec);
    if (!isec || isec->shouldOmitFromOutput())
      return false;

    section = isec;
    sectionAddr = subsec.offset + sec->addr;
    buf = outBuf + section->outSecOff + section->parent->fileOff;
    return true;
  };

  auto isValidOffset = [&](uint64_t offset) {
    if (offset < sectionAddr || offset >= sectionAddr + section->getSize()) {
      error(toString(&obj) +
            ": linker optimization hint spans multiple sections");
      return false;
    }
    return true;
  };

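  // First pass: apply all hints except AdrpAdrp, which is deferred to a second
  // pass (see below) so that it cannot interfere with the other
  // transformations.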
  bool hasAdrpAdrp = false;
  forEachHint(data, [&](uint64_t kind, ArrayRef<uint64_t> args) {
    if (kind == LOH_ARM64_ADRP_ADRP) {
      hasAdrpAdrp = true;
      return;
    }

    if (!findSection(args[0]))
      return;
    switch (kind) {
    case LOH_ARM64_ADRP_ADD:
      if (isValidOffset(args[1]))
        applyAdrpAdd(buf, section, args[0] - sectionAddr,
                     args[1] - sectionAddr);
      break;
    case LOH_ARM64_ADRP_LDR:
      if (isValidOffset(args[1]))
        applyAdrpLdr(buf, section, args[0] - sectionAddr,
                     args[1] - sectionAddr);
      break;
    case LOH_ARM64_ADRP_LDR_GOT:
      if (isValidOffset(args[1]))
        applyAdrpLdrGot(buf, section, args[0] - sectionAddr,
                        args[1] - sectionAddr);
      break;
    case LOH_ARM64_ADRP_ADD_LDR:
      if (isValidOffset(args[1]) && isValidOffset(args[2]))
        applyAdrpAddLdr(buf, section, args[0] - sectionAddr,
                        args[1] - sectionAddr, args[2] - sectionAddr);
      break;
    case LOH_ARM64_ADRP_LDR_GOT_LDR:
      if (isValidOffset(args[1]) && isValidOffset(args[2]))
        applyAdrpLdrGotLdr(buf, section, args[0] - sectionAddr,
                           args[1] - sectionAddr, args[2] - sectionAddr);
      break;
    case LOH_ARM64_ADRP_ADD_STR:
    case LOH_ARM64_ADRP_LDR_GOT_STR:
      // TODO: Implement these
      break;
    }
  });

  if (!hasAdrpAdrp)
    return;

  // AdrpAdrp optimization hints are performed in a second pass because they
  // might interfere with other transformations. For instance, consider the
  // following input:
  //
  //   adrp x0, _foo@PAGE
  //   add  x1, x0, _foo@PAGEOFF
  //   adrp x0, _bar@PAGE
  //   add  x2, x0, _bar@PAGEOFF
  //
  // If we perform the AdrpAdrp relaxation first, we get:
  //
  //   adrp x0, _foo@PAGE
  //   add  x1, x0, _foo@PAGEOFF
  //   nop
  //   add x2, x0, _bar@PAGEOFF
  //
  // If we then apply AdrpAdd to the first two instructions, the remaining add
  // reads a garbage value from x0:
  //
  //   adr  x1, _foo
  //   nop
  //   nop
  //   add  x2, x0, _bar@PAGEOFF
  forEachHint(data, [&](uint64_t kind, ArrayRef<uint64_t> args) {
    if (kind != LOH_ARM64_ADRP_ADRP)
      return;
    if (!findSection(args[0]))
      return;
    if (isValidOffset(args[1]))
      applyAdrpAdrp(buf, section, args[0] - sectionAddr, args[1] - sectionAddr);
  });
}

TargetInfo *macho::createARM64TargetInfo() {
  static ARM64 t;
  return &t;
}