// MCAssembler.cpp revision 328596
1//===- lib/MC/MCAssembler.cpp - Assembler Backend Implementation ----------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "llvm/MC/MCAssembler.h"
11#include "llvm/ADT/ArrayRef.h"
12#include "llvm/ADT/SmallString.h"
13#include "llvm/ADT/SmallVector.h"
14#include "llvm/ADT/Statistic.h"
15#include "llvm/ADT/StringRef.h"
16#include "llvm/ADT/Twine.h"
17#include "llvm/MC/MCAsmBackend.h"
18#include "llvm/MC/MCAsmInfo.h"
19#include "llvm/MC/MCAsmLayout.h"
20#include "llvm/MC/MCCodeEmitter.h"
21#include "llvm/MC/MCCodeView.h"
22#include "llvm/MC/MCContext.h"
23#include "llvm/MC/MCDwarf.h"
24#include "llvm/MC/MCExpr.h"
25#include "llvm/MC/MCFixup.h"
26#include "llvm/MC/MCFixupKindInfo.h"
27#include "llvm/MC/MCFragment.h"
28#include "llvm/MC/MCInst.h"
29#include "llvm/MC/MCObjectWriter.h"
30#include "llvm/MC/MCSection.h"
31#include "llvm/MC/MCSectionELF.h"
32#include "llvm/MC/MCSymbol.h"
33#include "llvm/MC/MCValue.h"
34#include "llvm/Support/Casting.h"
35#include "llvm/Support/Debug.h"
36#include "llvm/Support/ErrorHandling.h"
37#include "llvm/Support/LEB128.h"
38#include "llvm/Support/MathExtras.h"
39#include "llvm/Support/raw_ostream.h"
40#include <cassert>
41#include <cstdint>
42#include <cstring>
43#include <tuple>
44#include <utility>
45
46using namespace llvm;
47
48#define DEBUG_TYPE "assembler"
49
namespace {
// Assembler statistics, printed with -stats. Nested inside an anonymous
// namespace so the counter symbols stay local to this translation unit.
namespace stats {

STATISTIC(EmittedFragments, "Number of emitted assembler fragments - total");
STATISTIC(EmittedRelaxableFragments,
          "Number of emitted assembler fragments - relaxable");
STATISTIC(EmittedDataFragments,
          "Number of emitted assembler fragments - data");
STATISTIC(EmittedCompactEncodedInstFragments,
          "Number of emitted assembler fragments - compact encoded inst");
STATISTIC(EmittedAlignFragments,
          "Number of emitted assembler fragments - align");
STATISTIC(EmittedFillFragments,
          "Number of emitted assembler fragments - fill");
STATISTIC(EmittedOrgFragments,
          "Number of emitted assembler fragments - org");
STATISTIC(evaluateFixup, "Number of evaluated fixups");
STATISTIC(FragmentLayouts, "Number of fragment layouts");
STATISTIC(ObjectBytes, "Number of emitted object file bytes");
STATISTIC(RelaxationSteps, "Number of assembler layout and relaxation steps");
STATISTIC(RelaxedInstructions, "Number of relaxed instructions");
STATISTIC(PaddingFragmentsRelaxations,
          "Number of Padding Fragments relaxations");
STATISTIC(PaddingFragmentsBytes,
          "Total size of all padding from adding Fragments");

} // end namespace stats
} // end anonymous namespace
78
79// FIXME FIXME FIXME: There are number of places in this file where we convert
80// what is a 64-bit assembler value used for computation into a value in the
81// object file, which may truncate it. We should detect that truncation where
82// invalid and report errors back.
83
84/* *** */
85
// Construct an assembler wired to a target backend, an instruction encoder
// and an object writer. All boolean/flag state starts cleared; bundling is
// off until a bundle align size is set.
MCAssembler::MCAssembler(MCContext &Context, MCAsmBackend &Backend,
                         MCCodeEmitter &Emitter, MCObjectWriter &Writer)
    : Context(Context), Backend(Backend), Emitter(Emitter), Writer(Writer),
      BundleAlignSize(0), RelaxAll(false), SubsectionsViaSymbols(false),
      IncrementalLinkerCompatible(false), ELFHeaderEFlags(0) {
  VersionInfo.Major = 0; // Major version == 0 for "none specified"
}
93
// Defaulted out-of-line; NOTE(review): presumably kept in the .cpp so member
// types may be incomplete in the header -- confirm against MCAssembler.h.
MCAssembler::~MCAssembler() = default;
95
// Return the assembler to its freshly-constructed state so it can be reused
// for another compilation. Clears all accumulated per-object state and then
// resets the collaborating objects (backend, emitter, writer, LOH container).
void MCAssembler::reset() {
  Sections.clear();
  Symbols.clear();
  IndirectSymbols.clear();
  DataRegions.clear();
  LinkerOptions.clear();
  FileNames.clear();
  ThumbFuncs.clear();
  BundleAlignSize = 0;
  RelaxAll = false;
  SubsectionsViaSymbols = false;
  IncrementalLinkerCompatible = false;
  ELFHeaderEFlags = 0;
  LOHContainer.reset();
  VersionInfo.Major = 0; // "none specified", matching the constructor

  // reset objects owned by us
  getBackend().reset();
  getEmitter().reset();
  getWriter().reset();
  getLOHContainer().reset();
}
118
119bool MCAssembler::registerSection(MCSection &Section) {
120  if (Section.isRegistered())
121    return false;
122  Sections.push_back(&Section);
123  Section.setIsRegistered(true);
124  return true;
125}
126
127bool MCAssembler::isThumbFunc(const MCSymbol *Symbol) const {
128  if (ThumbFuncs.count(Symbol))
129    return true;
130
131  if (!Symbol->isVariable())
132    return false;
133
134  const MCExpr *Expr = Symbol->getVariableValue();
135
136  MCValue V;
137  if (!Expr->evaluateAsRelocatable(V, nullptr, nullptr))
138    return false;
139
140  if (V.getSymB() || V.getRefKind() != MCSymbolRefExpr::VK_None)
141    return false;
142
143  const MCSymbolRefExpr *Ref = V.getSymA();
144  if (!Ref)
145    return false;
146
147  if (Ref->getKind() != MCSymbolRefExpr::VK_None)
148    return false;
149
150  const MCSymbol &Sym = Ref->getSymbol();
151  if (!isThumbFunc(&Sym))
152    return false;
153
154  ThumbFuncs.insert(Symbol); // Cache it.
155  return true;
156}
157
158bool MCAssembler::isSymbolLinkerVisible(const MCSymbol &Symbol) const {
159  // Non-temporary labels should always be visible to the linker.
160  if (!Symbol.isTemporary())
161    return true;
162
163  // Absolute temporary labels are never visible.
164  if (!Symbol.isInSection())
165    return false;
166
167  if (Symbol.isUsedInReloc())
168    return true;
169
170  return false;
171}
172
173const MCSymbol *MCAssembler::getAtom(const MCSymbol &S) const {
174  // Linker visible symbols define atoms.
175  if (isSymbolLinkerVisible(S))
176    return &S;
177
178  // Absolute and undefined symbols have no defining atom.
179  if (!S.isInSection())
180    return nullptr;
181
182  // Non-linker visible symbols in sections which can't be atomized have no
183  // defining atom.
184  if (!getContext().getAsmInfo()->isSectionAtomizableBySymbols(
185          *S.getFragment()->getParent()))
186    return nullptr;
187
188  // Otherwise, return the atom for the containing fragment.
189  return S.getFragment()->getAtom();
190}
191
// Evaluate \p Fixup (attached to fragment \p DF) against \p Layout.
// On return, \p Target holds the relocatable decomposition of the fixup
// expression (SymA - SymB + constant) and \p Value its numeric value, with
// PC-relative adjustment applied when the fixup kind requires it. Returns
// true when the fixup is fully resolved (no relocation needed); on a
// malformed expression an error is reported and true is returned so no
// further processing happens.
bool MCAssembler::evaluateFixup(const MCAsmLayout &Layout,
                                const MCFixup &Fixup, const MCFragment *DF,
                                MCValue &Target, uint64_t &Value) const {
  ++stats::evaluateFixup;

  // FIXME: This code has some duplication with recordRelocation. We should
  // probably merge the two into a single callback that tries to evaluate a
  // fixup and records a relocation if one is needed.

  // On error claim to have completely evaluated the fixup, to prevent any
  // further processing from being done.
  const MCExpr *Expr = Fixup.getValue();
  MCContext &Ctx = getContext();
  Value = 0;
  if (!Expr->evaluateAsRelocatable(Target, &Layout, &Fixup)) {
    Ctx.reportError(Fixup.getLoc(), "expected relocatable expression");
    return true;
  }
  // A - B where B carries a variant qualifier cannot be represented.
  if (const MCSymbolRefExpr *RefB = Target.getSymB()) {
    if (RefB->getKind() != MCSymbolRefExpr::VK_None) {
      Ctx.reportError(Fixup.getLoc(),
                      "unsupported subtraction of qualified symbol");
      return true;
    }
  }

  bool IsPCRel = Backend.getFixupKindInfo(
    Fixup.getKind()).Flags & MCFixupKindInfo::FKF_IsPCRel;

  // A PC-relative fixup is resolved only when it refers to exactly one
  // unqualified, defined symbol whose offset from this fragment the object
  // writer can prove is a link-time constant. An absolute fixup is resolved
  // when the expression itself evaluated to a constant.
  bool IsResolved;
  if (IsPCRel) {
    if (Target.getSymB()) {
      IsResolved = false;
    } else if (!Target.getSymA()) {
      IsResolved = false;
    } else {
      const MCSymbolRefExpr *A = Target.getSymA();
      const MCSymbol &SA = A->getSymbol();
      if (A->getKind() != MCSymbolRefExpr::VK_None || SA.isUndefined()) {
        IsResolved = false;
      } else {
        IsResolved = getWriter().isSymbolRefDifferenceFullyResolvedImpl(
            *this, SA, *DF, false, true);
      }
    }
  } else {
    IsResolved = Target.isAbsolute();
  }

  // Fold the symbol offsets into the numeric value: constant + SymA - SymB.
  Value = Target.getConstant();

  if (const MCSymbolRefExpr *A = Target.getSymA()) {
    const MCSymbol &Sym = A->getSymbol();
    if (Sym.isDefined())
      Value += Layout.getSymbolOffset(Sym);
  }
  if (const MCSymbolRefExpr *B = Target.getSymB()) {
    const MCSymbol &Sym = B->getSymbol();
    if (Sym.isDefined())
      Value -= Layout.getSymbolOffset(Sym);
  }

  bool ShouldAlignPC = Backend.getFixupKindInfo(Fixup.getKind()).Flags &
                         MCFixupKindInfo::FKF_IsAlignedDownTo32Bits;
  assert((ShouldAlignPC ? IsPCRel : true) &&
    "FKF_IsAlignedDownTo32Bits is only allowed on PC-relative fixups!");

  if (IsPCRel) {
    uint32_t Offset = Layout.getFragmentOffset(DF) + Fixup.getOffset();

    // A number of ARM fixups in Thumb mode require that the effective PC
    // address be determined as the 32-bit aligned version of the actual offset.
    if (ShouldAlignPC) Offset &= ~0x3;
    Value -= Offset;
  }

  // Let the backend force a relocation if needed.
  if (IsResolved && Backend.shouldForceRelocation(*this, Fixup, Target))
    IsResolved = false;

  return IsResolved;
}
274
// Compute the current size in bytes of fragment \p F under \p Layout.
// Content-carrying kinds (data, relaxable, LEB, DWARF, CodeView) are simply
// the size of their byte buffer; Fill, Align and Org depend on the layout or
// on expressions and may report errors through the MCContext (returning 0 in
// the error cases so layout can continue).
uint64_t MCAssembler::computeFragmentSize(const MCAsmLayout &Layout,
                                          const MCFragment &F) const {
  switch (F.getKind()) {
  case MCFragment::FT_Data:
    return cast<MCDataFragment>(F).getContents().size();
  case MCFragment::FT_Relaxable:
    return cast<MCRelaxableFragment>(F).getContents().size();
  case MCFragment::FT_CompactEncodedInst:
    return cast<MCCompactEncodedInstFragment>(F).getContents().size();
  case MCFragment::FT_Fill: {
    auto &FF = cast<MCFillFragment>(F);
    int64_t Size = 0;
    // The fill size is an expression that must be absolute by now.
    if (!FF.getSize().evaluateAsAbsolute(Size, Layout))
      getContext().reportError(FF.getLoc(),
                               "expected assembly-time absolute expression");
    if (Size < 0) {
      getContext().reportError(FF.getLoc(), "invalid number of bytes");
      return 0;
    }
    return Size;
  }

  case MCFragment::FT_LEB:
    return cast<MCLEBFragment>(F).getContents().size();

  case MCFragment::FT_Padding:
    return cast<MCPaddingFragment>(F).getSize();

  case MCFragment::FT_SymbolId:
    // Always a 32-bit symbol index (see writeFragment).
    return 4;

  case MCFragment::FT_Align: {
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
    unsigned Offset = Layout.getFragmentOffset(&AF);
    unsigned Size = OffsetToAlignment(Offset, AF.getAlignment());
    // If we are padding with nops, force the padding to be larger than the
    // minimum nop size.
    if (Size > 0 && AF.hasEmitNops()) {
      while (Size % getBackend().getMinimumNopSize())
        Size += AF.getAlignment();
    }
    // Respect the caller's cap on padding: emit nothing if exceeded.
    if (Size > AF.getMaxBytesToEmit())
      return 0;
    return Size;
  }

  case MCFragment::FT_Org: {
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);
    MCValue Value;
    if (!OF.getOffset().evaluateAsValue(Value, Layout)) {
      getContext().reportError(OF.getLoc(),
                               "expected assembly-time absolute expression");
        return 0;
    }

    // The .org target may be constant + symbol; fold the symbol's offset in.
    uint64_t FragmentOffset = Layout.getFragmentOffset(&OF);
    int64_t TargetLocation = Value.getConstant();
    if (const MCSymbolRefExpr *A = Value.getSymA()) {
      uint64_t Val;
      if (!Layout.getSymbolOffset(A->getSymbol(), Val)) {
        getContext().reportError(OF.getLoc(), "expected absolute expression");
        return 0;
      }
      TargetLocation += Val;
    }
    // Reject backwards .org and implausibly large (>= 1 GiB) gaps.
    int64_t Size = TargetLocation - FragmentOffset;
    if (Size < 0 || Size >= 0x40000000) {
      getContext().reportError(
          OF.getLoc(), "invalid .org offset '" + Twine(TargetLocation) +
                           "' (at offset '" + Twine(FragmentOffset) + "')");
      return 0;
    }
    return Size;
  }

  case MCFragment::FT_Dwarf:
    return cast<MCDwarfLineAddrFragment>(F).getContents().size();
  case MCFragment::FT_DwarfFrame:
    return cast<MCDwarfCallFrameFragment>(F).getContents().size();
  case MCFragment::FT_CVInlineLines:
    return cast<MCCVInlineLineTableFragment>(F).getContents().size();
  case MCFragment::FT_CVDefRange:
    return cast<MCCVDefRangeFragment>(F).getContents().size();
  case MCFragment::FT_Dummy:
    llvm_unreachable("Should not have been added");
  }

  llvm_unreachable("invalid fragment kind");
}
364
// Assign an offset to fragment \p F, based on its (already laid out)
// predecessor, and record it as the last valid fragment of its section.
// When bundling is enabled, also compute and apply any bundle padding that
// must precede the fragment.
void MCAsmLayout::layoutFragment(MCFragment *F) {
  MCFragment *Prev = F->getPrevNode();

  // We should never try to recompute something which is valid.
  assert(!isFragmentValid(F) && "Attempt to recompute a valid fragment!");
  // We should never try to compute the fragment layout if its predecessor
  // isn't valid.
  assert((!Prev || isFragmentValid(Prev)) &&
         "Attempt to compute fragment before its predecessor!");

  ++stats::FragmentLayouts;

  // Compute fragment offset and size.
  if (Prev)
    F->Offset = Prev->Offset + getAssembler().computeFragmentSize(*this, *Prev);
  else
    F->Offset = 0;
  LastValidFragment[F->getParent()] = F;

  // If bundling is enabled and this fragment has instructions in it, it has to
  // obey the bundling restrictions. With padding, we'll have:
  //
  //
  //        BundlePadding
  //             |||
  // -------------------------------------
  //   Prev  |##########|       F        |
  // -------------------------------------
  //                    ^
  //                    |
  //                    F->Offset
  //
  // The fragment's offset will point to after the padding, and its computed
  // size won't include the padding.
  //
  // When the -mc-relax-all flag is used, we optimize bundling by writting the
  // padding directly into fragments when the instructions are emitted inside
  // the streamer. When the fragment is larger than the bundle size, we need to
  // ensure that it's bundle aligned. This means that if we end up with
  // multiple fragments, we must emit bundle padding between fragments.
  //
  // ".align N" is an example of a directive that introduces multiple
  // fragments. We could add a special case to handle ".align N" by emitting
  // within-fragment padding (which would produce less padding when N is less
  // than the bundle size), but for now we don't.
  //
  if (Assembler.isBundlingEnabled() && F->hasInstructions()) {
    assert(isa<MCEncodedFragment>(F) &&
           "Only MCEncodedFragment implementations have instructions");
    uint64_t FSize = Assembler.computeFragmentSize(*this, *F);

    if (!Assembler.getRelaxAll() && FSize > Assembler.getBundleAlignSize())
      report_fatal_error("Fragment can't be larger than a bundle size");

    // Padding is stored as a uint8_t on the fragment, hence the 255 limit.
    uint64_t RequiredBundlePadding = computeBundlePadding(Assembler, F,
                                                          F->Offset, FSize);
    if (RequiredBundlePadding > UINT8_MAX)
      report_fatal_error("Padding cannot exceed 255 bytes");
    F->setBundlePadding(static_cast<uint8_t>(RequiredBundlePadding));
    F->Offset += RequiredBundlePadding;
  }
}
427
428void MCAssembler::registerSymbol(const MCSymbol &Symbol, bool *Created) {
429  bool New = !Symbol.isRegistered();
430  if (Created)
431    *Created = New;
432  if (New) {
433    Symbol.setIsRegistered(true);
434    Symbols.push_back(&Symbol);
435  }
436}
437
// Emit the bundle padding (as nop instructions) that must precede fragment
// \p F, whose size is \p FSize, to the object writer \p OW. Does nothing if
// the fragment carries no bundle padding.
void MCAssembler::writeFragmentPadding(const MCFragment &F, uint64_t FSize,
                                       MCObjectWriter *OW) const {
  // Should NOP padding be written out before this fragment?
  unsigned BundlePadding = F.getBundlePadding();
  if (BundlePadding > 0) {
    assert(isBundlingEnabled() &&
           "Writing bundle padding with disabled bundling");
    assert(F.hasInstructions() &&
           "Writing bundle padding for a fragment without instructions");

    unsigned TotalLength = BundlePadding + static_cast<unsigned>(FSize);
    if (F.alignToBundleEnd() && TotalLength > getBundleAlignSize()) {
      // If the padding itself crosses a bundle boundary, it must be emitted
      // in 2 pieces, since even nop instructions must not cross boundaries.
      //             v--------------v   <- BundleAlignSize
      //        v---------v             <- BundlePadding
      // ----------------------------
      // | Prev |####|####|    F    |
      // ----------------------------
      //        ^-------------------^   <- TotalLength
      unsigned DistanceToBoundary = TotalLength - getBundleAlignSize();
      if (!getBackend().writeNopData(DistanceToBoundary, OW))
          report_fatal_error("unable to write NOP sequence of " +
                             Twine(DistanceToBoundary) + " bytes");
      // The remainder of the padding is emitted after the boundary.
      BundlePadding -= DistanceToBoundary;
    }
    if (!getBackend().writeNopData(BundlePadding, OW))
      report_fatal_error("unable to write NOP sequence of " +
                         Twine(BundlePadding) + " bytes");
  }
}
469
/// \brief Write the fragment \p F to the output file.
///
/// Emits any required bundle padding first, then exactly
/// computeFragmentSize() bytes of fragment content; the trailing assert
/// checks that invariant against the stream position.
static void writeFragment(const MCAssembler &Asm, const MCAsmLayout &Layout,
                          const MCFragment &F) {
  MCObjectWriter *OW = &Asm.getWriter();

  // FIXME: Embed in fragments instead?
  uint64_t FragmentSize = Asm.computeFragmentSize(Layout, F);

  Asm.writeFragmentPadding(F, FragmentSize, OW);

  // This variable (and its dummy usage) is to participate in the assert at
  // the end of the function.
  uint64_t Start = OW->getStream().tell();
  (void) Start;

  ++stats::EmittedFragments;

  switch (F.getKind()) {
  case MCFragment::FT_Align: {
    ++stats::EmittedAlignFragments;
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
    assert(AF.getValueSize() && "Invalid virtual align in concrete fragment!");

    uint64_t Count = FragmentSize / AF.getValueSize();

    // FIXME: This error shouldn't actually occur (the front end should emit
    // multiple .align directives to enforce the semantics it wants), but is
    // severe enough that we want to report it. How to handle this?
    if (Count * AF.getValueSize() != FragmentSize)
      report_fatal_error("undefined .align directive, value size '" +
                        Twine(AF.getValueSize()) +
                        "' is not a divisor of padding size '" +
                        Twine(FragmentSize) + "'");

    // See if we are aligning with nops, and if so do that first to try to fill
    // the Count bytes.  Then if that did not fill any bytes or there are any
    // bytes left to fill use the Value and ValueSize to fill the rest.
    // If we are aligning with nops, ask that target to emit the right data.
    if (AF.hasEmitNops()) {
      if (!Asm.getBackend().writeNopData(Count, OW))
        report_fatal_error("unable to write nop sequence of " +
                          Twine(Count) + " bytes");
      break;
    }

    // Otherwise, write out in multiples of the value size.
    for (uint64_t i = 0; i != Count; ++i) {
      switch (AF.getValueSize()) {
      default: llvm_unreachable("Invalid size!");
      case 1: OW->write8 (uint8_t (AF.getValue())); break;
      case 2: OW->write16(uint16_t(AF.getValue())); break;
      case 4: OW->write32(uint32_t(AF.getValue())); break;
      case 8: OW->write64(uint64_t(AF.getValue())); break;
      }
    }
    break;
  }

  case MCFragment::FT_Data:
    ++stats::EmittedDataFragments;
    OW->writeBytes(cast<MCDataFragment>(F).getContents());
    break;

  case MCFragment::FT_Relaxable:
    ++stats::EmittedRelaxableFragments;
    OW->writeBytes(cast<MCRelaxableFragment>(F).getContents());
    break;

  case MCFragment::FT_CompactEncodedInst:
    ++stats::EmittedCompactEncodedInstFragments;
    OW->writeBytes(cast<MCCompactEncodedInstFragment>(F).getContents());
    break;

  case MCFragment::FT_Fill: {
    ++stats::EmittedFillFragments;
    const MCFillFragment &FF = cast<MCFillFragment>(F);
    uint8_t V = FF.getValue();
    // Build a 16-byte buffer of the fill value, then emit the fragment in
    // progressively smaller power-of-two chunks instead of byte-at-a-time.
    const unsigned MaxChunkSize = 16;
    char Data[MaxChunkSize];
    memcpy(Data, &V, 1);
    for (unsigned I = 1; I < MaxChunkSize; ++I)
      Data[I] = Data[0];

    uint64_t Size = FragmentSize;
    for (unsigned ChunkSize = MaxChunkSize; ChunkSize; ChunkSize /= 2) {
      StringRef Ref(Data, ChunkSize);
      for (uint64_t I = 0, E = Size / ChunkSize; I != E; ++I)
        OW->writeBytes(Ref);
      Size = Size % ChunkSize;
    }
    break;
  }

  case MCFragment::FT_LEB: {
    const MCLEBFragment &LF = cast<MCLEBFragment>(F);
    OW->writeBytes(LF.getContents());
    break;
  }

  case MCFragment::FT_Padding: {
    // Padding fragments are emitted entirely as nops.
    if (!Asm.getBackend().writeNopData(FragmentSize, OW))
      report_fatal_error("unable to write nop sequence of " +
                         Twine(FragmentSize) + " bytes");
    break;
  }

  case MCFragment::FT_SymbolId: {
    // 4 bytes: the symbol's index (matches computeFragmentSize's FT_SymbolId).
    const MCSymbolIdFragment &SF = cast<MCSymbolIdFragment>(F);
    OW->write32(SF.getSymbol()->getIndex());
    break;
  }

  case MCFragment::FT_Org: {
    ++stats::EmittedOrgFragments;
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);

    for (uint64_t i = 0, e = FragmentSize; i != e; ++i)
      OW->write8(uint8_t(OF.getValue()));

    break;
  }

  case MCFragment::FT_Dwarf: {
    const MCDwarfLineAddrFragment &OF = cast<MCDwarfLineAddrFragment>(F);
    OW->writeBytes(OF.getContents());
    break;
  }
  case MCFragment::FT_DwarfFrame: {
    const MCDwarfCallFrameFragment &CF = cast<MCDwarfCallFrameFragment>(F);
    OW->writeBytes(CF.getContents());
    break;
  }
  case MCFragment::FT_CVInlineLines: {
    const auto &OF = cast<MCCVInlineLineTableFragment>(F);
    OW->writeBytes(OF.getContents());
    break;
  }
  case MCFragment::FT_CVDefRange: {
    const auto &DRF = cast<MCCVDefRangeFragment>(F);
    OW->writeBytes(DRF.getContents());
    break;
  }
  case MCFragment::FT_Dummy:
    llvm_unreachable("Should not have been added");
  }

  assert(OW->getStream().tell() - Start == FragmentSize &&
         "The stream should advance by fragment size");
}
619
// Write all fragments of \p Sec to the object writer's stream. Virtual
// sections (e.g. .bss-style) produce no bytes; for those we only validate
// that their fragments could legally appear there (zero fills, no fixups).
void MCAssembler::writeSectionData(const MCSection *Sec,
                                   const MCAsmLayout &Layout) const {
  // Ignore virtual sections.
  if (Sec->isVirtualSection()) {
    assert(Layout.getSectionFileSize(Sec) == 0 && "Invalid size for section!");

    // Check that contents are only things legal inside a virtual section.
    for (const MCFragment &F : *Sec) {
      switch (F.getKind()) {
      default: llvm_unreachable("Invalid fragment in virtual section!");
      case MCFragment::FT_Data: {
        // Check that we aren't trying to write a non-zero contents (or fixups)
        // into a virtual section. This is to support clients which use standard
        // directives to fill the contents of virtual sections.
        const MCDataFragment &DF = cast<MCDataFragment>(F);
        if (DF.fixup_begin() != DF.fixup_end())
          report_fatal_error("cannot have fixups in virtual section!");
        for (unsigned i = 0, e = DF.getContents().size(); i != e; ++i)
          if (DF.getContents()[i]) {
            // For ELF we can name the offending section in the diagnostic.
            if (auto *ELFSec = dyn_cast<const MCSectionELF>(Sec))
              report_fatal_error("non-zero initializer found in section '" +
                  ELFSec->getSectionName() + "'");
            else
              report_fatal_error("non-zero initializer found in virtual section");
          }
        break;
      }
      case MCFragment::FT_Align:
        // Check that we aren't trying to write a non-zero value into a virtual
        // section.
        assert((cast<MCAlignFragment>(F).getValueSize() == 0 ||
                cast<MCAlignFragment>(F).getValue() == 0) &&
               "Invalid align in virtual section!");
        break;
      case MCFragment::FT_Fill:
        assert((cast<MCFillFragment>(F).getValue() == 0) &&
               "Invalid fill in virtual section!");
        break;
      }
    }

    return;
  }

  // Concrete section: emit every fragment and check that exactly the
  // section's address size worth of bytes was written.
  uint64_t Start = getWriter().getStream().tell();
  (void)Start;

  for (const MCFragment &F : *Sec)
    writeFragment(*this, Layout, F);

  assert(getWriter().getStream().tell() - Start ==
         Layout.getSectionAddressSize(Sec));
}
673
674std::tuple<MCValue, uint64_t, bool>
675MCAssembler::handleFixup(const MCAsmLayout &Layout, MCFragment &F,
676                         const MCFixup &Fixup) {
677  // Evaluate the fixup.
678  MCValue Target;
679  uint64_t FixedValue;
680  bool IsResolved = evaluateFixup(Layout, Fixup, &F, Target, FixedValue);
681  if (!IsResolved) {
682    // The fixup was unresolved, we need a relocation. Inform the object
683    // writer of the relocation, and give it an opportunity to adjust the
684    // fixup value if need be.
685    getWriter().recordRelocation(*this, Layout, &F, Fixup, Target, FixedValue);
686  }
687  return std::make_tuple(Target, FixedValue, IsResolved);
688}
689
// Drive the full layout process: assign section/fragment ordering, relax
// fragments until the layout converges, finalize, let the object writer do
// post-layout binding, and finally evaluate and apply every fixup (recording
// relocations for the unresolved ones).
void MCAssembler::layout(MCAsmLayout &Layout) {
  DEBUG_WITH_TYPE("mc-dump", {
      errs() << "assembler backend - pre-layout\n--\n";
      dump(); });

  // Create dummy fragments and assign section ordinals.
  unsigned SectionIndex = 0;
  for (MCSection &Sec : *this) {
    // Create dummy fragments to eliminate any empty sections, this simplifies
    // layout.
    if (Sec.getFragmentList().empty())
      new MCDataFragment(&Sec);

    Sec.setOrdinal(SectionIndex++);
  }

  // Assign layout order indices to sections and fragments.
  for (unsigned i = 0, e = Layout.getSectionOrder().size(); i != e; ++i) {
    MCSection *Sec = Layout.getSectionOrder()[i];
    Sec->setLayoutOrder(i);

    unsigned FragmentIndex = 0;
    for (MCFragment &Frag : *Sec)
      Frag.setLayoutOrder(FragmentIndex++);
  }

  // Layout until everything fits. Bail out early if an error was reported
  // during a relaxation step.
  while (layoutOnce(Layout))
    if (getContext().hadError())
      return;

  DEBUG_WITH_TYPE("mc-dump", {
      errs() << "assembler backend - post-relaxation\n--\n";
      dump(); });

  // Finalize the layout, including fragment lowering.
  finishLayout(Layout);

  DEBUG_WITH_TYPE("mc-dump", {
      errs() << "assembler backend - final-layout\n--\n";
      dump(); });

  // Allow the object writer a chance to perform post-layout binding (for
  // example, to set the index fields in the symbol data).
  getWriter().executePostLayoutBinding(*this, Layout);

  // Evaluate and apply the fixups, generating relocation entries as necessary.
  for (MCSection &Sec : *this) {
    for (MCFragment &Frag : Sec) {
      // Data and relaxable fragments both have fixups.  So only process
      // those here.
      // FIXME: Is there a better way to do this?  MCEncodedFragmentWithFixups
      // being templated makes this tricky.
      // Compact-encoded-inst fragments are encoded fragments with no fixup
      // list, so they are skipped (the dyn_cast chain below could not handle
      // them).
      if (isa<MCEncodedFragment>(&Frag) &&
          isa<MCCompactEncodedInstFragment>(&Frag))
        continue;
      if (!isa<MCEncodedFragment>(&Frag) && !isa<MCCVDefRangeFragment>(&Frag))
        continue;
      ArrayRef<MCFixup> Fixups;
      MutableArrayRef<char> Contents;
      if (auto *FragWithFixups = dyn_cast<MCDataFragment>(&Frag)) {
        Fixups = FragWithFixups->getFixups();
        Contents = FragWithFixups->getContents();
      } else if (auto *FragWithFixups = dyn_cast<MCRelaxableFragment>(&Frag)) {
        Fixups = FragWithFixups->getFixups();
        Contents = FragWithFixups->getContents();
      } else if (auto *FragWithFixups = dyn_cast<MCCVDefRangeFragment>(&Frag)) {
        Fixups = FragWithFixups->getFixups();
        Contents = FragWithFixups->getContents();
      } else
        llvm_unreachable("Unknown fragment with fixups!");
      for (const MCFixup &Fixup : Fixups) {
        uint64_t FixedValue;
        bool IsResolved;
        MCValue Target;
        // Evaluate (and possibly record a relocation), then let the backend
        // patch the fixed value into the fragment contents.
        std::tie(Target, FixedValue, IsResolved) =
            handleFixup(Layout, Frag, Fixup);
        getBackend().applyFixup(*this, Fixup, Target, Contents, FixedValue,
                                IsResolved);
      }
    }
  }
}
773
774void MCAssembler::Finish() {
775  // Create the layout object.
776  MCAsmLayout Layout(*this);
777  layout(Layout);
778
779  raw_ostream &OS = getWriter().getStream();
780  uint64_t StartOffset = OS.tell();
781
782  // Write the object file.
783  getWriter().writeObject(*this, Layout);
784
785  stats::ObjectBytes += OS.tell() - StartOffset;
786}
787
788bool MCAssembler::fixupNeedsRelaxation(const MCFixup &Fixup,
789                                       const MCRelaxableFragment *DF,
790                                       const MCAsmLayout &Layout) const {
791  MCValue Target;
792  uint64_t Value;
793  bool Resolved = evaluateFixup(Layout, Fixup, DF, Target, Value);
794  if (Target.getSymA() &&
795      Target.getSymA()->getKind() == MCSymbolRefExpr::VK_X86_ABS8 &&
796      Fixup.getKind() == FK_Data_1)
797    return false;
798  return getBackend().fixupNeedsRelaxationAdvanced(Fixup, Resolved, Value, DF,
799                                                   Layout);
800}
801
802bool MCAssembler::fragmentNeedsRelaxation(const MCRelaxableFragment *F,
803                                          const MCAsmLayout &Layout) const {
804  // If this inst doesn't ever need relaxation, ignore it. This occurs when we
805  // are intentionally pushing out inst fragments, or because we relaxed a
806  // previous instruction to one that doesn't need relaxation.
807  if (!getBackend().mayNeedRelaxation(F->getInst()))
808    return false;
809
810  for (const MCFixup &Fixup : F->getFixups())
811    if (fixupNeedsRelaxation(Fixup, F, Layout))
812      return true;
813
814  return false;
815}
816
817bool MCAssembler::relaxInstruction(MCAsmLayout &Layout,
818                                   MCRelaxableFragment &F) {
819  if (!fragmentNeedsRelaxation(&F, Layout))
820    return false;
821
822  ++stats::RelaxedInstructions;
823
824  // FIXME-PERF: We could immediately lower out instructions if we can tell
825  // they are fully resolved, to avoid retesting on later passes.
826
827  // Relax the fragment.
828
829  MCInst Relaxed;
830  getBackend().relaxInstruction(F.getInst(), F.getSubtargetInfo(), Relaxed);
831
832  // Encode the new instruction.
833  //
834  // FIXME-PERF: If it matters, we could let the target do this. It can
835  // probably do so more efficiently in many cases.
836  SmallVector<MCFixup, 4> Fixups;
837  SmallString<256> Code;
838  raw_svector_ostream VecOS(Code);
839  getEmitter().encodeInstruction(Relaxed, VecOS, Fixups, F.getSubtargetInfo());
840
841  // Update the fragment.
842  F.setInst(Relaxed);
843  F.getContents() = Code;
844  F.getFixups() = Fixups;
845
846  return true;
847}
848
849bool MCAssembler::relaxPaddingFragment(MCAsmLayout &Layout,
850                                       MCPaddingFragment &PF) {
851  uint64_t OldSize = PF.getSize();
852  if (!getBackend().relaxFragment(&PF, Layout))
853    return false;
854  uint64_t NewSize = PF.getSize();
855
856  ++stats::PaddingFragmentsRelaxations;
857  stats::PaddingFragmentsBytes += NewSize;
858  stats::PaddingFragmentsBytes -= OldSize;
859  return true;
860}
861
862bool MCAssembler::relaxLEB(MCAsmLayout &Layout, MCLEBFragment &LF) {
863  uint64_t OldSize = LF.getContents().size();
864  int64_t Value;
865  bool Abs = LF.getValue().evaluateKnownAbsolute(Value, Layout);
866  if (!Abs)
867    report_fatal_error("sleb128 and uleb128 expressions must be absolute");
868  SmallString<8> &Data = LF.getContents();
869  Data.clear();
870  raw_svector_ostream OSE(Data);
871  if (LF.isSigned())
872    encodeSLEB128(Value, OSE);
873  else
874    encodeULEB128(Value, OSE);
875  return OldSize != LF.getContents().size();
876}
877
878bool MCAssembler::relaxDwarfLineAddr(MCAsmLayout &Layout,
879                                     MCDwarfLineAddrFragment &DF) {
880  MCContext &Context = Layout.getAssembler().getContext();
881  uint64_t OldSize = DF.getContents().size();
882  int64_t AddrDelta;
883  bool Abs = DF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, Layout);
884  assert(Abs && "We created a line delta with an invalid expression");
885  (void) Abs;
886  int64_t LineDelta;
887  LineDelta = DF.getLineDelta();
888  SmallString<8> &Data = DF.getContents();
889  Data.clear();
890  raw_svector_ostream OSE(Data);
891  MCDwarfLineAddr::Encode(Context, getDWARFLinetableParams(), LineDelta,
892                          AddrDelta, OSE);
893  return OldSize != Data.size();
894}
895
896bool MCAssembler::relaxDwarfCallFrameFragment(MCAsmLayout &Layout,
897                                              MCDwarfCallFrameFragment &DF) {
898  MCContext &Context = Layout.getAssembler().getContext();
899  uint64_t OldSize = DF.getContents().size();
900  int64_t AddrDelta;
901  bool Abs = DF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, Layout);
902  assert(Abs && "We created call frame with an invalid expression");
903  (void) Abs;
904  SmallString<8> &Data = DF.getContents();
905  Data.clear();
906  raw_svector_ostream OSE(Data);
907  MCDwarfFrameEmitter::EncodeAdvanceLoc(Context, AddrDelta, OSE);
908  return OldSize != Data.size();
909}
910
911bool MCAssembler::relaxCVInlineLineTable(MCAsmLayout &Layout,
912                                         MCCVInlineLineTableFragment &F) {
913  unsigned OldSize = F.getContents().size();
914  getContext().getCVContext().encodeInlineLineTable(Layout, F);
915  return OldSize != F.getContents().size();
916}
917
918bool MCAssembler::relaxCVDefRange(MCAsmLayout &Layout,
919                                  MCCVDefRangeFragment &F) {
920  unsigned OldSize = F.getContents().size();
921  getContext().getCVContext().encodeDefRange(Layout, F);
922  return OldSize != F.getContents().size();
923}
924
925bool MCAssembler::layoutSectionOnce(MCAsmLayout &Layout, MCSection &Sec) {
926  // Holds the first fragment which needed relaxing during this layout. It will
927  // remain NULL if none were relaxed.
928  // When a fragment is relaxed, all the fragments following it should get
929  // invalidated because their offset is going to change.
930  MCFragment *FirstRelaxedFragment = nullptr;
931
932  // Attempt to relax all the fragments in the section.
933  for (MCSection::iterator I = Sec.begin(), IE = Sec.end(); I != IE; ++I) {
934    // Check if this is a fragment that needs relaxation.
935    bool RelaxedFrag = false;
936    switch(I->getKind()) {
937    default:
938      break;
939    case MCFragment::FT_Relaxable:
940      assert(!getRelaxAll() &&
941             "Did not expect a MCRelaxableFragment in RelaxAll mode");
942      RelaxedFrag = relaxInstruction(Layout, *cast<MCRelaxableFragment>(I));
943      break;
944    case MCFragment::FT_Dwarf:
945      RelaxedFrag = relaxDwarfLineAddr(Layout,
946                                       *cast<MCDwarfLineAddrFragment>(I));
947      break;
948    case MCFragment::FT_DwarfFrame:
949      RelaxedFrag =
950        relaxDwarfCallFrameFragment(Layout,
951                                    *cast<MCDwarfCallFrameFragment>(I));
952      break;
953    case MCFragment::FT_LEB:
954      RelaxedFrag = relaxLEB(Layout, *cast<MCLEBFragment>(I));
955      break;
956    case MCFragment::FT_Padding:
957      RelaxedFrag = relaxPaddingFragment(Layout, *cast<MCPaddingFragment>(I));
958      break;
959    case MCFragment::FT_CVInlineLines:
960      RelaxedFrag =
961          relaxCVInlineLineTable(Layout, *cast<MCCVInlineLineTableFragment>(I));
962      break;
963    case MCFragment::FT_CVDefRange:
964      RelaxedFrag = relaxCVDefRange(Layout, *cast<MCCVDefRangeFragment>(I));
965      break;
966    }
967    if (RelaxedFrag && !FirstRelaxedFragment)
968      FirstRelaxedFragment = &*I;
969  }
970  if (FirstRelaxedFragment) {
971    Layout.invalidateFragmentsFrom(FirstRelaxedFragment);
972    return true;
973  }
974  return false;
975}
976
977bool MCAssembler::layoutOnce(MCAsmLayout &Layout) {
978  ++stats::RelaxationSteps;
979
980  bool WasRelaxed = false;
981  for (iterator it = begin(), ie = end(); it != ie; ++it) {
982    MCSection &Sec = *it;
983    while (layoutSectionOnce(Layout, Sec))
984      WasRelaxed = true;
985  }
986
987  return WasRelaxed;
988}
989
990void MCAssembler::finishLayout(MCAsmLayout &Layout) {
991  // The layout is done. Mark every fragment as valid.
992  for (unsigned int i = 0, n = Layout.getSectionOrder().size(); i != n; ++i) {
993    MCSection &Section = *Layout.getSectionOrder()[i];
994    Layout.getFragmentOffset(&*Section.rbegin());
995    computeFragmentSize(Layout, *Section.rbegin());
996  }
997  getBackend().finishLayout(*this, Layout);
998}
999