//===-- X86AsmBackend.cpp - X86 Assembler Backend -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/X86BaseInfo.h"
#include "MCTargetDesc/X86FixupKinds.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCMachObjectWriter.h"
#include "llvm/MC/MCObjectStreamer.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {
/// A wrapper for holding a mask of the values from X86::AlignBranchBoundaryKind
class X86AlignBranchKind {
private:
  uint8_t AlignBranchKind = 0;

public:
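  /// Parse a plus-separated list of branch type names, e.g. "fused+jcc+jmp",
  /// and accumulate the corresponding kinds into the mask.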
  void operator=(const std::string &Val) {
    if (Val.empty())
      return;
    SmallVector<StringRef, 6> BranchTypes;
    StringRef(Val).split(BranchTypes, '+', -1, false);
    for (auto BranchType : BranchTypes) {
      if (BranchType == "fused")
        addKind(X86::AlignBranchFused);
      else if (BranchType == "jcc")
        addKind(X86::AlignBranchJcc);
      else if (BranchType == "jmp")
        addKind(X86::AlignBranchJmp);
      else if (BranchType == "call")
        addKind(X86::AlignBranchCall);
      else if (BranchType == "ret")
        addKind(X86::AlignBranchRet);
      else if (BranchType == "indirect")
        addKind(X86::AlignBranchIndirect);
      else {
        report_fatal_error(
            "invalid argument to '-x86-align-branch': the branch type must be "
            "a plus-separated combination of fused, jcc, jmp, call, ret and "
            "indirect",
            false);
      }
    }
  }

  operator uint8_t() const { return AlignBranchKind; }
  void addKind(X86::AlignBranchBoundaryKind Value) { AlignBranchKind |= Value; }
};

X86AlignBranchKind X86AlignBranchKindLoc;

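// For example, passing -x86-align-branch-boundary=32 -x86-align-branch=fused+jcc
// pads fused conditional branches and plain conditional jumps with NOPs so
// that they neither cross nor end at a 32-byte boundary.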
cl::opt<unsigned> X86AlignBranchBoundary(
    "x86-align-branch-boundary", cl::init(0),
    cl::desc(
        "Control how the assembler should align branches with NOPs. If the "
        "boundary's size is not 0, it should be a power of 2 and no less "
        "than 32. Branches will be aligned so that they neither cross nor "
        "end at a boundary of the specified size. The default value 0 does "
        "not align branches."));

cl::opt<X86AlignBranchKind, true, cl::parser<std::string>> X86AlignBranch(
    "x86-align-branch",
    cl::desc(
        "Specify the types of branches to align. The argument is a "
        "plus-separated combination of jcc, fused, jmp, call, ret and "
        "indirect: jcc indicates conditional jumps, fused indicates fused "
        "conditional jumps, jmp indicates unconditional jumps, call indicates "
        "direct and indirect calls, ret indicates rets, and indirect "
        "indicates indirect jumps."),
    cl::value_desc("(plus separated list of types)"),
    cl::location(X86AlignBranchKindLoc));

cl::opt<bool> X86AlignBranchWithin32BBoundaries(
    "x86-branches-within-32B-boundaries", cl::init(false),
    cl::desc(
        "Align selected instructions to mitigate the negative performance "
        "impact of Intel's microcode update for erratum SKX102. May break "
        "assumptions about labels corresponding to particular instructions, "
        "and should be used with caution."));

class X86ELFObjectWriter : public MCELFObjectTargetWriter {
public:
  X86ELFObjectWriter(bool is64Bit, uint8_t OSABI, uint16_t EMachine,
                     bool HasRelocationAddend)
    : MCELFObjectTargetWriter(is64Bit, OSABI, EMachine, HasRelocationAddend) {}
};

class X86AsmBackend : public MCAsmBackend {
  const MCSubtargetInfo &STI;
  std::unique_ptr<const MCInstrInfo> MCII;
  X86AlignBranchKind AlignBranchType;
  Align AlignBoundary;

  bool isMacroFused(const MCInst &Cmp, const MCInst &Jcc) const;

  bool needAlign(MCObjectStreamer &OS) const;
  bool needAlignInst(const MCInst &Inst) const;
  MCBoundaryAlignFragment *
  getOrCreateBoundaryAlignFragment(MCObjectStreamer &OS) const;
  MCInst PrevInst;

public:
  X86AsmBackend(const Target &T, const MCSubtargetInfo &STI)
      : MCAsmBackend(support::little), STI(STI),
        MCII(T.createMCInstrInfo()) {
    if (X86AlignBranchWithin32BBoundaries) {
      // At the moment, this defaults to aligning fused branches, unconditional
      // jumps, and (unfused) conditional jumps with nops.  Both the
      // instructions aligned and the alignment method (nop vs prefix) may
      // change in the future.
      AlignBoundary = assumeAligned(32);
      AlignBranchType.addKind(X86::AlignBranchFused);
      AlignBranchType.addKind(X86::AlignBranchJcc);
      AlignBranchType.addKind(X86::AlignBranchJmp);
    }
    // Allow overriding defaults set by master flag
    if (X86AlignBranchBoundary.getNumOccurrences())
      AlignBoundary = assumeAligned(X86AlignBranchBoundary);
    if (X86AlignBranch.getNumOccurrences())
      AlignBranchType = X86AlignBranchKindLoc;
  }

  bool allowAutoPadding() const override;
  void alignBranchesBegin(MCObjectStreamer &OS, const MCInst &Inst) override;
  void alignBranchesEnd(MCObjectStreamer &OS, const MCInst &Inst) override;

  unsigned getNumFixupKinds() const override {
    return X86::NumTargetFixupKinds;
  }

  Optional<MCFixupKind> getFixupKind(StringRef Name) const override;

  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;

  bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
                             const MCValue &Target) override;

  void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                  const MCValue &Target, MutableArrayRef<char> Data,
                  uint64_t Value, bool IsResolved,
                  const MCSubtargetInfo *STI) const override;

  bool mayNeedRelaxation(const MCInst &Inst,
                         const MCSubtargetInfo &STI) const override;

  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const override;

  void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
                        MCInst &Res) const override;

  bool writeNopData(raw_ostream &OS, uint64_t Count) const override;
};
} // end anonymous namespace

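/// Return the opcode of the equivalent branch with a 16-bit (in 16-bit mode)
/// or 32-bit displacement, or the original opcode if the branch cannot be
/// relaxed.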
static unsigned getRelaxedOpcodeBranch(const MCInst &Inst, bool is16BitMode) {
  unsigned Op = Inst.getOpcode();
  switch (Op) {
  default:
    return Op;
  case X86::JCC_1:
    return (is16BitMode) ? X86::JCC_2 : X86::JCC_4;
  case X86::JMP_1:
    return (is16BitMode) ? X86::JMP_2 : X86::JMP_4;
  }
}

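/// Return the opcode of the equivalent arithmetic instruction taking a
/// full-width immediate, or the original opcode if the instruction has no
/// sign-extended 8-bit immediate form to relax.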
static unsigned getRelaxedOpcodeArith(const MCInst &Inst) {
  unsigned Op = Inst.getOpcode();
  switch (Op) {
  default:
    return Op;

    // IMUL
  case X86::IMUL16rri8: return X86::IMUL16rri;
  case X86::IMUL16rmi8: return X86::IMUL16rmi;
  case X86::IMUL32rri8: return X86::IMUL32rri;
  case X86::IMUL32rmi8: return X86::IMUL32rmi;
  case X86::IMUL64rri8: return X86::IMUL64rri32;
  case X86::IMUL64rmi8: return X86::IMUL64rmi32;

    // AND
  case X86::AND16ri8: return X86::AND16ri;
  case X86::AND16mi8: return X86::AND16mi;
  case X86::AND32ri8: return X86::AND32ri;
  case X86::AND32mi8: return X86::AND32mi;
  case X86::AND64ri8: return X86::AND64ri32;
  case X86::AND64mi8: return X86::AND64mi32;

    // OR
  case X86::OR16ri8: return X86::OR16ri;
  case X86::OR16mi8: return X86::OR16mi;
  case X86::OR32ri8: return X86::OR32ri;
  case X86::OR32mi8: return X86::OR32mi;
  case X86::OR64ri8: return X86::OR64ri32;
  case X86::OR64mi8: return X86::OR64mi32;

    // XOR
  case X86::XOR16ri8: return X86::XOR16ri;
  case X86::XOR16mi8: return X86::XOR16mi;
  case X86::XOR32ri8: return X86::XOR32ri;
  case X86::XOR32mi8: return X86::XOR32mi;
  case X86::XOR64ri8: return X86::XOR64ri32;
  case X86::XOR64mi8: return X86::XOR64mi32;

    // ADD
  case X86::ADD16ri8: return X86::ADD16ri;
  case X86::ADD16mi8: return X86::ADD16mi;
  case X86::ADD32ri8: return X86::ADD32ri;
  case X86::ADD32mi8: return X86::ADD32mi;
  case X86::ADD64ri8: return X86::ADD64ri32;
  case X86::ADD64mi8: return X86::ADD64mi32;

    // ADC
  case X86::ADC16ri8: return X86::ADC16ri;
  case X86::ADC16mi8: return X86::ADC16mi;
  case X86::ADC32ri8: return X86::ADC32ri;
  case X86::ADC32mi8: return X86::ADC32mi;
  case X86::ADC64ri8: return X86::ADC64ri32;
  case X86::ADC64mi8: return X86::ADC64mi32;

    // SUB
  case X86::SUB16ri8: return X86::SUB16ri;
  case X86::SUB16mi8: return X86::SUB16mi;
  case X86::SUB32ri8: return X86::SUB32ri;
  case X86::SUB32mi8: return X86::SUB32mi;
  case X86::SUB64ri8: return X86::SUB64ri32;
  case X86::SUB64mi8: return X86::SUB64mi32;

    // SBB
  case X86::SBB16ri8: return X86::SBB16ri;
  case X86::SBB16mi8: return X86::SBB16mi;
  case X86::SBB32ri8: return X86::SBB32ri;
  case X86::SBB32mi8: return X86::SBB32mi;
  case X86::SBB64ri8: return X86::SBB64ri32;
  case X86::SBB64mi8: return X86::SBB64mi32;

    // CMP
  case X86::CMP16ri8: return X86::CMP16ri;
  case X86::CMP16mi8: return X86::CMP16mi;
  case X86::CMP32ri8: return X86::CMP32ri;
  case X86::CMP32mi8: return X86::CMP32mi;
  case X86::CMP64ri8: return X86::CMP64ri32;
  case X86::CMP64mi8: return X86::CMP64mi32;

    // PUSH
  case X86::PUSH32i8:  return X86::PUSHi32;
  case X86::PUSH16i8:  return X86::PUSHi16;
  case X86::PUSH64i8:  return X86::PUSH64i32;
  }
}

static unsigned getRelaxedOpcode(const MCInst &Inst, bool is16BitMode) {
  unsigned R = getRelaxedOpcodeArith(Inst);
  if (R != Inst.getOpcode())
    return R;
  return getRelaxedOpcodeBranch(Inst, is16BitMode);
}

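/// Return the condition code of a conditional branch, or COND_INVALID if the
/// instruction is not a JCC.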
static X86::CondCode getCondFromBranch(const MCInst &MI,
                                       const MCInstrInfo &MCII) {
  unsigned Opcode = MI.getOpcode();
  switch (Opcode) {
  default:
    return X86::COND_INVALID;
  case X86::JCC_1: {
    const MCInstrDesc &Desc = MCII.get(Opcode);
    return static_cast<X86::CondCode>(
        MI.getOperand(Desc.getNumOperands() - 1).getImm());
  }
  }
}

static X86::SecondMacroFusionInstKind
classifySecondInstInMacroFusion(const MCInst &MI, const MCInstrInfo &MCII) {
  X86::CondCode CC = getCondFromBranch(MI, MCII);
  return classifySecondCondCodeInMacroFusion(CC);
}

/// Check if the instruction uses RIP relative addressing.
static bool isRIPRelative(const MCInst &MI, const MCInstrInfo &MCII) {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MCII.get(Opcode);
  uint64_t TSFlags = Desc.TSFlags;
  unsigned CurOp = X86II::getOperandBias(Desc);
  int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
  if (MemoryOperand < 0)
    return false;
  unsigned BaseRegNum = MemoryOperand + CurOp + X86::AddrBaseReg;
  unsigned BaseReg = MI.getOperand(BaseRegNum).getReg();
  return (BaseReg == X86::RIP);
}

/// Check if the instruction is valid as the first instruction in macro fusion.
static bool isFirstMacroFusibleInst(const MCInst &Inst,
                                    const MCInstrInfo &MCII) {
  // An Intel instruction with RIP relative addressing is not macro fusible.
  if (isRIPRelative(Inst, MCII))
    return false;
  X86::FirstMacroFusionInstKind FIK =
      X86::classifyFirstOpcodeInMacroFusion(Inst.getOpcode());
  return FIK != X86::FirstMacroFusionInstKind::Invalid;
}

/// Check if the two instructions will be macro-fused on the target cpu.
bool X86AsmBackend::isMacroFused(const MCInst &Cmp, const MCInst &Jcc) const {
  const MCInstrDesc &InstDesc = MCII->get(Jcc.getOpcode());
  if (!InstDesc.isConditionalBranch())
    return false;
  if (!isFirstMacroFusibleInst(Cmp, *MCII))
    return false;
  const X86::FirstMacroFusionInstKind CmpKind =
      X86::classifyFirstOpcodeInMacroFusion(Cmp.getOpcode());
  const X86::SecondMacroFusionInstKind BranchKind =
      classifySecondInstInMacroFusion(Jcc, *MCII);
  return X86::isMacroFused(CmpKind, BranchKind);
}

/// Check if the instruction has a variant symbol operand.
static bool hasVariantSymbol(const MCInst &MI) {
  for (auto &Operand : MI) {
    if (!Operand.isExpr())
      continue;
    const MCExpr &Expr = *Operand.getExpr();
    if (Expr.getKind() == MCExpr::SymbolRef &&
        cast<MCSymbolRefExpr>(Expr).getKind() != MCSymbolRefExpr::VK_None)
      return true;
  }
  return false;
}

bool X86AsmBackend::allowAutoPadding() const {
  return (AlignBoundary != Align::None() &&
          AlignBranchType != X86::AlignBranchNone);
}

bool X86AsmBackend::needAlign(MCObjectStreamer &OS) const {
  if (!OS.getAllowAutoPadding())
    return false;
  assert(allowAutoPadding() && "incorrect initialization!");

  MCAssembler &Assembler = OS.getAssembler();
  MCSection *Sec = OS.getCurrentSectionOnly();
  // TODO: Currently we don't deal with bundle cases.
  if (Assembler.isBundlingEnabled() && Sec->isBundleLocked())
    return false;

  // Branches only need to be aligned in 32-bit or 64-bit mode.
  if (!(STI.hasFeature(X86::Mode64Bit) || STI.hasFeature(X86::Mode32Bit)))
    return false;

  return true;
}

/// Check if the instruction needs to be aligned. Padding is disabled before
/// an instruction which may be rewritten by the linker (e.g. TLSCALL).
bool X86AsmBackend::needAlignInst(const MCInst &Inst) const {
  // The linker may rewrite an instruction that has a variant symbol operand.
  if (hasVariantSymbol(Inst))
    return false;

  const MCInstrDesc &InstDesc = MCII->get(Inst.getOpcode());
  return (InstDesc.isConditionalBranch() &&
          (AlignBranchType & X86::AlignBranchJcc)) ||
         (InstDesc.isUnconditionalBranch() &&
          (AlignBranchType & X86::AlignBranchJmp)) ||
         (InstDesc.isCall() &&
          (AlignBranchType & X86::AlignBranchCall)) ||
         (InstDesc.isReturn() &&
          (AlignBranchType & X86::AlignBranchRet)) ||
         (InstDesc.isIndirectBranch() &&
          (AlignBranchType & X86::AlignBranchIndirect));
}

static bool canReuseBoundaryAlignFragment(const MCBoundaryAlignFragment &F) {
  // If the MCBoundaryAlignFragment has not been used to emit NOPs, we can
  // reuse it.
  return !F.canEmitNops();
}

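/// Return the current fragment if it is a reusable MCBoundaryAlignFragment,
/// otherwise create a new MCBoundaryAlignFragment and append it to the
/// streamer.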
MCBoundaryAlignFragment *
X86AsmBackend::getOrCreateBoundaryAlignFragment(MCObjectStreamer &OS) const {
  auto *F = dyn_cast_or_null<MCBoundaryAlignFragment>(OS.getCurrentFragment());
  if (!F || !canReuseBoundaryAlignFragment(*F)) {
    F = new MCBoundaryAlignFragment(AlignBoundary);
    OS.insert(F);
  }
  return F;
}

/// Insert MCBoundaryAlignFragment before instructions to align branches.
void X86AsmBackend::alignBranchesBegin(MCObjectStreamer &OS,
                                       const MCInst &Inst) {
  if (!needAlign(OS))
    return;

  MCFragment *CF = OS.getCurrentFragment();
  bool NeedAlignFused = AlignBranchType & X86::AlignBranchFused;
  if (NeedAlignFused && isMacroFused(PrevInst, Inst) && CF) {
    // Macro fusion actually happens and there is no other fragment inserted
    // after the previous instruction. NOPs can be emitted in PF to align the
    // fused jcc.
    if (auto *PF =
            dyn_cast_or_null<MCBoundaryAlignFragment>(CF->getPrevNode())) {
      const_cast<MCBoundaryAlignFragment *>(PF)->setEmitNops(true);
      const_cast<MCBoundaryAlignFragment *>(PF)->setFused(true);
    }
  } else if (needAlignInst(Inst)) {
    // Note: When there is at least one fragment, such as MCAlignFragment,
    // inserted after the previous instruction, e.g.
    //
    // \code
    //   cmp %rax %rcx
    //   .align 16
    //   je .Label0
    // \endcode
    //
    // we will treat the JCC as an unfused branch although it may be fused
    // with the CMP.
    auto *F = getOrCreateBoundaryAlignFragment(OS);
    F->setEmitNops(true);
    F->setFused(false);
  } else if (NeedAlignFused && isFirstMacroFusibleInst(Inst, *MCII)) {
    // We don't know if macro fusion happens until reaching the next
    // instruction, so a placeholder is put here if necessary.
    getOrCreateBoundaryAlignFragment(OS);
  }

  PrevInst = Inst;
}

/// Insert a MCBoundaryAlignFragment to mark the end of the branch to be
/// aligned if necessary.
void X86AsmBackend::alignBranchesEnd(MCObjectStreamer &OS, const MCInst &Inst) {
  if (!needAlign(OS))
    return;
  // If the branch is emitted into a MCRelaxableFragment, we can determine the
  // size of the branch easily in MCAssembler::relaxBoundaryAlign. When the
  // branch is fused, the fused branch (macro fusion pair) must be emitted into
  // two fragments. When the branch is unfused, the branch must be emitted into
  // one fragment. The MCRelaxableFragment naturally marks the end of the
  // fused or unfused branch.
  // Otherwise, we need to insert a MCBoundaryAlignFragment to mark the end of
  // the branch. This MCBoundaryAlignFragment may be reused to emit NOPs to
  // align other branches.
  if (needAlignInst(Inst) && !isa<MCRelaxableFragment>(OS.getCurrentFragment()))
    OS.insert(new MCBoundaryAlignFragment(AlignBoundary));

  // Update the maximum alignment on the current section if necessary.
  MCSection *Sec = OS.getCurrentSectionOnly();
  if (AlignBoundary.value() > Sec->getAlignment())
    Sec->setAlignment(AlignBoundary);
}

Optional<MCFixupKind> X86AsmBackend::getFixupKind(StringRef Name) const {
  if (STI.getTargetTriple().isOSBinFormatELF()) {
    if (STI.getTargetTriple().getArch() == Triple::x86_64) {
      if (Name == "R_X86_64_NONE")
        return FK_NONE;
    } else {
      if (Name == "R_386_NONE")
        return FK_NONE;
    }
  }
  return MCAsmBackend::getFixupKind(Name);
}

const MCFixupKindInfo &X86AsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
  const static MCFixupKindInfo Infos[X86::NumTargetFixupKinds] = {
      {"reloc_riprel_4byte", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"reloc_riprel_4byte_movq_load", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"reloc_riprel_4byte_relax", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"reloc_riprel_4byte_relax_rex", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"reloc_signed_4byte", 0, 32, 0},
      {"reloc_signed_4byte_relax", 0, 32, 0},
      {"reloc_global_offset_table", 0, 32, 0},
      {"reloc_global_offset_table8", 0, 64, 0},
      {"reloc_branch_4byte_pcrel", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
  };

  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

  assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
         "Invalid kind!");
  assert(Infos[Kind - FirstTargetFixupKind].Name && "Empty fixup name!");
  return Infos[Kind - FirstTargetFixupKind];
}

bool X86AsmBackend::shouldForceRelocation(const MCAssembler &,
                                          const MCFixup &Fixup,
                                          const MCValue &) {
  return Fixup.getKind() == FK_NONE;
}

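/// Return the size in bytes of the value patched by the given fixup kind;
/// applyFixup uses it to bounds-check and write the fixed-up bytes.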
static unsigned getFixupKindSize(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("invalid fixup kind!");
  case FK_NONE:
    return 0;
  case FK_PCRel_1:
  case FK_SecRel_1:
  case FK_Data_1:
    return 1;
  case FK_PCRel_2:
  case FK_SecRel_2:
  case FK_Data_2:
    return 2;
  case FK_PCRel_4:
  case X86::reloc_riprel_4byte:
  case X86::reloc_riprel_4byte_relax:
  case X86::reloc_riprel_4byte_relax_rex:
  case X86::reloc_riprel_4byte_movq_load:
  case X86::reloc_signed_4byte:
  case X86::reloc_signed_4byte_relax:
  case X86::reloc_global_offset_table:
  case X86::reloc_branch_4byte_pcrel:
  case FK_SecRel_4:
  case FK_Data_4:
    return 4;
  case FK_PCRel_8:
  case FK_SecRel_8:
  case FK_Data_8:
  case X86::reloc_global_offset_table8:
    return 8;
  }
}

void X86AsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                               const MCValue &Target,
                               MutableArrayRef<char> Data,
                               uint64_t Value, bool IsResolved,
                               const MCSubtargetInfo *STI) const {
  unsigned Size = getFixupKindSize(Fixup.getKind());

  assert(Fixup.getOffset() + Size <= Data.size() && "Invalid fixup offset!");

  int64_t SignedValue = static_cast<int64_t>(Value);
  if ((Target.isAbsolute() || IsResolved) &&
      getFixupKindInfo(Fixup.getKind()).Flags &
      MCFixupKindInfo::FKF_IsPCRel) {
    // Check that the PC-relative fixup fits into the fixup size.
    if (Size > 0 && !isIntN(Size * 8, SignedValue))
      Asm.getContext().reportError(
                                   Fixup.getLoc(), "value of " + Twine(SignedValue) +
                                   " is too large for field of " + Twine(Size) +
                                   ((Size == 1) ? " byte." : " bytes."));
  } else {
    // Check that the upper bits are either all zeros or all ones.
    // Specifically ignore overflow/underflow as long as the leakage is
    // limited to the lower bits. This is to remain compatible with
    // other assemblers.
    assert((Size == 0 || isIntN(Size * 8 + 1, SignedValue)) &&
           "Value does not fit in the Fixup field");
  }

  for (unsigned i = 0; i != Size; ++i)
    Data[Fixup.getOffset() + i] = uint8_t(Value >> (i * 8));
}

bool X86AsmBackend::mayNeedRelaxation(const MCInst &Inst,
                                      const MCSubtargetInfo &STI) const {
  // Branches can always be relaxed in either mode.
  if (getRelaxedOpcodeBranch(Inst, false) != Inst.getOpcode())
    return true;

  // Check if this instruction is ever relaxable.
  if (getRelaxedOpcodeArith(Inst) == Inst.getOpcode())
    return false;

  // Check if the relaxable operand has an expression. For the current set of
  // relaxable instructions, the relaxable operand is always the last operand.
  unsigned RelaxableOp = Inst.getNumOperands() - 1;
  if (Inst.getOperand(RelaxableOp).isExpr())
    return true;

  return false;
}

bool X86AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
                                         uint64_t Value,
                                         const MCRelaxableFragment *DF,
                                         const MCAsmLayout &Layout) const {
  // Relax if the value is too big for a (signed) i8.
  return !isInt<8>(Value);
}

// FIXME: Can tblgen help at all here to verify there aren't other instructions
// we can relax?
void X86AsmBackend::relaxInstruction(const MCInst &Inst,
                                     const MCSubtargetInfo &STI,
                                     MCInst &Res) const {
  // The only relaxation X86 does is from a 1-byte pcrel to a 4-byte pcrel.
  bool is16BitMode = STI.getFeatureBits()[X86::Mode16Bit];
  unsigned RelaxedOp = getRelaxedOpcode(Inst, is16BitMode);

  if (RelaxedOp == Inst.getOpcode()) {
    SmallString<256> Tmp;
    raw_svector_ostream OS(Tmp);
    Inst.dump_pretty(OS);
    OS << "\n";
    report_fatal_error("unexpected instruction to relax: " + OS.str());
  }

  Res = Inst;
  Res.setOpcode(RelaxedOp);
}

/// Write a sequence of optimal nops to the output, covering \p Count
/// bytes.
/// \return - true on success, false on failure
bool X86AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count) const {
  static const char Nops[10][11] = {
    // nop
    "\x90",
    // xchg %ax,%ax
    "\x66\x90",
    // nopl (%[re]ax)
    "\x0f\x1f\x00",
    // nopl 0(%[re]ax)
    "\x0f\x1f\x40\x00",
    // nopl 0(%[re]ax,%[re]ax,1)
    "\x0f\x1f\x44\x00\x00",
    // nopw 0(%[re]ax,%[re]ax,1)
    "\x66\x0f\x1f\x44\x00\x00",
    // nopl 0L(%[re]ax)
    "\x0f\x1f\x80\x00\x00\x00\x00",
    // nopl 0L(%[re]ax,%[re]ax,1)
    "\x0f\x1f\x84\x00\x00\x00\x00\x00",
    // nopw 0L(%[re]ax,%[re]ax,1)
    "\x66\x0f\x1f\x84\x00\x00\x00\x00\x00",
    // nopw %cs:0L(%[re]ax,%[re]ax,1)
    "\x66\x2e\x0f\x1f\x84\x00\x00\x00\x00\x00",
  };

  // This CPU doesn't support long nops. If needed add more.
  // FIXME: We could generate something better than plain 0x90.
  if (!STI.getFeatureBits()[X86::FeatureNOPL]) {
    for (uint64_t i = 0; i < Count; ++i)
      OS << '\x90';
    return true;
  }

  // 15 bytes is the longest single NOP instruction, but 10 bytes is
  // commonly the longest that can be efficiently decoded.
  uint64_t MaxNopLength = 10;
  if (STI.getFeatureBits()[X86::ProcIntelSLM])
    MaxNopLength = 7;
  else if (STI.getFeatureBits()[X86::FeatureFast15ByteNOP])
    MaxNopLength = 15;
  else if (STI.getFeatureBits()[X86::FeatureFast11ByteNOP])
    MaxNopLength = 11;

  // Emit as many MaxNopLength NOPs as needed, then emit a NOP of the remaining
  // length.
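  // For example, with MaxNopLength = 10, a request for 14 bytes is emitted as
  // a 10-byte NOP followed by a 4-byte NOP. When MaxNopLength exceeds 10 (11
  // or 15 above), the longer NOPs are built by prepending 0x66 prefixes to
  // the 10-byte NOP.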
  do {
    const uint8_t ThisNopLength = (uint8_t) std::min(Count, MaxNopLength);
    const uint8_t Prefixes = ThisNopLength <= 10 ? 0 : ThisNopLength - 10;
    for (uint8_t i = 0; i < Prefixes; i++)
      OS << '\x66';
    const uint8_t Rest = ThisNopLength - Prefixes;
    if (Rest != 0)
      OS.write(Nops[Rest - 1], Rest);
    Count -= ThisNopLength;
  } while (Count != 0);

  return true;
}

/* *** */

namespace {

class ELFX86AsmBackend : public X86AsmBackend {
public:
  uint8_t OSABI;
  ELFX86AsmBackend(const Target &T, uint8_t OSABI, const MCSubtargetInfo &STI)
      : X86AsmBackend(T, STI), OSABI(OSABI) {}
};

class ELFX86_32AsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_32AsmBackend(const Target &T, uint8_t OSABI,
                      const MCSubtargetInfo &STI)
    : ELFX86AsmBackend(T, OSABI, STI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI, ELF::EM_386);
  }
};

class ELFX86_X32AsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_X32AsmBackend(const Target &T, uint8_t OSABI,
                       const MCSubtargetInfo &STI)
      : ELFX86AsmBackend(T, OSABI, STI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI,
                                    ELF::EM_X86_64);
  }
};

class ELFX86_IAMCUAsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_IAMCUAsmBackend(const Target &T, uint8_t OSABI,
                         const MCSubtargetInfo &STI)
      : ELFX86AsmBackend(T, OSABI, STI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI,
                                    ELF::EM_IAMCU);
  }
};

class ELFX86_64AsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_64AsmBackend(const Target &T, uint8_t OSABI,
                      const MCSubtargetInfo &STI)
    : ELFX86AsmBackend(T, OSABI, STI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createX86ELFObjectWriter(/*IsELF64*/ true, OSABI, ELF::EM_X86_64);
  }
};

class WindowsX86AsmBackend : public X86AsmBackend {
  bool Is64Bit;

public:
  WindowsX86AsmBackend(const Target &T, bool is64Bit,
                       const MCSubtargetInfo &STI)
    : X86AsmBackend(T, STI)
    , Is64Bit(is64Bit) {
  }

  Optional<MCFixupKind> getFixupKind(StringRef Name) const override {
    return StringSwitch<Optional<MCFixupKind>>(Name)
        .Case("dir32", FK_Data_4)
        .Case("secrel32", FK_SecRel_4)
        .Case("secidx", FK_SecRel_2)
        .Default(MCAsmBackend::getFixupKind(Name));
  }

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createX86WinCOFFObjectWriter(Is64Bit);
  }
};

namespace CU {

  /// Compact unwind encoding values.
  enum CompactUnwindEncodings {
    /// [RE]BP based frame where [RE]BP is pushed on the stack immediately after
    /// the return address, then [RE]SP is moved to [RE]BP.
    UNWIND_MODE_BP_FRAME                   = 0x01000000,

    /// A frameless function with a small constant stack size.
    UNWIND_MODE_STACK_IMMD                 = 0x02000000,

    /// A frameless function with a large constant stack size.
    UNWIND_MODE_STACK_IND                  = 0x03000000,

    /// No compact unwind encoding is available.
    UNWIND_MODE_DWARF                      = 0x04000000,

    /// Mask for encoding the frame registers.
    UNWIND_BP_FRAME_REGISTERS              = 0x00007FFF,

    /// Mask for encoding the frameless registers.
    UNWIND_FRAMELESS_STACK_REG_PERMUTATION = 0x000003FF
  };

} // end CU namespace

class DarwinX86AsmBackend : public X86AsmBackend {
  const MCRegisterInfo &MRI;

  /// Number of registers that can be saved in a compact unwind encoding.
  enum { CU_NUM_SAVED_REGS = 6 };

  mutable unsigned SavedRegs[CU_NUM_SAVED_REGS];
  bool Is64Bit;

  unsigned OffsetSize;                   ///< Offset of a "push" instruction.
  unsigned MoveInstrSize;                ///< Size of a "move" instruction.
  unsigned StackDivide;                  ///< Amount to adjust stack size by.
protected:
  /// Size of a "push" instruction for the given register.
  unsigned PushInstrSize(unsigned Reg) const {
    switch (Reg) {
      case X86::EBX:
      case X86::ECX:
      case X86::EDX:
      case X86::EDI:
      case X86::ESI:
      case X86::EBP:
      case X86::RBX:
      case X86::RBP:
        return 1;
      case X86::R12:
      case X86::R13:
      case X86::R14:
      case X86::R15:
        return 2;
    }
    return 1;
  }

  /// Implementation of algorithm to generate the compact unwind encoding
  /// for the CFI instructions.
  uint32_t
  generateCompactUnwindEncodingImpl(ArrayRef<MCCFIInstruction> Instrs) const {
    if (Instrs.empty()) return 0;

    // Reset the saved registers.
    unsigned SavedRegIdx = 0;
    memset(SavedRegs, 0, sizeof(SavedRegs));

    bool HasFP = false;

    // Encode that we are using EBP/RBP as the frame pointer.
    uint32_t CompactUnwindEncoding = 0;

    unsigned SubtractInstrIdx = Is64Bit ? 3 : 2;
    unsigned InstrOffset = 0;
    unsigned StackAdjust = 0;
    unsigned StackSize = 0;
    unsigned NumDefCFAOffsets = 0;

    for (unsigned i = 0, e = Instrs.size(); i != e; ++i) {
      const MCCFIInstruction &Inst = Instrs[i];

      switch (Inst.getOperation()) {
      default:
        // Any other CFI directives indicate a frame that we aren't prepared
        // to represent via compact unwind, so just bail out.
        return 0;
      case MCCFIInstruction::OpDefCfaRegister: {
        // Defines a frame pointer. E.g.
        //
        //     movq %rsp, %rbp
        //  L0:
        //     .cfi_def_cfa_register %rbp
        //
        HasFP = true;

        // If the frame pointer is other than esp/rsp, we do not have a way to
        // generate a compact unwinding representation, so bail out.
        if (*MRI.getLLVMRegNum(Inst.getRegister(), true) !=
            (Is64Bit ? X86::RBP : X86::EBP))
          return 0;

        // Reset the counts.
        memset(SavedRegs, 0, sizeof(SavedRegs));
        StackAdjust = 0;
        SavedRegIdx = 0;
        InstrOffset += MoveInstrSize;
        break;
      }
      case MCCFIInstruction::OpDefCfaOffset: {
        // Defines a new offset for the CFA. E.g.
        //
        //  With frame:
        //
        //     pushq %rbp
        //  L0:
        //     .cfi_def_cfa_offset 16
        //
        //  Without frame:
        //
        //     subq $72, %rsp
        //  L0:
        //     .cfi_def_cfa_offset 80
        //
        StackSize = std::abs(Inst.getOffset()) / StackDivide;
        ++NumDefCFAOffsets;
        break;
      }
      case MCCFIInstruction::OpOffset: {
        // Defines a "push" of a callee-saved register. E.g.
        //
        //     pushq %r15
        //     pushq %r14
        //     pushq %rbx
        //  L0:
        //     subq $120, %rsp
        //  L1:
        //     .cfi_offset %rbx, -40
        //     .cfi_offset %r14, -32
        //     .cfi_offset %r15, -24
        //
        if (SavedRegIdx == CU_NUM_SAVED_REGS)
          // If there are too many saved registers, we cannot use a compact
          // unwind encoding.
          return CU::UNWIND_MODE_DWARF;

        unsigned Reg = *MRI.getLLVMRegNum(Inst.getRegister(), true);
        SavedRegs[SavedRegIdx++] = Reg;
        StackAdjust += OffsetSize;
        InstrOffset += PushInstrSize(Reg);
        break;
      }
      }
    }

    StackAdjust /= StackDivide;

    if (HasFP) {
      if ((StackAdjust & 0xFF) != StackAdjust)
        // Offset was too big for a compact unwind encoding.
        return CU::UNWIND_MODE_DWARF;

      // Get the encoding of the saved registers when we have a frame pointer.
      uint32_t RegEnc = encodeCompactUnwindRegistersWithFrame();
      if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;

      CompactUnwindEncoding |= CU::UNWIND_MODE_BP_FRAME;
      CompactUnwindEncoding |= (StackAdjust & 0xFF) << 16;
      CompactUnwindEncoding |= RegEnc & CU::UNWIND_BP_FRAME_REGISTERS;
    } else {
      SubtractInstrIdx += InstrOffset;
      ++StackAdjust;

      if ((StackSize & 0xFF) == StackSize) {
        // Frameless stack with a small stack size.
        CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IMMD;

        // Encode the stack size.
        CompactUnwindEncoding |= (StackSize & 0xFF) << 16;
      } else {
        if ((StackAdjust & 0x7) != StackAdjust)
          // The extra stack adjustments are too big for us to handle.
          return CU::UNWIND_MODE_DWARF;

        // Frameless stack with an offset too large for us to encode compactly.
        CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IND;

        // Encode the offset to the nnnnnn value in the 'subl $nnnnnn, ESP'
        // instruction.
        CompactUnwindEncoding |= (SubtractInstrIdx & 0xFF) << 16;

        // Encode any extra stack adjustments (done via push instructions).
        CompactUnwindEncoding |= (StackAdjust & 0x7) << 13;
      }

      // Encode the number of registers saved. (Reverse the list first.)
      std::reverse(&SavedRegs[0], &SavedRegs[SavedRegIdx]);
      CompactUnwindEncoding |= (SavedRegIdx & 0x7) << 10;

      // Get the encoding of the saved registers when we don't have a frame
      // pointer.
      uint32_t RegEnc = encodeCompactUnwindRegistersWithoutFrame(SavedRegIdx);
      if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;

      // Encode the register encoding.
      CompactUnwindEncoding |=
        RegEnc & CU::UNWIND_FRAMELESS_STACK_REG_PERMUTATION;
    }

    return CompactUnwindEncoding;
  }

private:
  /// Get the compact unwind number for a given register. The number
  /// corresponds to the enum lists in compact_unwind_encoding.h.
  int getCompactUnwindRegNum(unsigned Reg) const {
    static const MCPhysReg CU32BitRegs[7] = {
      X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0
    };
    static const MCPhysReg CU64BitRegs[] = {
      X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
    };
    const MCPhysReg *CURegs = Is64Bit ? CU64BitRegs : CU32BitRegs;
    for (int Idx = 1; *CURegs; ++CURegs, ++Idx)
      if (*CURegs == Reg)
        return Idx;

    return -1;
  }

  /// Return the registers encoded for a compact encoding with a frame
  /// pointer.
  uint32_t encodeCompactUnwindRegistersWithFrame() const {
    // Encode the registers in the order they were saved --- 3-bits per
    // register. The list of saved registers is assumed to be in reverse
    // order. The registers are numbered from 1 to CU_NUM_SAVED_REGS.
    uint32_t RegEnc = 0;
    for (int i = 0, Idx = 0; i != CU_NUM_SAVED_REGS; ++i) {
      unsigned Reg = SavedRegs[i];
      if (Reg == 0) break;

      int CURegNum = getCompactUnwindRegNum(Reg);
      if (CURegNum == -1) return ~0U;

      // Encode the 3-bit register number in order, skipping over 3-bits for
      // each register.
      RegEnc |= (CURegNum & 0x7) << (Idx++ * 3);
    }

    assert((RegEnc & 0x3FFFF) == RegEnc &&
           "Invalid compact register encoding!");
    return RegEnc;
  }

  /// Create the permutation encoding used with frameless stacks. It is
  /// passed the number of registers to be saved and an array of the registers
  /// saved.
  uint32_t encodeCompactUnwindRegistersWithoutFrame(unsigned RegCount) const {
    // The saved registers are numbered from 1 to 6. In order to encode the
    // order in which they were saved, we re-number them according to their
    // place in the register order. The re-numbering is relative to the last
    // re-numbered register. E.g., if we have registers {6, 2, 4, 5} saved in
    // that order:
    //
    //    Orig  Re-Num
    //    ----  ------
    //     6       6
    //     2       2
    //     4       3
    //     5       3
    //
    for (unsigned i = 0; i < RegCount; ++i) {
      int CUReg = getCompactUnwindRegNum(SavedRegs[i]);
      if (CUReg == -1) return ~0U;
      SavedRegs[i] = CUReg;
    }

    // Reverse the list.
    std::reverse(&SavedRegs[0], &SavedRegs[CU_NUM_SAVED_REGS]);

    uint32_t RenumRegs[CU_NUM_SAVED_REGS];
    for (unsigned i = CU_NUM_SAVED_REGS - RegCount; i < CU_NUM_SAVED_REGS; ++i){
      unsigned Countless = 0;
      for (unsigned j = CU_NUM_SAVED_REGS - RegCount; j < i; ++j)
        if (SavedRegs[j] < SavedRegs[i])
          ++Countless;

      RenumRegs[i] = SavedRegs[i] - Countless - 1;
    }

    // Take the renumbered values and encode them into a 10-bit number.
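    // The multipliers (120, 24, 6, 2, 1, ...) are factorial-base place values,
    // so this is effectively a Lehmer-code encoding of the permutation.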
    uint32_t permutationEncoding = 0;
    switch (RegCount) {
    case 6:
      permutationEncoding |= 120 * RenumRegs[0] + 24 * RenumRegs[1]
                             + 6 * RenumRegs[2] +  2 * RenumRegs[3]
                             +     RenumRegs[4];
      break;
    case 5:
      permutationEncoding |= 120 * RenumRegs[1] + 24 * RenumRegs[2]
                             + 6 * RenumRegs[3] +  2 * RenumRegs[4]
                             +     RenumRegs[5];
      break;
    case 4:
      permutationEncoding |=  60 * RenumRegs[2] + 12 * RenumRegs[3]
                             + 3 * RenumRegs[4] +      RenumRegs[5];
      break;
    case 3:
      permutationEncoding |=  20 * RenumRegs[3] +  4 * RenumRegs[4]
                             +     RenumRegs[5];
      break;
    case 2:
      permutationEncoding |=   5 * RenumRegs[4] +      RenumRegs[5];
      break;
    case 1:
      permutationEncoding |=       RenumRegs[5];
      break;
    }

    assert((permutationEncoding & 0x3FF) == permutationEncoding &&
           "Invalid compact register encoding!");
    return permutationEncoding;
  }

public:
  DarwinX86AsmBackend(const Target &T, const MCRegisterInfo &MRI,
                      const MCSubtargetInfo &STI, bool Is64Bit)
    : X86AsmBackend(T, STI), MRI(MRI), Is64Bit(Is64Bit) {
    memset(SavedRegs, 0, sizeof(SavedRegs));
    OffsetSize = Is64Bit ? 8 : 4;
    MoveInstrSize = Is64Bit ? 3 : 2;
    StackDivide = Is64Bit ? 8 : 4;
  }
};

class DarwinX86_32AsmBackend : public DarwinX86AsmBackend {
public:
  DarwinX86_32AsmBackend(const Target &T, const MCRegisterInfo &MRI,
                         const MCSubtargetInfo &STI)
      : DarwinX86AsmBackend(T, MRI, STI, false) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createX86MachObjectWriter(/*Is64Bit=*/false,
                                     MachO::CPU_TYPE_I386,
                                     MachO::CPU_SUBTYPE_I386_ALL);
  }

  /// Generate the compact unwind encoding for the CFI instructions.
  uint32_t generateCompactUnwindEncoding(
                             ArrayRef<MCCFIInstruction> Instrs) const override {
    return generateCompactUnwindEncodingImpl(Instrs);
  }
};

class DarwinX86_64AsmBackend : public DarwinX86AsmBackend {
  const MachO::CPUSubTypeX86 Subtype;
public:
  DarwinX86_64AsmBackend(const Target &T, const MCRegisterInfo &MRI,
                         const MCSubtargetInfo &STI, MachO::CPUSubTypeX86 st)
      : DarwinX86AsmBackend(T, MRI, STI, true), Subtype(st) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createX86MachObjectWriter(/*Is64Bit=*/true, MachO::CPU_TYPE_X86_64,
                                     Subtype);
  }

  /// Generate the compact unwind encoding for the CFI instructions.
  uint32_t generateCompactUnwindEncoding(
                             ArrayRef<MCCFIInstruction> Instrs) const override {
    return generateCompactUnwindEncodingImpl(Instrs);
  }
};

} // end anonymous namespace

MCAsmBackend *llvm::createX86_32AsmBackend(const Target &T,
                                           const MCSubtargetInfo &STI,
                                           const MCRegisterInfo &MRI,
                                           const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  if (TheTriple.isOSBinFormatMachO())
    return new DarwinX86_32AsmBackend(T, MRI, STI);

  if (TheTriple.isOSWindows() && TheTriple.isOSBinFormatCOFF())
    return new WindowsX86AsmBackend(T, false, STI);

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());

  if (TheTriple.isOSIAMCU())
    return new ELFX86_IAMCUAsmBackend(T, OSABI, STI);

  return new ELFX86_32AsmBackend(T, OSABI, STI);
}

MCAsmBackend *llvm::createX86_64AsmBackend(const Target &T,
                                           const MCSubtargetInfo &STI,
                                           const MCRegisterInfo &MRI,
                                           const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  if (TheTriple.isOSBinFormatMachO()) {
    MachO::CPUSubTypeX86 CS =
        StringSwitch<MachO::CPUSubTypeX86>(TheTriple.getArchName())
            .Case("x86_64h", MachO::CPU_SUBTYPE_X86_64_H)
            .Default(MachO::CPU_SUBTYPE_X86_64_ALL);
    return new DarwinX86_64AsmBackend(T, MRI, STI, CS);
  }

  if (TheTriple.isOSWindows() && TheTriple.isOSBinFormatCOFF())
    return new WindowsX86AsmBackend(T, true, STI);

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());

  if (TheTriple.getEnvironment() == Triple::GNUX32)
    return new ELFX86_X32AsmBackend(T, OSABI, STI);
  return new ELFX86_64AsmBackend(T, OSABI, STI);
}