//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the IRTranslator class.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/CSEInfo.h"
#include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/SwitchLoweringUtils.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/MemoryOpRemark.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <optional>
#include <string>
#include <utility>
#include <vector>

#define DEBUG_TYPE "irtranslator"

using namespace llvm;

static cl::opt<bool>
    EnableCSEInIRTranslator("enable-cse-in-irtranslator",
                            cl::desc("Should enable CSE in irtranslator"),
                            cl::Optional, cl::init(false));
char IRTranslator::ID = 0;

INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(StackProtector)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)

static void reportTranslationError(MachineFunction &MF,
                                   const TargetPassConfig &TPC,
                                   OptimizationRemarkEmitter &ORE,
                                   OptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

  // Print the function name explicitly if we don't have a debug location (which
  // makes the diagnostic less useful) or if we're going to emit a raw error.
  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();

  if (TPC.isGlobalISelAbortEnabled())
    report_fatal_error(Twine(R.getMsg()));
  else
    ORE.emit(R);
}

IRTranslator::IRTranslator(CodeGenOpt::Level optlevel)
    : MachineFunctionPass(ID), OptLevel(optlevel) {}

#ifndef NDEBUG
namespace {
/// Verify that every instruction created has the same DILocation as the
/// instruction being translated.
class DILocationVerifier : public GISelChangeObserver {
  const Instruction *CurrInst = nullptr;

public:
  DILocationVerifier() = default;
  ~DILocationVerifier() = default;

  const Instruction *getCurrentInst() const { return CurrInst; }
  void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }

  void erasingInstr(MachineInstr &MI) override {}
  void changingInstr(MachineInstr &MI) override {}
  void changedInstr(MachineInstr &MI) override {}

  void createdInstr(MachineInstr &MI) override {
    assert(getCurrentInst() && "Inserted instruction without a current MI");

    // Only print the check message if we're actually checking it.
#ifndef NDEBUG
    LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
                      << " was copied to " << MI);
#endif
    // We allow insts in the entry block to have no debug loc because
    // they could have originated from constants, and we don't want a jumpy
    // debug experience.
    assert((CurrInst->getDebugLoc() == MI.getDebugLoc() ||
            (MI.getParent()->isEntryBlock() && !MI.getDebugLoc())) &&
           "Line info was not transferred to all instructions");
  }
};
} // namespace
#endif // ifndef NDEBUG


void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<StackProtector>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<GISelCSEAnalysisWrapperPass>();
  AU.addRequired<AssumptionCacheTracker>();
  if (OptLevel != CodeGenOpt::None) {
    AU.addRequired<BranchProbabilityInfoWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
  }
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addPreserved<TargetLibraryInfoWrapperPass>();
  getSelectionDAGFallbackAnalysisUsage(AU);
  MachineFunctionPass::getAnalysisUsage(AU);
}

IRTranslator::ValueToVRegInfo::VRegListT &
IRTranslator::allocateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;
  auto *Regs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);
  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);
  for (unsigned i = 0; i < SplitTys.size(); ++i)
    Regs->push_back(0);
  return *Regs;
}

ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;

  if (Val.getType()->isVoidTy())
    return *VMap.getVRegs(Val);

  // Create entry for this type.
  auto *VRegs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);

  assert(Val.getType()->isSized() &&
         "Don't know how to create an empty vreg");

  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);
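  // Illustrative example (not from the source): a value of type {i64, i32}
  // splits into SplitTys = [s64, s32] with bit Offsets = [0, 64], i.e. one
  // vreg per leaf type plus its offset within the aggregate.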

  if (!isa<Constant>(Val)) {
    for (auto Ty : SplitTys)
      VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
    return *VRegs;
  }

  if (Val.getType()->isAggregateType()) {
    // UndefValue, ConstantAggregateZero
    auto &C = cast<Constant>(Val);
    unsigned Idx = 0;
    while (auto Elt = C.getAggregateElement(Idx++)) {
      auto EltRegs = getOrCreateVRegs(*Elt);
      llvm::copy(EltRegs, std::back_inserter(*VRegs));
    }
  } else {
    assert(SplitTys.size() == 1 && "unexpectedly split LLT");
    VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
    bool Success = translate(cast<Constant>(Val), VRegs->front());
    if (!Success) {
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 MF->getFunction().getSubprogram(),
                                 &MF->getFunction().getEntryBlock());
      R << "unable to translate constant: " << ore::NV("Type", Val.getType());
      reportTranslationError(*MF, *TPC, *ORE, R);
      return *VRegs;
    }
  }

  return *VRegs;
}

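// For example (illustrative): `%a = alloca [4 x i32]` has an allocated-type
// size of 16 bytes and an array-size operand of 1, so a single 16-byte stack
// object is created and its frame index cached in FrameIndices.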
int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  auto MapEntry = FrameIndices.find(&AI);
  if (MapEntry != FrameIndices.end())
    return MapEntry->second;

  uint64_t ElementSize = DL->getTypeAllocSize(AI.getAllocatedType());
  uint64_t Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max<uint64_t>(Size, 1u);

  int &FI = FrameIndices[&AI];
  FI = MF->getFrameInfo().CreateStackObject(Size, AI.getAlign(), false, &AI);
  return FI;
}

Align IRTranslator::getMemOpAlign(const Instruction &I) {
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I))
    return SI->getAlign();
  if (const LoadInst *LI = dyn_cast<LoadInst>(&I))
    return LI->getAlign();
  if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I))
    return AI->getAlign();
  if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I))
    return AI->getAlign();

  OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
  R << "unable to translate memop: " << ore::NV("Opcode", &I);
  reportTranslationError(*MF, *TPC, *ORE, R);
  return Align(1);
}

MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
  MachineBasicBlock *&MBB = BBToMBB[&BB];
  assert(MBB && "BasicBlock was not encountered before");
  return *MBB;
}

void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
  MachinePreds[Edge].push_back(NewPred);
}

bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  // Get or create a virtual register for each operand and the result.
  // Constants could instead be materialized as immediates at each use, but
  // for now every value gets a vreg; creating a virtual register requires a
  // known size.
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));
  Register Res = getOrCreateVReg(U);
  uint16_t Flags = 0;
  if (isa<Instruction>(U)) {
    const Instruction &I = cast<Instruction>(U);
    Flags = MachineInstr::copyFlagsFromInstruction(I);
  }

  MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags);
  return true;
}

bool IRTranslator::translateUnaryOp(unsigned Opcode, const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Res = getOrCreateVReg(U);
  uint16_t Flags = 0;
  if (isa<Instruction>(U)) {
    const Instruction &I = cast<Instruction>(U);
    Flags = MachineInstr::copyFlagsFromInstruction(I);
  }
  MIRBuilder.buildInstr(Opcode, {Res}, {Op0}, Flags);
  return true;
}

bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
  return translateUnaryOp(TargetOpcode::G_FNEG, U, MIRBuilder);
}

bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  auto *CI = dyn_cast<CmpInst>(&U);
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));
  Register Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred =
      CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
                                    cast<ConstantExpr>(U).getPredicate());
  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  else if (Pred == CmpInst::FCMP_FALSE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getNullValue(U.getType())));
  else if (Pred == CmpInst::FCMP_TRUE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType())));
  else {
    uint16_t Flags = 0;
    if (CI)
      Flags = MachineInstr::copyFlagsFromInstruction(*CI);
    MIRBuilder.buildFCmp(Pred, Res, Op0, Op1, Flags);
  }

  return true;
}

bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
  const ReturnInst &RI = cast<ReturnInst>(U);
  const Value *Ret = RI.getReturnValue();
  if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
    Ret = nullptr;

  ArrayRef<Register> VRegs;
  if (Ret)
    VRegs = getOrCreateVRegs(*Ret);

  Register SwiftErrorVReg = 0;
  if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
    SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
        &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg());
  }

  // The target may move the insertion point, but that does not matter:
  // a return is the last instruction of the block anyway.
  return CLI->lowerReturn(MIRBuilder, Ret, VRegs, FuncInfo, SwiftErrorVReg);
}

void IRTranslator::emitBranchForMergedCondition(
    const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
    BranchProbability TProb, BranchProbability FProb, bool InvertCond) {
  // If the leaf of the tree is a comparison, merge the condition into
  // the CaseBlock.
  if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
    CmpInst::Predicate Condition;
    if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
      Condition = InvertCond ? IC->getInversePredicate() : IC->getPredicate();
    } else {
      const FCmpInst *FC = cast<FCmpInst>(Cond);
      Condition = InvertCond ? FC->getInversePredicate() : FC->getPredicate();
    }

    SwitchCG::CaseBlock CB(Condition, false, BOp->getOperand(0),
                           BOp->getOperand(1), nullptr, TBB, FBB, CurBB,
                           CurBuilder->getDebugLoc(), TProb, FProb);
    SL->SwitchCases.push_back(CB);
    return;
  }

  // Create a CaseBlock record representing this branch.
  CmpInst::Predicate Pred = InvertCond ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
  SwitchCG::CaseBlock CB(
      Pred, false, Cond, ConstantInt::getTrue(MF->getFunction().getContext()),
      nullptr, TBB, FBB, CurBB, CurBuilder->getDebugLoc(), TProb, FProb);
  SL->SwitchCases.push_back(CB);
}

static bool isValInBlock(const Value *V, const BasicBlock *BB) {
  if (const Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == BB;
  return true;
}

void IRTranslator::findMergedConditions(
    const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
    Instruction::BinaryOps Opc, BranchProbability TProb,
    BranchProbability FProb, bool InvertCond) {
  using namespace PatternMatch;
  assert((Opc == Instruction::And || Opc == Instruction::Or) &&
         "Expected Opc to be AND/OR");
  // Skip over a NOT that is not part of the tree, and remember to invert the
  // op and operands at the next level.
  Value *NotCond;
  if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
      isValInBlock(NotCond, CurBB->getBasicBlock())) {
    findMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
                         !InvertCond);
    return;
  }

  const Instruction *BOp = dyn_cast<Instruction>(Cond);
  const Value *BOpOp0, *BOpOp1;
  // Compute the effective opcode for Cond, taking into account whether it needs
  // to be inverted, e.g.
  //   and (not (or A, B)), C
  // gets lowered as
  //   and (and (not A, not B), C)
  Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0;
  if (BOp) {
    BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1)))
               ? Instruction::And
               : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1)))
                      ? Instruction::Or
                      : (Instruction::BinaryOps)0);
    if (InvertCond) {
      if (BOpc == Instruction::And)
        BOpc = Instruction::Or;
      else if (BOpc == Instruction::Or)
        BOpc = Instruction::And;
    }
  }

  // If this node is not part of the or/and tree, emit it as a branch.
  // Note that all nodes in the tree should have the same opcode.
  bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
  if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
      !isValInBlock(BOpOp0, CurBB->getBasicBlock()) ||
      !isValInBlock(BOpOp1, CurBB->getBasicBlock())) {
    emitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB, TProb, FProb,
                                 InvertCond);
    return;
  }

  //  Create TmpBB after CurBB.
  MachineFunction::iterator BBI(CurBB);
  MachineBasicBlock *TmpBB =
      MF->CreateMachineBasicBlock(CurBB->getBasicBlock());
  CurBB->getParent()->insert(++BBI, TmpBB);

  if (Opc == Instruction::Or) {
    // Codegen X | Y as:
    // BB1:
    //   jmp_if_X TBB
    //   jmp TmpBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //

    // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
    // The requirement is that
    //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
    //     = TrueProb for original BB.
    // Assuming the original probabilities are A and B, one choice is to set
    // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
    // A/(1+B) and 2B/(1+B). This choice assumes that
    //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
    // Another choice is to assume TrueProb for BB1 equals to TrueProb for
    // TmpBB, but the math is more complicated.
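    // Worked example (illustrative): with A = TProb = 1/2 and B = FProb =
    // 1/2, BB1 gets probabilities 1/4 and 3/4, and TmpBB's raw pair
    // {1/4, 1/2} normalizes to {1/3, 2/3}.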

    auto NewTrueProb = TProb / 2;
    auto NewFalseProb = TProb / 2 + FProb;
    // Emit the LHS condition.
    findMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
                         NewFalseProb, InvertCond);

    // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
    SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
    BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
    // Emit the RHS condition into TmpBB.
    findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
                         Probs[1], InvertCond);
  } else {
    assert(Opc == Instruction::And && "Unknown merge op!");
    // Codegen X & Y as:
    // BB1:
    //   jmp_if_X TmpBB
    //   jmp FBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //
    //  This requires creation of TmpBB after CurBB.

    // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
    // The requirement is that
    //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
    //     = FalseProb for original BB.
    // Assuming the original probabilities are A and B, one choice is to set
    // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
    // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
    // TrueProb for BB1 * FalseProb for TmpBB.

    auto NewTrueProb = TProb + FProb / 2;
    auto NewFalseProb = FProb / 2;
    // Emit the LHS condition.
    findMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
                         NewFalseProb, InvertCond);

    // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
    SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
    BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
    // Emit the RHS condition into TmpBB.
    findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
                         Probs[1], InvertCond);
  }
}

bool IRTranslator::shouldEmitAsBranches(
    const std::vector<SwitchCG::CaseBlock> &Cases) {
  // For multiple cases, it's better to emit as branches.
  if (Cases.size() != 2)
    return true;

  // If this is two comparisons of the same values or'd or and'd together, they
  // will get folded into a single comparison, so don't emit two blocks.
  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
       Cases[0].CmpLHS == Cases[1].CmpRHS)) {
    return false;
  }

  // Handle: (X != null) | (Y != null) --> (X|Y) != 0
  // Handle: (X == null) & (Y == null) --> (X|Y) == 0
  if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
      Cases[0].PredInfo.Pred == Cases[1].PredInfo.Pred &&
      isa<Constant>(Cases[0].CmpRHS) &&
      cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
    if (Cases[0].PredInfo.Pred == CmpInst::ICMP_EQ &&
        Cases[0].TrueBB == Cases[1].ThisBB)
      return false;
    if (Cases[0].PredInfo.Pred == CmpInst::ICMP_NE &&
        Cases[0].FalseBB == Cases[1].ThisBB)
      return false;
  }

  return true;
}

bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
  const BranchInst &BrInst = cast<BranchInst>(U);
  auto &CurMBB = MIRBuilder.getMBB();
  auto *Succ0MBB = &getMBB(*BrInst.getSuccessor(0));

  if (BrInst.isUnconditional()) {
    // If the unconditional target is the layout successor, fallthrough.
    if (OptLevel == CodeGenOpt::None || !CurMBB.isLayoutSuccessor(Succ0MBB))
      MIRBuilder.buildBr(*Succ0MBB);

    // Link successors.
    for (const BasicBlock *Succ : successors(&BrInst))
      CurMBB.addSuccessor(&getMBB(*Succ));
    return true;
  }

  // If this condition is one of the special cases we handle, lower it
  // specially now.
  const Value *CondVal = BrInst.getCondition();
  MachineBasicBlock *Succ1MBB = &getMBB(*BrInst.getSuccessor(1));

  const auto &TLI = *MF->getSubtarget().getTargetLowering();

  // If this is a series of conditions that are or'd or and'd together, emit
  // this as a sequence of branches instead of setcc's with and/or operations.
  // As long as jumps are not expensive (exceptions for multi-use logic ops,
  // unpredictable branches, and vector extracts because those jumps are likely
  // expensive for any target), this should improve performance.
  // For example, instead of something like:
  //     cmp A, B
  //     C = seteq
  //     cmp D, E
  //     F = setle
  //     or C, F
  //     jnz foo
  // Emit:
  //     cmp A, B
  //     je foo
  //     cmp D, E
  //     jle foo
  using namespace PatternMatch;
  const Instruction *CondI = dyn_cast<Instruction>(CondVal);
  if (!TLI.isJumpExpensive() && CondI && CondI->hasOneUse() &&
      !BrInst.hasMetadata(LLVMContext::MD_unpredictable)) {
    Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
    Value *Vec;
    const Value *BOp0, *BOp1;
    if (match(CondI, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1))))
      Opcode = Instruction::And;
    else if (match(CondI, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
      Opcode = Instruction::Or;

    if (Opcode && !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
                    match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value())))) {
      findMergedConditions(CondI, Succ0MBB, Succ1MBB, &CurMBB, &CurMBB, Opcode,
                           getEdgeProbability(&CurMBB, Succ0MBB),
                           getEdgeProbability(&CurMBB, Succ1MBB),
                           /*InvertCond=*/false);
      assert(SL->SwitchCases[0].ThisBB == &CurMBB && "Unexpected lowering!");

      // Allow some cases to be rejected.
      if (shouldEmitAsBranches(SL->SwitchCases)) {
        // Emit the branch for this block.
        emitSwitchCase(SL->SwitchCases[0], &CurMBB, *CurBuilder);
        SL->SwitchCases.erase(SL->SwitchCases.begin());
        return true;
      }

      // Okay, we decided not to do this, remove any inserted MBB's and clear
      // SwitchCases.
      for (unsigned I = 1, E = SL->SwitchCases.size(); I != E; ++I)
        MF->erase(SL->SwitchCases[I].ThisBB);

      SL->SwitchCases.clear();
    }
  }

  // Create a CaseBlock record representing this branch.
  SwitchCG::CaseBlock CB(CmpInst::ICMP_EQ, false, CondVal,
                         ConstantInt::getTrue(MF->getFunction().getContext()),
                         nullptr, Succ0MBB, Succ1MBB, &CurMBB,
                         CurBuilder->getDebugLoc());

  // Use emitSwitchCase to actually insert the fast branch sequence for this
  // cond branch.
  emitSwitchCase(CB, &CurMBB, *CurBuilder);
  return true;
}

void IRTranslator::addSuccessorWithProb(MachineBasicBlock *Src,
                                        MachineBasicBlock *Dst,
                                        BranchProbability Prob) {
  if (!FuncInfo.BPI) {
    Src->addSuccessorWithoutProb(Dst);
    return;
  }
  if (Prob.isUnknown())
    Prob = getEdgeProbability(Src, Dst);
  Src->addSuccessor(Dst, Prob);
}

BranchProbability
IRTranslator::getEdgeProbability(const MachineBasicBlock *Src,
                                 const MachineBasicBlock *Dst) const {
  const BasicBlock *SrcBB = Src->getBasicBlock();
  const BasicBlock *DstBB = Dst->getBasicBlock();
  if (!FuncInfo.BPI) {
    // If BPI is not available, set the default probability as 1 / N, where N is
    // the number of successors.
    auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
    return BranchProbability(1, SuccSize);
  }
  return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB);
}

bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) {
  using namespace SwitchCG;
  // Extract cases from the switch.
  const SwitchInst &SI = cast<SwitchInst>(U);
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  CaseClusterVector Clusters;
  Clusters.reserve(SI.getNumCases());
  for (const auto &I : SI.cases()) {
    MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor());
    assert(Succ && "Could not find successor mbb in mapping");
    const ConstantInt *CaseVal = I.getCaseValue();
    BranchProbability Prob =
        BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
            : BranchProbability(1, SI.getNumCases() + 1);
    Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
  }

  MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest());

  // Cluster adjacent cases with the same destination. We do this at all
  // optimization levels because it's cheap to do and will make codegen faster
  // if there are many clusters.
  sortAndRangeify(Clusters);

  MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent());

  // If there is only the default destination, jump there directly.
  if (Clusters.empty()) {
    SwitchMBB->addSuccessor(DefaultMBB);
    if (DefaultMBB != SwitchMBB->getNextNode())
      MIB.buildBr(*DefaultMBB);
    return true;
  }

  SL->findJumpTables(Clusters, &SI, DefaultMBB, nullptr, nullptr);
  SL->findBitTestClusters(Clusters, &SI);

  LLVM_DEBUG({
    dbgs() << "Case clusters: ";
    for (const CaseCluster &C : Clusters) {
      if (C.Kind == CC_JumpTable)
        dbgs() << "JT:";
      if (C.Kind == CC_BitTests)
        dbgs() << "BT:";

      C.Low->getValue().print(dbgs(), true);
      if (C.Low != C.High) {
        dbgs() << '-';
        C.High->getValue().print(dbgs(), true);
      }
      dbgs() << ' ';
    }
    dbgs() << '\n';
  });

  assert(!Clusters.empty());
  SwitchWorkList WorkList;
  CaseClusterIt First = Clusters.begin();
  CaseClusterIt Last = Clusters.end() - 1;
  auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
  WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});

  // FIXME: At the moment we don't do any splitting optimizations here like
  // SelectionDAG does, so this worklist only has one entry.
  while (!WorkList.empty()) {
    SwitchWorkListItem W = WorkList.pop_back_val();
    if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
      return false;
  }
  return true;
}

void IRTranslator::emitJumpTable(SwitchCG::JumpTable &JT,
                                 MachineBasicBlock *MBB) {
  // Emit the code for the jump table
  assert(JT.Reg != -1U && "Should lower JT Header first!");
  MachineIRBuilder MIB(*MBB->getParent());
  MIB.setMBB(*MBB);
  MIB.setDebugLoc(CurBuilder->getDebugLoc());

  Type *PtrIRTy = Type::getInt8PtrTy(MF->getFunction().getContext());
  const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);

  auto Table = MIB.buildJumpTable(PtrTy, JT.JTI);
  MIB.buildBrJT(Table.getReg(0), JT.JTI, JT.Reg);
}

bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT,
                                       SwitchCG::JumpTableHeader &JTH,
                                       MachineBasicBlock *HeaderBB) {
  MachineIRBuilder MIB(*HeaderBB->getParent());
  MIB.setMBB(*HeaderBB);
  MIB.setDebugLoc(CurBuilder->getDebugLoc());

  const Value &SValue = *JTH.SValue;
  // Subtract the lowest switch case value from the value being switched on.
  const LLT SwitchTy = getLLTForType(*SValue.getType(), *DL);
  Register SwitchOpReg = getOrCreateVReg(SValue);
  auto FirstCst = MIB.buildConstant(SwitchTy, JTH.First);
  auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);

  // This value may be smaller or larger than the target's pointer type, and
  // may therefore require extension or truncation.
  Type *PtrIRTy = SValue.getType()->getPointerTo();
  const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy));
  Sub = MIB.buildZExtOrTrunc(PtrScalarTy, Sub);

  JT.Reg = Sub.getReg(0);

  if (JTH.FallthroughUnreachable) {
    if (JT.MBB != HeaderBB->getNextNode())
      MIB.buildBr(*JT.MBB);
    return true;
  }

  // Emit the range check for the jump table, and branch to the default block
  // for the switch statement if the value being switched on exceeds the
  // largest case in the switch.
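  // E.g. (illustrative): for cases 10..14, Sub = SwitchOp - 10 and the
  // default block is taken when Sub u> 4, i.e. JTH.Last - JTH.First.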
  auto Cst = getOrCreateVReg(
      *ConstantInt::get(SValue.getType(), JTH.Last - JTH.First));
  Cst = MIB.buildZExtOrTrunc(PtrScalarTy, Cst).getReg(0);
  auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::scalar(1), Sub, Cst);

  auto BrCond = MIB.buildBrCond(Cmp.getReg(0), *JT.Default);

  // Avoid emitting unnecessary branches to the next block.
  if (JT.MBB != HeaderBB->getNextNode())
    BrCond = MIB.buildBr(*JT.MBB);
  return true;
}

void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
                                  MachineBasicBlock *SwitchBB,
                                  MachineIRBuilder &MIB) {
  Register CondLHS = getOrCreateVReg(*CB.CmpLHS);
  Register Cond;
  DebugLoc OldDbgLoc = MIB.getDebugLoc();
  MIB.setDebugLoc(CB.DbgLoc);
  MIB.setMBB(*CB.ThisBB);

  if (CB.PredInfo.NoCmp) {
    // Branch or fall through to TrueBB.
    addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
    addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
                      CB.ThisBB);
    CB.ThisBB->normalizeSuccProbs();
    if (CB.TrueBB != CB.ThisBB->getNextNode())
      MIB.buildBr(*CB.TrueBB);
    MIB.setDebugLoc(OldDbgLoc);
    return;
  }

  const LLT i1Ty = LLT::scalar(1);
  // Build the compare.
  if (!CB.CmpMHS) {
    const auto *CI = dyn_cast<ConstantInt>(CB.CmpRHS);
    // For conditional branch lowering, we might try to do something silly like
    // emit a G_ICMP to compare an existing G_ICMP i1 result with true. If so,
    // just re-use the existing condition vreg.
    if (MRI->getType(CondLHS).getSizeInBits() == 1 && CI &&
        CI->getZExtValue() == 1 && CB.PredInfo.Pred == CmpInst::ICMP_EQ) {
      Cond = CondLHS;
    } else {
      Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
      if (CmpInst::isFPPredicate(CB.PredInfo.Pred))
        Cond =
            MIB.buildFCmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
      else
        Cond =
            MIB.buildICmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
    }
  } else {
    assert(CB.PredInfo.Pred == CmpInst::ICMP_SLE &&
           "Can only handle SLE ranges");

    const APInt &Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
    const APInt &High = cast<ConstantInt>(CB.CmpRHS)->getValue();

    Register CmpOpReg = getOrCreateVReg(*CB.CmpMHS);
    if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
      Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
      Cond =
          MIB.buildICmp(CmpInst::ICMP_SLE, i1Ty, CmpOpReg, CondRHS).getReg(0);
    } else {
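      // Note (illustrative): Low <= X <= High is checked with the single
      // unsigned comparison (X - Low) u<= (High - Low), folding two signed
      // bound checks into one subtract and one compare.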
      const LLT CmpTy = MRI->getType(CmpOpReg);
      auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);
      auto Diff = MIB.buildConstant(CmpTy, High - Low);
      Cond = MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, Sub, Diff).getReg(0);
    }
  }

  // Update successor info
  addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);

  addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
                    CB.ThisBB);

  // TrueBB and FalseBB are always different unless the incoming IR is
  // degenerate. This only happens when running llc on weird IR.
  if (CB.TrueBB != CB.FalseBB)
    addSuccessorWithProb(CB.ThisBB, CB.FalseBB, CB.FalseProb);
  CB.ThisBB->normalizeSuccProbs();

  addMachineCFGPred({SwitchBB->getBasicBlock(), CB.FalseBB->getBasicBlock()},
                    CB.ThisBB);

  MIB.buildBrCond(Cond, *CB.TrueBB);
  MIB.buildBr(*CB.FalseBB);
  MIB.setDebugLoc(OldDbgLoc);
}

bool IRTranslator::lowerJumpTableWorkItem(SwitchCG::SwitchWorkListItem W,
                                          MachineBasicBlock *SwitchMBB,
                                          MachineBasicBlock *CurMBB,
                                          MachineBasicBlock *DefaultMBB,
                                          MachineIRBuilder &MIB,
                                          MachineFunction::iterator BBI,
                                          BranchProbability UnhandledProbs,
                                          SwitchCG::CaseClusterIt I,
                                          MachineBasicBlock *Fallthrough,
                                          bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();
  // FIXME: Optimize away range check based on pivot comparisons.
  JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
  SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
  BranchProbability DefaultProb = W.DefaultProb;

  // The jump block hasn't been inserted yet; insert it here.
  MachineBasicBlock *JumpMBB = JT->MBB;
  CurMF->insert(BBI, JumpMBB);

  // Since the jump table block is separate from the switch block, we need
  // to keep track of it as a machine predecessor to the default block,
  // otherwise we lose the phi edges.
  addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
                    CurMBB);
  addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
                    JumpMBB);

  auto JumpProb = I->Prob;
  auto FallthroughProb = UnhandledProbs;

  // If the default statement is a target of the jump table, we evenly
  // distribute the default probability to successors of CurMBB. Also
  // update the probability on the edge from JumpMBB to Fallthrough.
  for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
                                        SE = JumpMBB->succ_end();
       SI != SE; ++SI) {
    if (*SI == DefaultMBB) {
      JumpProb += DefaultProb / 2;
      FallthroughProb -= DefaultProb / 2;
      JumpMBB->setSuccProbability(SI, DefaultProb / 2);
      JumpMBB->normalizeSuccProbs();
    } else {
      // Also record edges from the jump table block to its successors.
      addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},
                        JumpMBB);
    }
  }

  if (FallthroughUnreachable)
    JTH->FallthroughUnreachable = true;

  if (!JTH->FallthroughUnreachable)
    addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
  addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
  CurMBB->normalizeSuccProbs();

  // The jump table header will be inserted into our current block; it will
  // do the range check and fall through to our fallthrough block.
  JTH->HeaderBB = CurMBB;
  JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.

  // If we're in the right place, emit the jump table header right now.
  if (CurMBB == SwitchMBB) {
    if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
      return false;
    JTH->Emitted = true;
  }
  return true;
}

bool IRTranslator::lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I,
                                            Value *Cond,
                                            MachineBasicBlock *Fallthrough,
                                            bool FallthroughUnreachable,
                                            BranchProbability UnhandledProbs,
                                            MachineBasicBlock *CurMBB,
                                            MachineIRBuilder &MIB,
                                            MachineBasicBlock *SwitchMBB) {
  using namespace SwitchCG;
  const Value *RHS, *LHS, *MHS;
  CmpInst::Predicate Pred;
  if (I->Low == I->High) {
    // Check Cond == I->Low.
    Pred = CmpInst::ICMP_EQ;
    LHS = Cond;
    RHS = I->Low;
    MHS = nullptr;
  } else {
    // Check I->Low <= Cond <= I->High.
    Pred = CmpInst::ICMP_SLE;
    LHS = I->Low;
    MHS = Cond;
    RHS = I->High;
  }

  // If Fallthrough is unreachable, fold away the comparison.
  // The false probability is the sum of all unhandled cases.
  CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough,
               CurMBB, MIB.getDebugLoc(), I->Prob, UnhandledProbs);

  emitSwitchCase(CB, SwitchMBB, MIB);
  return true;
}

void IRTranslator::emitBitTestHeader(SwitchCG::BitTestBlock &B,
                                     MachineBasicBlock *SwitchBB) {
  MachineIRBuilder &MIB = *CurBuilder;
  MIB.setMBB(*SwitchBB);

  // Subtract the minimum value.
  Register SwitchOpReg = getOrCreateVReg(*B.SValue);

  LLT SwitchOpTy = MRI->getType(SwitchOpReg);
  Register MinValReg = MIB.buildConstant(SwitchOpTy, B.First).getReg(0);
  auto RangeSub = MIB.buildSub(SwitchOpTy, SwitchOpReg, MinValReg);

  Type *PtrIRTy = Type::getInt8PtrTy(MF->getFunction().getContext());
  const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);

  LLT MaskTy = SwitchOpTy;
  if (MaskTy.getSizeInBits() > PtrTy.getSizeInBits() ||
      !isPowerOf2_32(MaskTy.getSizeInBits()))
    MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  else {
    // Ensure that the type will fit the mask value.
    for (unsigned I = 0, E = B.Cases.size(); I != E; ++I) {
      if (!isUIntN(SwitchOpTy.getSizeInBits(), B.Cases[I].Mask)) {
        // Switch table case ranges are encoded as a series of masks.
        // Just use the pointer type; it's guaranteed to fit.
        MaskTy = LLT::scalar(PtrTy.getSizeInBits());
        break;
      }
    }
  }
  Register SubReg = RangeSub.getReg(0);
  if (SwitchOpTy != MaskTy)
    SubReg = MIB.buildZExtOrTrunc(MaskTy, SubReg).getReg(0);

  B.RegVT = getMVTForLLT(MaskTy);
  B.Reg = SubReg;

  MachineBasicBlock *MBB = B.Cases[0].ThisBB;

  if (!B.FallthroughUnreachable)
    addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
  addSuccessorWithProb(SwitchBB, MBB, B.Prob);

  SwitchBB->normalizeSuccProbs();

  if (!B.FallthroughUnreachable) {
    // Conditional branch to the default block.
    auto RangeCst = MIB.buildConstant(SwitchOpTy, B.Range);
    auto RangeCmp = MIB.buildICmp(CmpInst::Predicate::ICMP_UGT, LLT::scalar(1),
                                  RangeSub, RangeCst);
    MIB.buildBrCond(RangeCmp, *B.Default);
  }

  // Avoid emitting unnecessary branches to the next block.
  if (MBB != SwitchBB->getNextNode())
    MIB.buildBr(*MBB);
}

void IRTranslator::emitBitTestCase(SwitchCG::BitTestBlock &BB,
                                   MachineBasicBlock *NextMBB,
                                   BranchProbability BranchProbToNext,
                                   Register Reg, SwitchCG::BitTestCase &B,
                                   MachineBasicBlock *SwitchBB) {
  MachineIRBuilder &MIB = *CurBuilder;
  MIB.setMBB(*SwitchBB);

  LLT SwitchTy = getLLTForMVT(BB.RegVT);
  Register Cmp;
  unsigned PopCount = llvm::popcount(B.Mask);
  if (PopCount == 1) {
    // Testing for a single bit; just compare the shift count with what it
    // would need to be to shift a 1 bit in that position.
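    // E.g. (illustrative): B.Mask == 0x8 has its only set bit at position 3,
    // so the test reduces to checking Reg == 3.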
    auto MaskTrailingZeros =
        MIB.buildConstant(SwitchTy, countTrailingZeros(B.Mask));
    Cmp =
        MIB.buildICmp(ICmpInst::ICMP_EQ, LLT::scalar(1), Reg, MaskTrailingZeros)
            .getReg(0);
  } else if (PopCount == BB.Range) {
    // There is only one zero bit in the range, test for it directly.
    auto MaskTrailingOnes =
        MIB.buildConstant(SwitchTy, countTrailingOnes(B.Mask));
    Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Reg, MaskTrailingOnes)
              .getReg(0);
  } else {
    // Make desired shift.
    auto CstOne = MIB.buildConstant(SwitchTy, 1);
    auto SwitchVal = MIB.buildShl(SwitchTy, CstOne, Reg);

    // Emit bit tests and jumps.
    auto CstMask = MIB.buildConstant(SwitchTy, B.Mask);
    auto AndOp = MIB.buildAnd(SwitchTy, SwitchVal, CstMask);
    auto CstZero = MIB.buildConstant(SwitchTy, 0);
    Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), AndOp, CstZero)
              .getReg(0);
  }

  // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
  addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
  // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
  addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
  // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
  // one as they are relative probabilities (and thus work more like weights),
  // and hence we need to normalize them to let the sum of them become one.
  SwitchBB->normalizeSuccProbs();

  // Record the fact that the IR edge from the header to the bit test target
  // will go through our new block. Needed for PHIs to have nodes added.
  addMachineCFGPred({BB.Parent->getBasicBlock(), B.TargetBB->getBasicBlock()},
                    SwitchBB);

  MIB.buildBrCond(Cmp, *B.TargetBB);

  // Avoid emitting unnecessary branches to the next block.
  if (NextMBB != SwitchBB->getNextNode())
    MIB.buildBr(*NextMBB);
}

bool IRTranslator::lowerBitTestWorkItem(
    SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
    MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
    MachineIRBuilder &MIB, MachineFunction::iterator BBI,
    BranchProbability DefaultProb, BranchProbability UnhandledProbs,
    SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
    bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();
  // FIXME: Optimize away range check based on pivot comparisons.
  BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
  // The bit test blocks haven't been inserted yet; insert them here.
  for (BitTestCase &BTC : BTB->Cases)
    CurMF->insert(BBI, BTC.ThisBB);

  // Fill in fields of the BitTestBlock.
  BTB->Parent = CurMBB;
  BTB->Default = Fallthrough;

  BTB->DefaultProb = UnhandledProbs;
  // If the cases in bit test don't form a contiguous range, we evenly
  // distribute the probability on the edge to Fallthrough to two
  // successors of CurMBB.
  if (!BTB->ContiguousRange) {
    BTB->Prob += DefaultProb / 2;
    BTB->DefaultProb -= DefaultProb / 2;
  }

  if (FallthroughUnreachable)
    BTB->FallthroughUnreachable = true;

  // If we're in the right place, emit the bit test header right now.
  if (CurMBB == SwitchMBB) {
    emitBitTestHeader(*BTB, SwitchMBB);
    BTB->Emitted = true;
  }
  return true;
}

bool IRTranslator::lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W,
                                       Value *Cond,
                                       MachineBasicBlock *SwitchMBB,
                                       MachineBasicBlock *DefaultMBB,
                                       MachineIRBuilder &MIB) {
  using namespace SwitchCG;
  MachineFunction *CurMF = FuncInfo.MF;
  MachineBasicBlock *NextMBB = nullptr;
  MachineFunction::iterator BBI(W.MBB);
  if (++BBI != FuncInfo.MF->end())
    NextMBB = &*BBI;

  if (EnableOpts) {
    // Here, we order cases by probability so the most likely case will be
    // checked first. However, two clusters can have the same probability in
    // which case their relative ordering is non-deterministic. So we use Low
    // as a tie-breaker as clusters are guaranteed to never overlap.
    llvm::sort(W.FirstCluster, W.LastCluster + 1,
               [](const CaseCluster &a, const CaseCluster &b) {
                 return a.Prob != b.Prob
                            ? a.Prob > b.Prob
                            : a.Low->getValue().slt(b.Low->getValue());
               });

    // Rearrange the case blocks so that the last one falls through if possible
    // without changing the order of probabilities.
    for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) {
      --I;
      if (I->Prob > W.LastCluster->Prob)
        break;
      if (I->Kind == CC_Range && I->MBB == NextMBB) {
        std::swap(*I, *W.LastCluster);
        break;
      }
    }
  }

  // Compute total probability.
  BranchProbability DefaultProb = W.DefaultProb;
  BranchProbability UnhandledProbs = DefaultProb;
  for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
    UnhandledProbs += I->Prob;

  MachineBasicBlock *CurMBB = W.MBB;
  for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
    bool FallthroughUnreachable = false;
    MachineBasicBlock *Fallthrough;
    if (I == W.LastCluster) {
      // For the last cluster, fall through to the default destination.
      Fallthrough = DefaultMBB;
      FallthroughUnreachable = isa<UnreachableInst>(
          DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
    } else {
      Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
      CurMF->insert(BBI, Fallthrough);
    }
    UnhandledProbs -= I->Prob;

    switch (I->Kind) {
    case CC_BitTests: {
      if (!lowerBitTestWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
                                DefaultProb, UnhandledProbs, I, Fallthrough,
                                FallthroughUnreachable)) {
        LLVM_DEBUG(dbgs() << "Failed to lower bit test for switch");
        return false;
      }
      break;
    }

    case CC_JumpTable: {
      if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
                                  UnhandledProbs, I, Fallthrough,
                                  FallthroughUnreachable)) {
        LLVM_DEBUG(dbgs() << "Failed to lower jump table");
        return false;
      }
      break;
    }
    case CC_Range: {
      if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough,
                                    FallthroughUnreachable, UnhandledProbs,
                                    CurMBB, MIB, SwitchMBB)) {
        LLVM_DEBUG(dbgs() << "Failed to lower switch range");
        return false;
      }
      break;
    }
    }
    CurMBB = Fallthrough;
  }

  return true;
}

bool IRTranslator::translateIndirectBr(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);

  const Register Tgt = getOrCreateVReg(*BrInst.getAddress());
  MIRBuilder.buildBrIndirect(Tgt);

  // Link successors.
  SmallPtrSet<const BasicBlock *, 32> AddedSuccessors;
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *Succ : successors(&BrInst)) {
    // It's legal for indirectbr instructions to have duplicate blocks in the
    // destination list. We don't allow this in MIR. Skip anything that's
    // already a successor.
    if (!AddedSuccessors.insert(Succ).second)
      continue;
    CurBB.addSuccessor(&getMBB(*Succ));
  }

  return true;
}

static bool isSwiftError(const Value *V) {
  if (auto Arg = dyn_cast<Argument>(V))
    return Arg->hasSwiftErrorAttr();
  if (auto AI = dyn_cast<AllocaInst>(V))
    return AI->isSwiftError();
  return false;
}

bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
  const LoadInst &LI = cast<LoadInst>(U);

  unsigned StoreSize = DL->getTypeStoreSize(LI.getType());
  if (StoreSize == 0)
    return true;

  ArrayRef<Register> Regs = getOrCreateVRegs(LI);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
  Register Base = getOrCreateVReg(*LI.getPointerOperand());
  AAMDNodes AAInfo = LI.getAAMetadata();

  const Value *Ptr = LI.getPointerOperand();
  Type *OffsetIRTy = DL->getIntPtrType(Ptr->getType());
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  if (CLI->supportSwiftError() && isSwiftError(Ptr)) {
    assert(Regs.size() == 1 && "swifterror should be single pointer");
    Register VReg =
        SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(), Ptr);
    MIRBuilder.buildCopy(Regs[0], VReg);
    return true;
  }

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  MachineMemOperand::Flags Flags =
      TLI.getLoadMemOperandFlags(LI, *DL, AC, LibInfo);
  if (AA && !(Flags & MachineMemOperand::MOInvariant)) {
    if (AA->pointsToConstantMemory(
            MemoryLocation(Ptr, LocationSize::precise(StoreSize), AAInfo))) {
      Flags |= MachineMemOperand::MOInvariant;
    }
  }

  const MDNode *Ranges =
      Regs.size() == 1 ? LI.getMetadata(LLVMContext::MD_range) : nullptr;
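  // Illustrative: an aggregate load such as `load {i64, i32}` was split into
  // one vreg per leaf part above, so the loop below emits one G_LOAD per part
  // at byte offset Offsets[i] / 8 from Base.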
  for (unsigned i = 0; i < Regs.size(); ++i) {
    Register Addr;
    MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);

    MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
    Align BaseAlign = getMemOpAlign(LI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, MRI->getType(Regs[i]),
        commonAlignment(BaseAlign, Offsets[i] / 8), AAInfo, Ranges,
        LI.getSyncScopeID(), LI.getOrdering());
    MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
  }

  return true;
}

bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
  const StoreInst &SI = cast<StoreInst>(U);
  if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
    return true;

  ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand());
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
  Register Base = getOrCreateVReg(*SI.getPointerOperand());

  Type *OffsetIRTy = DL->getIntPtrType(SI.getPointerOperandType());
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) {
    assert(Vals.size() == 1 && "swifterror should be single pointer");

    Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(),
                                                    SI.getPointerOperand());
    MIRBuilder.buildCopy(VReg, Vals[0]);
    return true;
  }

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  MachineMemOperand::Flags Flags = TLI.getStoreMemOperandFlags(SI, *DL);

  for (unsigned i = 0; i < Vals.size(); ++i) {
    Register Addr;
    MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);

    MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
    Align BaseAlign = getMemOpAlign(SI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, MRI->getType(Vals[i]),
        commonAlignment(BaseAlign, Offsets[i] / 8), SI.getAAMetadata(), nullptr,
        SI.getSyncScopeID(), SI.getOrdering());
    MIRBuilder.buildStore(Vals[i], Addr, *MMO);
  }
  return true;
}

static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
  SmallVector<Value *, 1> Indices;
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
    for (auto Idx : EVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 1; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

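  // Return the offset in bits, to match the bit offsets VMap keeps for
  // aggregate sub-values.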
  return 8 * static_cast<uint64_t>(
                 DL.getIndexedOffsetInType(Src->getType(), Indices));
}

bool IRTranslator::translateExtractValue(const User &U,
                                         MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
  unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin();
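  // Idx now points at the first source sub-register of the extracted member;
  // the destination just reuses the source registers from there on.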
  auto &DstRegs = allocateVRegs(U);

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    DstRegs[i] = SrcRegs[Idx++];

  return true;
}

bool IRTranslator::translateInsertValue(const User &U,
                                        MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  auto &DstRegs = allocateVRegs(U);
  ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
  ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<Register> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
  auto *InsertedIt = InsertedRegs.begin();

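  // Reuse the aggregate's sub-registers, substituting the inserted value's
  // registers once the insertion offset is reached.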
  for (unsigned i = 0; i < DstRegs.size(); ++i) {
    if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
      DstRegs[i] = *InsertedIt++;
    else
      DstRegs[i] = SrcRegs[i];
  }

  return true;
}

bool IRTranslator::translateSelect(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  Register Tst = getOrCreateVReg(*U.getOperand(0));
  ArrayRef<Register> ResRegs = getOrCreateVRegs(U);
  ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
  ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2));

  uint16_t Flags = 0;
  if (const SelectInst *SI = dyn_cast<SelectInst>(&U))
    Flags = MachineInstr::copyFlagsFromInstruction(*SI);

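  // Aggregate selects lower to one G_SELECT per sub-value, all sharing the
  // same condition register.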
  for (unsigned i = 0; i < ResRegs.size(); ++i) {
    MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags);
  }

  return true;
}

bool IRTranslator::translateCopy(const User &U, const Value &V,
                                 MachineIRBuilder &MIRBuilder) {
  Register Src = getOrCreateVReg(V);
  auto &Regs = *VMap.getVRegs(U);
  if (Regs.empty()) {
    Regs.push_back(Src);
    VMap.getOffsets(U)->push_back(0);
  } else {
    // If we already assigned a vreg for this instruction, we can't change that.
    // Emit a copy to satisfy the users we already emitted.
    MIRBuilder.buildCopy(Regs[0], Src);
  }
  return true;
}

bool IRTranslator::translateBitCast(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  // If we're bitcasting to the source type, we can reuse the source vreg.
  if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
      getLLTForType(*U.getType(), *DL))
    return translateCopy(U, *U.getOperand(0), MIRBuilder);

  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
}

bool IRTranslator::translateCast(unsigned Opcode, const User &U,
                                 MachineIRBuilder &MIRBuilder) {
  Register Op = getOrCreateVReg(*U.getOperand(0));
  Register Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode, {Res}, {Op});
  return true;
}

bool IRTranslator::translateGetElementPtr(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  Value &Op0 = *U.getOperand(0);
  Register BaseReg = getOrCreateVReg(Op0);
  Type *PtrIRTy = Op0.getType();
  LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
  Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  // Normalize Vector GEP - all scalar operands should be converted to the
  // splat vector.
  unsigned VectorWidth = 0;

  // True if we should use a splat vector; using VectorWidth alone is not
  // sufficient.
  bool WantSplatVector = false;
  if (auto *VT = dyn_cast<VectorType>(U.getType())) {
    VectorWidth = cast<FixedVectorType>(VT)->getNumElements();
    // We don't produce 1 x N vectors; those are treated as scalars.
    WantSplatVector = VectorWidth > 1;
  }

  // We might need to splat the base pointer into a vector if the offsets
  // are vectors.
  if (WantSplatVector && !PtrTy.isVector()) {
    BaseReg =
        MIRBuilder
            .buildSplatVector(LLT::fixed_vector(VectorWidth, PtrTy), BaseReg)
            .getReg(0);
    PtrIRTy = FixedVectorType::get(PtrIRTy, VectorWidth);
    PtrTy = getLLTForType(*PtrIRTy, *DL);
    OffsetIRTy = DL->getIntPtrType(PtrIRTy);
    OffsetTy = getLLTForType(*OffsetIRTy, *DL);
  }

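  // Accumulate runs of constant indices into Offset and only materialize a
  // G_PTR_ADD when a dynamic index (or the end of the GEP) is reached.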
  int64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      continue;
    } else {
      uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());

      // If this is a scalar constant or a splat vector of constants,
      // handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        Offset += ElementSize * CI->getSExtValue();
        continue;
      }

      if (Offset != 0) {
        auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset);
        BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, OffsetMIB.getReg(0))
                      .getReg(0);
        Offset = 0;
      }

      Register IdxReg = getOrCreateVReg(*Idx);
      LLT IdxTy = MRI->getType(IdxReg);
      if (IdxTy != OffsetTy) {
        if (!IdxTy.isVector() && WantSplatVector) {
          IdxReg = MIRBuilder.buildSplatVector(
            OffsetTy.changeElementType(IdxTy), IdxReg).getReg(0);
        }

        IdxReg = MIRBuilder.buildSExtOrTrunc(OffsetTy, IdxReg).getReg(0);
      }

      // N = N + Idx * ElementSize;
      // Avoid doing it for ElementSize of 1.
      Register GepOffsetReg;
      if (ElementSize != 1) {
        auto ElementSizeMIB = MIRBuilder.buildConstant(
            getLLTForType(*OffsetIRTy, *DL), ElementSize);
        GepOffsetReg =
            MIRBuilder.buildMul(OffsetTy, IdxReg, ElementSizeMIB).getReg(0);
      } else
        GepOffsetReg = IdxReg;

      BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, GepOffsetReg).getReg(0);
    }
  }

  if (Offset != 0) {
    auto OffsetMIB = MIRBuilder.buildConstant(OffsetTy, Offset);
    MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0));
    return true;
  }

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
  return true;
}

bool IRTranslator::translateMemFunc(const CallInst &CI,
                                    MachineIRBuilder &MIRBuilder,
                                    unsigned Opcode) {
  const Value *SrcPtr = CI.getArgOperand(1);
  // If the source is undef, then just emit a nop.
  if (isa<UndefValue>(SrcPtr))
    return true;

  SmallVector<Register, 3> SrcRegs;

  unsigned MinPtrSize = UINT_MAX;
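  // Gather vregs for every argument except the trailing volatile flag, and
  // track the narrowest pointer width seen among them.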
  for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI) {
    Register SrcReg = getOrCreateVReg(**AI);
    LLT SrcTy = MRI->getType(SrcReg);
    if (SrcTy.isPointer())
      MinPtrSize = std::min<unsigned>(SrcTy.getSizeInBits(), MinPtrSize);
    SrcRegs.push_back(SrcReg);
  }

  LLT SizeTy = LLT::scalar(MinPtrSize);

  // The size operand should be the minimum of the pointer sizes.
  Register &SizeOpReg = SrcRegs[SrcRegs.size() - 1];
  if (MRI->getType(SizeOpReg) != SizeTy)
    SizeOpReg = MIRBuilder.buildZExtOrTrunc(SizeTy, SizeOpReg).getReg(0);

  auto ICall = MIRBuilder.buildInstr(Opcode);
  for (Register SrcReg : SrcRegs)
    ICall.addUse(SrcReg);

  Align DstAlign;
  Align SrcAlign;
  unsigned IsVol =
      cast<ConstantInt>(CI.getArgOperand(CI.arg_size() - 1))->getZExtValue();

  ConstantInt *CopySize = nullptr;

  if (auto *MCI = dyn_cast<MemCpyInst>(&CI)) {
    DstAlign = MCI->getDestAlign().valueOrOne();
    SrcAlign = MCI->getSourceAlign().valueOrOne();
    CopySize = dyn_cast<ConstantInt>(MCI->getArgOperand(2));
  } else if (auto *MCI = dyn_cast<MemCpyInlineInst>(&CI)) {
    DstAlign = MCI->getDestAlign().valueOrOne();
    SrcAlign = MCI->getSourceAlign().valueOrOne();
    CopySize = dyn_cast<ConstantInt>(MCI->getArgOperand(2));
  } else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) {
    DstAlign = MMI->getDestAlign().valueOrOne();
    SrcAlign = MMI->getSourceAlign().valueOrOne();
    CopySize = dyn_cast<ConstantInt>(MMI->getArgOperand(2));
  } else {
    auto *MSI = cast<MemSetInst>(&CI);
    DstAlign = MSI->getDestAlign().valueOrOne();
  }

  if (Opcode != TargetOpcode::G_MEMCPY_INLINE) {
    // We need to propagate the tail call flag from the IR inst as an argument.
    // Otherwise, we have to pessimize and assume later that we cannot tail call
    // any memory intrinsics.
    ICall.addImm(CI.isTailCall() ? 1 : 0);
  }

  // Create mem operands to store the alignment and volatile info.
  MachineMemOperand::Flags LoadFlags = MachineMemOperand::MOLoad;
  MachineMemOperand::Flags StoreFlags = MachineMemOperand::MOStore;
  if (IsVol) {
    LoadFlags |= MachineMemOperand::MOVolatile;
    StoreFlags |= MachineMemOperand::MOVolatile;
  }

  AAMDNodes AAInfo = CI.getAAMetadata();
  if (AA && CopySize &&
      AA->pointsToConstantMemory(MemoryLocation(
          SrcPtr, LocationSize::precise(CopySize->getZExtValue()), AAInfo))) {
    LoadFlags |= MachineMemOperand::MOInvariant;

    // FIXME: pointsToConstantMemory probably does not imply dereferenceable,
    // but the previous usage implied it did. Probably should check
    // isDereferenceableAndAlignedPointer.
    LoadFlags |= MachineMemOperand::MODereferenceable;
  }

  ICall.addMemOperand(
      MF->getMachineMemOperand(MachinePointerInfo(CI.getArgOperand(0)),
                               StoreFlags, 1, DstAlign, AAInfo));
  if (Opcode != TargetOpcode::G_MEMSET)
    ICall.addMemOperand(MF->getMachineMemOperand(
        MachinePointerInfo(SrcPtr), LoadFlags, 1, SrcAlign, AAInfo));

  return true;
}

void IRTranslator::getStackGuard(Register DstReg,
                                 MachineIRBuilder &MIRBuilder) {
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
  auto MIB =
      MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {});

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
  if (!Global)
    return;

  unsigned AddrSpace = Global->getType()->getPointerAddressSpace();
  LLT PtrTy = LLT::pointer(AddrSpace, DL->getPointerSizeInBits(AddrSpace));

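  // Describe the guard load as invariant and dereferenceable so later passes
  // may freely reorder or CSE it.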
  MachinePointerInfo MPInfo(Global);
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
               MachineMemOperand::MODereferenceable;
  MachineMemOperand *MemRef = MF->getMachineMemOperand(
      MPInfo, Flags, PtrTy, DL->getPointerABIAlignment(AddrSpace));
  MIB.setMemRefs({MemRef});
}

bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                              MachineIRBuilder &MIRBuilder) {
  ArrayRef<Register> ResRegs = getOrCreateVRegs(CI);
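  // Overflow intrinsics return {result, overflow-bit}, which maps onto the
  // two result registers here.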
  MIRBuilder.buildInstr(
      Op, {ResRegs[0], ResRegs[1]},
      {getOrCreateVReg(*CI.getOperand(0)), getOrCreateVReg(*CI.getOperand(1))});

  return true;
}

bool IRTranslator::translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
                                                MachineIRBuilder &MIRBuilder) {
  Register Dst = getOrCreateVReg(CI);
  Register Src0 = getOrCreateVReg(*CI.getOperand(0));
  Register Src1 = getOrCreateVReg(*CI.getOperand(1));
  uint64_t Scale = cast<ConstantInt>(CI.getOperand(2))->getZExtValue();
  MIRBuilder.buildInstr(Op, {Dst}, {Src0, Src1, Scale});
  return true;
}

unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
  switch (ID) {
    default:
      break;
    case Intrinsic::bswap:
      return TargetOpcode::G_BSWAP;
    case Intrinsic::bitreverse:
      return TargetOpcode::G_BITREVERSE;
    case Intrinsic::fshl:
      return TargetOpcode::G_FSHL;
    case Intrinsic::fshr:
      return TargetOpcode::G_FSHR;
    case Intrinsic::ceil:
      return TargetOpcode::G_FCEIL;
    case Intrinsic::cos:
      return TargetOpcode::G_FCOS;
    case Intrinsic::ctpop:
      return TargetOpcode::G_CTPOP;
    case Intrinsic::exp:
      return TargetOpcode::G_FEXP;
    case Intrinsic::exp2:
      return TargetOpcode::G_FEXP2;
    case Intrinsic::fabs:
      return TargetOpcode::G_FABS;
    case Intrinsic::copysign:
      return TargetOpcode::G_FCOPYSIGN;
    case Intrinsic::minnum:
      return TargetOpcode::G_FMINNUM;
    case Intrinsic::maxnum:
      return TargetOpcode::G_FMAXNUM;
    case Intrinsic::minimum:
      return TargetOpcode::G_FMINIMUM;
    case Intrinsic::maximum:
      return TargetOpcode::G_FMAXIMUM;
    case Intrinsic::canonicalize:
      return TargetOpcode::G_FCANONICALIZE;
    case Intrinsic::floor:
      return TargetOpcode::G_FFLOOR;
    case Intrinsic::fma:
      return TargetOpcode::G_FMA;
    case Intrinsic::log:
      return TargetOpcode::G_FLOG;
    case Intrinsic::log2:
      return TargetOpcode::G_FLOG2;
    case Intrinsic::log10:
      return TargetOpcode::G_FLOG10;
    case Intrinsic::nearbyint:
      return TargetOpcode::G_FNEARBYINT;
    case Intrinsic::pow:
      return TargetOpcode::G_FPOW;
    case Intrinsic::powi:
      return TargetOpcode::G_FPOWI;
    case Intrinsic::rint:
      return TargetOpcode::G_FRINT;
    case Intrinsic::round:
      return TargetOpcode::G_INTRINSIC_ROUND;
    case Intrinsic::roundeven:
      return TargetOpcode::G_INTRINSIC_ROUNDEVEN;
    case Intrinsic::sin:
      return TargetOpcode::G_FSIN;
    case Intrinsic::sqrt:
      return TargetOpcode::G_FSQRT;
    case Intrinsic::trunc:
      return TargetOpcode::G_INTRINSIC_TRUNC;
    case Intrinsic::readcyclecounter:
      return TargetOpcode::G_READCYCLECOUNTER;
    case Intrinsic::ptrmask:
      return TargetOpcode::G_PTRMASK;
    case Intrinsic::lrint:
      return TargetOpcode::G_INTRINSIC_LRINT;
    // FADD/FMUL require checking the FMF, so are handled elsewhere.
    case Intrinsic::vector_reduce_fmin:
      return TargetOpcode::G_VECREDUCE_FMIN;
    case Intrinsic::vector_reduce_fmax:
      return TargetOpcode::G_VECREDUCE_FMAX;
    case Intrinsic::vector_reduce_add:
      return TargetOpcode::G_VECREDUCE_ADD;
    case Intrinsic::vector_reduce_mul:
      return TargetOpcode::G_VECREDUCE_MUL;
    case Intrinsic::vector_reduce_and:
      return TargetOpcode::G_VECREDUCE_AND;
    case Intrinsic::vector_reduce_or:
      return TargetOpcode::G_VECREDUCE_OR;
    case Intrinsic::vector_reduce_xor:
      return TargetOpcode::G_VECREDUCE_XOR;
    case Intrinsic::vector_reduce_smax:
      return TargetOpcode::G_VECREDUCE_SMAX;
    case Intrinsic::vector_reduce_smin:
      return TargetOpcode::G_VECREDUCE_SMIN;
    case Intrinsic::vector_reduce_umax:
      return TargetOpcode::G_VECREDUCE_UMAX;
    case Intrinsic::vector_reduce_umin:
      return TargetOpcode::G_VECREDUCE_UMIN;
    case Intrinsic::lround:
      return TargetOpcode::G_LROUND;
    case Intrinsic::llround:
      return TargetOpcode::G_LLROUND;
  }
  return Intrinsic::not_intrinsic;
}

bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
                                            Intrinsic::ID ID,
                                            MachineIRBuilder &MIRBuilder) {

  unsigned Op = getSimpleIntrinsicOpcode(ID);

  // Is this a simple intrinsic?
  if (Op == Intrinsic::not_intrinsic)
    return false;

  // Yes. Let's translate it.
  SmallVector<llvm::SrcOp, 4> VRegs;
  for (const auto &Arg : CI.args())
    VRegs.push_back(getOrCreateVReg(*Arg));

  MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
                        MachineInstr::copyFlagsFromInstruction(CI));
  return true;
}

// TODO: Include ConstrainedOps.def when all strict instructions are defined.
static unsigned getConstrainedOpcode(Intrinsic::ID ID) {
  switch (ID) {
  case Intrinsic::experimental_constrained_fadd:
    return TargetOpcode::G_STRICT_FADD;
  case Intrinsic::experimental_constrained_fsub:
    return TargetOpcode::G_STRICT_FSUB;
  case Intrinsic::experimental_constrained_fmul:
    return TargetOpcode::G_STRICT_FMUL;
  case Intrinsic::experimental_constrained_fdiv:
    return TargetOpcode::G_STRICT_FDIV;
  case Intrinsic::experimental_constrained_frem:
    return TargetOpcode::G_STRICT_FREM;
  case Intrinsic::experimental_constrained_fma:
    return TargetOpcode::G_STRICT_FMA;
  case Intrinsic::experimental_constrained_sqrt:
    return TargetOpcode::G_STRICT_FSQRT;
  default:
    return 0;
  }
}

bool IRTranslator::translateConstrainedFPIntrinsic(
  const ConstrainedFPIntrinsic &FPI, MachineIRBuilder &MIRBuilder) {
  fp::ExceptionBehavior EB = *FPI.getExceptionBehavior();

  unsigned Opcode = getConstrainedOpcode(FPI.getIntrinsicID());
  if (!Opcode)
    return false;

  unsigned Flags = MachineInstr::copyFlagsFromInstruction(FPI);
  if (EB == fp::ExceptionBehavior::ebIgnore)
    Flags |= MachineInstr::NoFPExcept;

  SmallVector<llvm::SrcOp, 4> VRegs;
  VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(0)));
  if (!FPI.isUnaryOp())
    VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(1)));
  if (FPI.isTernaryOp())
    VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(2)));

  MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(FPI)}, VRegs, Flags);
  return true;
}

bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                           MachineIRBuilder &MIRBuilder) {
  if (auto *MI = dyn_cast<AnyMemIntrinsic>(&CI)) {
    if (ORE->enabled()) {
      if (MemoryOpRemark::canHandle(MI, *LibInfo)) {
        MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, *LibInfo);
        R.visit(MI);
      }
    }
  }

  // If this is a simple intrinsic (that is, we just need to add a def of a
  // vreg and uses for each arg operand), then translate it.
  if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
    return true;

  switch (ID) {
  default:
    break;
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end: {
    // No stack colouring in O0, discard region information.
    if (MF->getTarget().getOptLevel() == CodeGenOpt::None)
      return true;

    unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
                                                  : TargetOpcode::LIFETIME_END;

    // Get the underlying objects for the location passed on the lifetime
    // marker.
    SmallVector<const Value *, 4> Allocas;
    getUnderlyingObjects(CI.getArgOperand(1), Allocas);

    // Iterate over each underlying object, creating lifetime markers for each
    // static alloca. Quit if we find a non-static alloca.
    for (const Value *V : Allocas) {
      const AllocaInst *AI = dyn_cast<AllocaInst>(V);
      if (!AI)
        continue;

      if (!AI->isStaticAlloca())
        return true;

      MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI));
    }
    return true;
  }
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
    assert(DI.getVariable() && "Missing variable");

    const Value *Address = DI.getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
      return true;
    }

    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    auto AI = dyn_cast<AllocaInst>(Address);
    if (AI && AI->isStaticAlloca()) {
      // Static allocas are tracked at the MF level, no need for DBG_VALUE
      // instructions (in fact, they get ignored if they *do* exist).
      MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(),
                             getOrCreateFrameIndex(*AI), DI.getDebugLoc());
    } else {
      // A dbg.declare describes the address of a source variable, so lower it
      // into an indirect DBG_VALUE.
      MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address),
                                       DI.getVariable(), DI.getExpression());
    }
    return true;
  }
  case Intrinsic::dbg_label: {
    const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
    assert(DI.getLabel() && "Missing label");

    assert(DI.getLabel()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");

    MIRBuilder.buildDbgLabel(DI.getLabel());
    return true;
  }
  case Intrinsic::vaend:
    // No target I know of cares about va_end. Certainly no in-tree target
    // does. Simplest intrinsic ever!
    return true;
  case Intrinsic::vastart: {
    auto &TLI = *MF->getSubtarget().getTargetLowering();
    Value *Ptr = CI.getArgOperand(0);
    unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;

    // FIXME: Get alignment
    MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)})
        .addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Ptr),
                                                MachineMemOperand::MOStore,
                                                ListSize, Align(1)));
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst &DI = cast<DbgValueInst>(CI);
    const Value *V = DI.getValue();
    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    if (!V || DI.hasArgList()) {
      // DI cannot produce a valid DBG_VALUE, so produce an undef DBG_VALUE to
      // terminate any prior location.
      MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression());
    } else if (const auto *CI = dyn_cast<Constant>(V)) {
      MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
    } else {
      for (Register Reg : getOrCreateVRegs(*V)) {
        // FIXME: This does not handle register-indirect values at offset 0.
        // The direct/indirect thing shouldn't really be handled by something
        // as implicit as reg+noreg vs reg+imm in the first place, but it seems
        // pretty baked in right now.
        MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(),
                                       DI.getExpression());
      }
    }
    return true;
  }
  case Intrinsic::uadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
  case Intrinsic::sadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
  case Intrinsic::usub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
  case Intrinsic::ssub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
  case Intrinsic::umul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
  case Intrinsic::smul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
  case Intrinsic::uadd_sat:
    return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder);
  case Intrinsic::sadd_sat:
    return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder);
  case Intrinsic::usub_sat:
    return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder);
  case Intrinsic::ssub_sat:
    return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder);
  case Intrinsic::ushl_sat:
    return translateBinaryOp(TargetOpcode::G_USHLSAT, CI, MIRBuilder);
  case Intrinsic::sshl_sat:
    return translateBinaryOp(TargetOpcode::G_SSHLSAT, CI, MIRBuilder);
  case Intrinsic::umin:
    return translateBinaryOp(TargetOpcode::G_UMIN, CI, MIRBuilder);
  case Intrinsic::umax:
    return translateBinaryOp(TargetOpcode::G_UMAX, CI, MIRBuilder);
  case Intrinsic::smin:
    return translateBinaryOp(TargetOpcode::G_SMIN, CI, MIRBuilder);
  case Intrinsic::smax:
    return translateBinaryOp(TargetOpcode::G_SMAX, CI, MIRBuilder);
  case Intrinsic::abs:
    // TODO: Preserve "int min is poison" arg in GMIR?
    return translateUnaryOp(TargetOpcode::G_ABS, CI, MIRBuilder);
  case Intrinsic::smul_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIX, CI, MIRBuilder);
  case Intrinsic::umul_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIX, CI, MIRBuilder);
  case Intrinsic::smul_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIXSAT, CI, MIRBuilder);
  case Intrinsic::umul_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIXSAT, CI, MIRBuilder);
  case Intrinsic::sdiv_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIX, CI, MIRBuilder);
  case Intrinsic::udiv_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIX, CI, MIRBuilder);
  case Intrinsic::sdiv_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIXSAT, CI, MIRBuilder);
  case Intrinsic::udiv_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder);
  case Intrinsic::fmuladd: {
    const TargetMachine &TM = MF->getTarget();
    const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
    Register Dst = getOrCreateVReg(CI);
    Register Op0 = getOrCreateVReg(*CI.getArgOperand(0));
    Register Op1 = getOrCreateVReg(*CI.getArgOperand(1));
    Register Op2 = getOrCreateVReg(*CI.getArgOperand(2));
    if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
        TLI.isFMAFasterThanFMulAndFAdd(*MF,
                                       TLI.getValueType(*DL, CI.getType()))) {
      // TODO: Revisit this to see if we should move this part of the
      // lowering to the combiner.
      MIRBuilder.buildFMA(Dst, Op0, Op1, Op2,
                          MachineInstr::copyFlagsFromInstruction(CI));
    } else {
      LLT Ty = getLLTForType(*CI.getType(), *DL);
      auto FMul = MIRBuilder.buildFMul(
          Ty, Op0, Op1, MachineInstr::copyFlagsFromInstruction(CI));
      MIRBuilder.buildFAdd(Dst, FMul, Op2,
                           MachineInstr::copyFlagsFromInstruction(CI));
    }
    return true;
  }
  case Intrinsic::convert_from_fp16:
    // FIXME: This intrinsic should probably be removed from the IR.
    MIRBuilder.buildFPExt(getOrCreateVReg(CI),
                          getOrCreateVReg(*CI.getArgOperand(0)),
                          MachineInstr::copyFlagsFromInstruction(CI));
    return true;
  case Intrinsic::convert_to_fp16:
    // FIXME: This intrinsic should probably be removed from the IR.
    MIRBuilder.buildFPTrunc(getOrCreateVReg(CI),
                            getOrCreateVReg(*CI.getArgOperand(0)),
                            MachineInstr::copyFlagsFromInstruction(CI));
    return true;
  case Intrinsic::memcpy_inline:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY_INLINE);
  case Intrinsic::memcpy:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY);
  case Intrinsic::memmove:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMMOVE);
  case Intrinsic::memset:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMSET);
  case Intrinsic::eh_typeid_for: {
    GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
    Register Reg = getOrCreateVReg(CI);
    unsigned TypeID = MF->getTypeIDFor(GV);
    MIRBuilder.buildConstant(Reg, TypeID);
    return true;
  }
  case Intrinsic::objectsize:
    llvm_unreachable("llvm.objectsize.* should have been lowered already");

  case Intrinsic::is_constant:
    llvm_unreachable("llvm.is.constant.* should have been lowered already");

  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI), MIRBuilder);
    return true;
  case Intrinsic::stackprotector: {
    const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    Register GuardVal;
    if (TLI.useLoadStackGuardNode()) {
      GuardVal = MRI->createGenericVirtualRegister(PtrTy);
      getStackGuard(GuardVal, MIRBuilder);
    } else
      GuardVal = getOrCreateVReg(*CI.getArgOperand(0)); // The guard's value.

    AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
    int FI = getOrCreateFrameIndex(*Slot);
    MF->getFrameInfo().setStackProtectorIndex(FI);

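    // Spill the guard value to the protector slot. The store is volatile so
    // it cannot be elided or reordered away.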
    MIRBuilder.buildStore(
        GuardVal, getOrCreateVReg(*Slot),
        *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
                                  MachineMemOperand::MOStore |
                                      MachineMemOperand::MOVolatile,
                                  PtrTy, Align(8)));
    return true;
  }
  case Intrinsic::stacksave: {
    // Save the stack pointer to the location provided by the intrinsic.
    Register Reg = getOrCreateVReg(CI);
    Register StackPtr = MF->getSubtarget()
                            .getTargetLowering()
                            ->getStackPointerRegisterToSaveRestore();

    // If the target doesn't specify a stack pointer, then fall back.
    if (!StackPtr)
      return false;

    MIRBuilder.buildCopy(Reg, StackPtr);
    return true;
  }
  case Intrinsic::stackrestore: {
    // Restore the stack pointer from the location provided by the intrinsic.
    Register Reg = getOrCreateVReg(*CI.getArgOperand(0));
    Register StackPtr = MF->getSubtarget()
                            .getTargetLowering()
                            ->getStackPointerRegisterToSaveRestore();

    // If the target doesn't specify a stack pointer, then fall back.
    if (!StackPtr)
      return false;

    MIRBuilder.buildCopy(StackPtr, Reg);
    return true;
  }
  case Intrinsic::cttz:
  case Intrinsic::ctlz: {
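    // The second operand is the 'is zero poison' flag; when it is set we can
    // use the *_ZERO_UNDEF variants.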
    ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));
    bool isTrailing = ID == Intrinsic::cttz;
    unsigned Opcode = isTrailing
                          ? Cst->isZero() ? TargetOpcode::G_CTTZ
                                          : TargetOpcode::G_CTTZ_ZERO_UNDEF
                          : Cst->isZero() ? TargetOpcode::G_CTLZ
                                          : TargetOpcode::G_CTLZ_ZERO_UNDEF;
    MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(CI)},
                          {getOrCreateVReg(*CI.getArgOperand(0))});
    return true;
  }
  case Intrinsic::invariant_start: {
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    Register Undef = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildUndef(Undef);
    return true;
  }
  case Intrinsic::invariant_end:
    return true;
  case Intrinsic::expect:
  case Intrinsic::annotation:
  case Intrinsic::ptr_annotation:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group: {
    // Drop the intrinsic, but forward the value.
    MIRBuilder.buildCopy(getOrCreateVReg(CI),
                         getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  }
  case Intrinsic::assume:
  case Intrinsic::experimental_noalias_scope_decl:
  case Intrinsic::var_annotation:
  case Intrinsic::sideeffect:
    // Discard annotate attributes, assumptions, and artificial side-effects.
    return true;
  case Intrinsic::read_volatile_register:
  case Intrinsic::read_register: {
    Value *Arg = CI.getArgOperand(0);
    MIRBuilder
        .buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {})
        .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()));
    return true;
  }
  case Intrinsic::write_register: {
    Value *Arg = CI.getArgOperand(0);
    MIRBuilder.buildInstr(TargetOpcode::G_WRITE_REGISTER)
      .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()))
      .addUse(getOrCreateVReg(*CI.getArgOperand(1)));
    return true;
  }
  case Intrinsic::localescape: {
    MachineBasicBlock &EntryMBB = MF->front();
    StringRef EscapedName = GlobalValue::dropLLVMManglingEscape(MF->getName());

    // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
    // is the same on all targets.
    for (unsigned Idx = 0, E = CI.arg_size(); Idx < E; ++Idx) {
      Value *Arg = CI.getArgOperand(Idx)->stripPointerCasts();
      if (isa<ConstantPointerNull>(Arg))
        continue; // Skip null pointers. They represent a hole in index space.

      int FI = getOrCreateFrameIndex(*cast<AllocaInst>(Arg));
      MCSymbol *FrameAllocSym =
          MF->getMMI().getContext().getOrCreateFrameAllocSymbol(EscapedName,
                                                                Idx);

      // This should be inserted at the start of the entry block.
      auto LocalEscape =
          MIRBuilder.buildInstrNoInsert(TargetOpcode::LOCAL_ESCAPE)
              .addSym(FrameAllocSym)
              .addFrameIndex(FI);

      EntryMBB.insert(EntryMBB.begin(), LocalEscape);
    }

    return true;
  }
  case Intrinsic::vector_reduce_fadd:
  case Intrinsic::vector_reduce_fmul: {
    // Need to check for the reassoc flag to decide whether we want a
    // sequential reduction opcode or not.
    Register Dst = getOrCreateVReg(CI);
    Register ScalarSrc = getOrCreateVReg(*CI.getArgOperand(0));
    Register VecSrc = getOrCreateVReg(*CI.getArgOperand(1));
    unsigned Opc = 0;
    if (!CI.hasAllowReassoc()) {
      // The sequential ordering case.
      Opc = ID == Intrinsic::vector_reduce_fadd
                ? TargetOpcode::G_VECREDUCE_SEQ_FADD
                : TargetOpcode::G_VECREDUCE_SEQ_FMUL;
      MIRBuilder.buildInstr(Opc, {Dst}, {ScalarSrc, VecSrc},
                            MachineInstr::copyFlagsFromInstruction(CI));
      return true;
    }
    // We split the operation into a separate G_FADD/G_FMUL + the reduce,
    // since the associativity doesn't matter.
    unsigned ScalarOpc;
    if (ID == Intrinsic::vector_reduce_fadd) {
      Opc = TargetOpcode::G_VECREDUCE_FADD;
      ScalarOpc = TargetOpcode::G_FADD;
    } else {
      Opc = TargetOpcode::G_VECREDUCE_FMUL;
      ScalarOpc = TargetOpcode::G_FMUL;
    }
    LLT DstTy = MRI->getType(Dst);
    auto Rdx = MIRBuilder.buildInstr(
        Opc, {DstTy}, {VecSrc}, MachineInstr::copyFlagsFromInstruction(CI));
    MIRBuilder.buildInstr(ScalarOpc, {Dst}, {ScalarSrc, Rdx},
                          MachineInstr::copyFlagsFromInstruction(CI));

    return true;
  }
  case Intrinsic::trap:
  case Intrinsic::debugtrap:
  case Intrinsic::ubsantrap: {
    StringRef TrapFuncName =
        CI.getAttributes().getFnAttr("trap-func-name").getValueAsString();
    if (TrapFuncName.empty())
      break; // Use the default handling.
    CallLowering::CallLoweringInfo Info;
    if (ID == Intrinsic::ubsantrap) {
      Info.OrigArgs.push_back({getOrCreateVRegs(*CI.getArgOperand(0)),
                               CI.getArgOperand(0)->getType(), 0});
    }
    Info.Callee = MachineOperand::CreateES(TrapFuncName.data());
    Info.CB = &CI;
    Info.OrigRet = {Register(), Type::getVoidTy(CI.getContext()), 0};
    return CLI->lowerCall(MIRBuilder, Info);
  }
  case Intrinsic::fptrunc_round: {
    unsigned Flags = MachineInstr::copyFlagsFromInstruction(CI);

    // Convert the metadata argument to a constant integer.
    Metadata *MD = cast<MetadataAsValue>(CI.getArgOperand(1))->getMetadata();
    std::optional<RoundingMode> RoundMode =
        convertStrToRoundingMode(cast<MDString>(MD)->getString());

    // Add the rounding mode as an integer.
    MIRBuilder
        .buildInstr(TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND,
                    {getOrCreateVReg(CI)},
                    {getOrCreateVReg(*CI.getArgOperand(0))}, Flags)
        .addImm((int)*RoundMode);

    return true;
  }
  case Intrinsic::is_fpclass: {
    Value *FpValue = CI.getOperand(0);
    ConstantInt *TestMaskValue = cast<ConstantInt>(CI.getOperand(1));

    MIRBuilder
        .buildInstr(TargetOpcode::G_IS_FPCLASS, {getOrCreateVReg(CI)},
                    {getOrCreateVReg(*FpValue)})
        .addImm(TestMaskValue->getZExtValue());

    return true;
  }
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)  \
  case Intrinsic::INTRINSIC:
#include "llvm/IR/ConstrainedOps.def"
    return translateConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(CI),
                                           MIRBuilder);

  }
  return false;
}

bool IRTranslator::translateInlineAsm(const CallBase &CB,
                                      MachineIRBuilder &MIRBuilder) {

  const InlineAsmLowering *ALI = MF->getSubtarget().getInlineAsmLowering();

  if (!ALI) {
    LLVM_DEBUG(
        dbgs() << "Inline asm lowering is not supported for this target yet\n");
    return false;
  }

  return ALI->lowerInlineAsm(
      MIRBuilder, CB, [&](const Value &Val) { return getOrCreateVRegs(Val); });
}

bool IRTranslator::translateCallBase(const CallBase &CB,
                                     MachineIRBuilder &MIRBuilder) {
  ArrayRef<Register> Res = getOrCreateVRegs(CB);

  SmallVector<ArrayRef<Register>, 8> Args;
  Register SwiftInVReg = 0;
  Register SwiftErrorVReg = 0;
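  // Swifterror values travel through dedicated vregs managed by the
  // SwiftError helper rather than through the ordinary argument list.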
  for (const auto &Arg : CB.args()) {
    if (CLI->supportSwiftError() && isSwiftError(Arg)) {
      assert(SwiftInVReg == 0 && "Expected only one swift error argument");
      LLT Ty = getLLTForType(*Arg->getType(), *DL);
      SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
      MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
                                            &CB, &MIRBuilder.getMBB(), Arg));
      Args.emplace_back(ArrayRef(SwiftInVReg));
      SwiftErrorVReg =
          SwiftError.getOrCreateVRegDefAt(&CB, &MIRBuilder.getMBB(), Arg);
      continue;
    }
    Args.push_back(getOrCreateVRegs(*Arg));
  }

  if (auto *CI = dyn_cast<CallInst>(&CB)) {
    if (ORE->enabled()) {
      if (MemoryOpRemark::canHandle(CI, *LibInfo)) {
        MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, *LibInfo);
        R.visit(CI);
      }
    }
  }

  // We don't set HasCalls on MFI here yet because call lowering may decide to
  // optimize into tail calls. Instead, we defer that to selection where a final
  // scan is done to check if any instructions are calls.
  bool Success =
      CLI->lowerCall(MIRBuilder, CB, Res, Args, SwiftErrorVReg,
                     [&]() { return getOrCreateVReg(*CB.getCalledOperand()); });

  // Check if we just inserted a tail call.
  if (Success) {
    assert(!HasTailCall && "Can't tail call return twice from block?");
    const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
    HasTailCall = TII->isTailCall(*std::prev(MIRBuilder.getInsertPt()));
  }

  return Success;
}

bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
  const CallInst &CI = cast<CallInst>(U);
  auto TII = MF->getTarget().getIntrinsicInfo();
  const Function *F = CI.getCalledFunction();

  // FIXME: support Windows dllimport function calls.
  if (F && (F->hasDLLImportStorageClass() ||
            (MF->getTarget().getTargetTriple().isOSWindows() &&
             F->hasExternalWeakLinkage())))
    return false;

  // FIXME: support control flow guard targets.
  if (CI.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget))
    return false;

  // FIXME: support statepoints and related.
  if (isa<GCStatepointInst, GCRelocateInst, GCResultInst>(U))
    return false;

  if (CI.isInlineAsm())
    return translateInlineAsm(CI, MIRBuilder);

  diagnoseDontCall(CI);

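  // Resolve the intrinsic ID, consulting target-specific intrinsic info for
  // IDs the generic lookup does not know about.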
  Intrinsic::ID ID = Intrinsic::not_intrinsic;
  if (F && F->isIntrinsic()) {
    ID = F->getIntrinsicID();
    if (TII && ID == Intrinsic::not_intrinsic)
      ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
  }

  if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic)
    return translateCallBase(CI, MIRBuilder);

  assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");

  if (translateKnownIntrinsic(CI, ID, MIRBuilder))
    return true;

  ArrayRef<Register> ResultRegs;
  if (!CI.getType()->isVoidTy())
    ResultRegs = getOrCreateVRegs(CI);

  // Ignore the callsite attributes. Backend code is most likely not expecting
  // an intrinsic to sometimes have side effects and sometimes not.
  MachineInstrBuilder MIB =
      MIRBuilder.buildIntrinsic(ID, ResultRegs, !F->doesNotAccessMemory());
  if (isa<FPMathOperator>(CI))
    MIB->copyIRFlags(CI);

  for (const auto &Arg : enumerate(CI.args())) {
    // If this is required to be an immediate, don't materialize it in a
    // register.
    if (CI.paramHasAttr(Arg.index(), Attribute::ImmArg)) {
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg.value())) {
        // imm arguments are more convenient than cimm (and realistically
        // probably sufficient), so use them.
        assert(CI->getBitWidth() <= 64 &&
               "large intrinsic immediates not handled");
        MIB.addImm(CI->getSExtValue());
      } else {
        MIB.addFPImm(cast<ConstantFP>(Arg.value()));
      }
    } else if (auto *MDVal = dyn_cast<MetadataAsValue>(Arg.value())) {
      auto *MD = MDVal->getMetadata();
      auto *MDN = dyn_cast<MDNode>(MD);
      if (!MDN) {
        if (auto *ConstMD = dyn_cast<ConstantAsMetadata>(MD))
          MDN = MDNode::get(MF->getFunction().getContext(), ConstMD);
        else // This was probably an MDString.
          return false;
      }
      MIB.addMetadata(MDN);
    } else {
      ArrayRef<Register> VRegs = getOrCreateVRegs(*Arg.value());
      if (VRegs.size() > 1)
        return false;
      MIB.addUse(VRegs[0]);
    }
  }

  // Add a MachineMemOperand if it is a target mem intrinsic.
  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
  TargetLowering::IntrinsicInfo Info;
  // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
  if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
    Align Alignment = Info.align.value_or(
        DL->getABITypeAlign(Info.memVT.getTypeForEVT(F->getContext())));
    LLT MemTy = Info.memVT.isSimple()
                    ? getLLTForMVT(Info.memVT.getSimpleVT())
                    : LLT::scalar(Info.memVT.getStoreSizeInBits());

    // TODO: We currently just fallback to address space 0 if getTgtMemIntrinsic
    //       didn't yield anything useful.
    MachinePointerInfo MPI;
    if (Info.ptrVal)
      MPI = MachinePointerInfo(Info.ptrVal, Info.offset);
    else if (Info.fallbackAddressSpace)
      MPI = MachinePointerInfo(*Info.fallbackAddressSpace);
    MIB.addMemOperand(MF->getMachineMemOperand(MPI, Info.flags, MemTy,
                                               Alignment, CI.getAAMetadata()));
  }

  return true;
}

bool IRTranslator::findUnwindDestinations(
    const BasicBlock *EHPadBB,
    BranchProbability Prob,
    SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
        &UnwindDests) {
  EHPersonality Personality = classifyEHPersonality(
      EHPadBB->getParent()->getFunction().getPersonalityFn());
  bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
  bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
  bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
  bool IsSEH = isAsynchronousEHPersonality(Personality);

  if (IsWasmCXX) {
    // Ignore this for now.
    return false;
  }

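  // Walk the chain of unwind destinations until a pad that terminates it
  // (a landingpad or cleanuppad) is reached.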
2522  while (EHPadBB) {
2523    const Instruction *Pad = EHPadBB->getFirstNonPHI();
2524    BasicBlock *NewEHPadBB = nullptr;
2525    if (isa<LandingPadInst>(Pad)) {
2526      // Stop on landingpads. They are not funclets.
2527      UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
2528      break;
2529    }
2530    if (isa<CleanupPadInst>(Pad)) {
2531      // Stop on cleanup pads. Cleanups are always funclet entries for all known
2532      // personalities.
2533      UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
2534      UnwindDests.back().first->setIsEHScopeEntry();
2535      UnwindDests.back().first->setIsEHFuncletEntry();
2536      break;
2537    }
2538    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2539      // Add the catchpad handlers to the possible destinations.
2540      for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2541        UnwindDests.emplace_back(&getMBB(*CatchPadBB), Prob);
2542        // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
2543        if (IsMSVCCXX || IsCoreCLR)
2544          UnwindDests.back().first->setIsEHFuncletEntry();
2545        if (!IsSEH)
2546          UnwindDests.back().first->setIsEHScopeEntry();
2547      }
2548      NewEHPadBB = CatchSwitch->getUnwindDest();
2549    } else {
2550      continue;
2551    }
2552
2553    BranchProbabilityInfo *BPI = FuncInfo.BPI;
2554    if (BPI && NewEHPadBB)
2555      Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
2556    EHPadBB = NewEHPadBB;
2557  }
2558  return true;
2559}
2560
2561bool IRTranslator::translateInvoke(const User &U,
2562                                   MachineIRBuilder &MIRBuilder) {
2563  const InvokeInst &I = cast<InvokeInst>(U);
2564  MCContext &Context = MF->getContext();
2565
2566  const BasicBlock *ReturnBB = I.getSuccessor(0);
2567  const BasicBlock *EHPadBB = I.getSuccessor(1);
2568
2569  const Function *Fn = I.getCalledFunction();
2570
2571  // FIXME: support invoking patchpoint and statepoint intrinsics.
2572  if (Fn && Fn->isIntrinsic())
2573    return false;
2574
2575  // FIXME: support whatever these are.
2576  if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
2577    return false;
2578
2579  // FIXME: support control flow guard targets.
2580  if (I.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget))
2581    return false;
2582
2583  // FIXME: support Windows exception handling.
2584  if (!isa<LandingPadInst>(EHPadBB->getFirstNonPHI()))
2585    return false;
2586
2587  bool LowerInlineAsm = I.isInlineAsm();
2588  bool NeedEHLabel = true;
2589
2590  // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
2591  // the region covered by the try.
2592  MCSymbol *BeginSymbol = nullptr;
2593  if (NeedEHLabel) {
2594    MIRBuilder.buildInstr(TargetOpcode::G_INVOKE_REGION_START);
2595    BeginSymbol = Context.createTempSymbol();
2596    MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);
2597  }
2598
2599  if (LowerInlineAsm) {
2600    if (!translateInlineAsm(I, MIRBuilder))
2601      return false;
2602  } else if (!translateCallBase(I, MIRBuilder))
2603    return false;
2604
2605  MCSymbol *EndSymbol = nullptr;
2606  if (NeedEHLabel) {
2607    EndSymbol = Context.createTempSymbol();
2608    MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);
2609  }
2610
2611  SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
2612  BranchProbabilityInfo *BPI = FuncInfo.BPI;
2613  MachineBasicBlock *InvokeMBB = &MIRBuilder.getMBB();
2614  BranchProbability EHPadBBProb =
2615      BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
2616          : BranchProbability::getZero();
2617
2618  if (!findUnwindDestinations(EHPadBB, EHPadBBProb, UnwindDests))
2619    return false;
2620
2621  MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
2622                    &ReturnMBB = getMBB(*ReturnBB);
2623  // Update successor info.
2624  addSuccessorWithProb(InvokeMBB, &ReturnMBB);
2625  for (auto &UnwindDest : UnwindDests) {
2626    UnwindDest.first->setIsEHPad();
2627    addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
2628  }
2629  InvokeMBB->normalizeSuccProbs();
2630
2631  if (NeedEHLabel) {
2632    assert(BeginSymbol && "Expected a begin symbol!");
2633    assert(EndSymbol && "Expected an end symbol!");
2634    MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
2635  }
2636
2637  MIRBuilder.buildBr(ReturnMBB);
2638  return true;
2639}
2640
2641bool IRTranslator::translateCallBr(const User &U,
2642                                   MachineIRBuilder &MIRBuilder) {
2643  // FIXME: Implement this.
2644  return false;
2645}
2646
2647bool IRTranslator::translateLandingPad(const User &U,
2648                                       MachineIRBuilder &MIRBuilder) {
2649  const LandingPadInst &LP = cast<LandingPadInst>(U);
2650
2651  MachineBasicBlock &MBB = MIRBuilder.getMBB();
2652
2653  MBB.setIsEHPad();
2654
2655  // If there aren't registers to copy the values into (e.g., during SjLj
2656  // exceptions), then don't bother.
2657  auto &TLI = *MF->getSubtarget().getTargetLowering();
2658  const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
2659  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
2660      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
2661    return true;
2662
2663  // If landingpad's return type is token type, we don't create DAG nodes
2664  // for its exception pointer and selector value. The extraction of exception
2665  // pointer or selector value from token type landingpads is not currently
2666  // supported.
2667  if (LP.getType()->isTokenTy())
2668    return true;
2669
2670  // Add a label to mark the beginning of the landing pad.  Deletion of the
2671  // landing pad can thus be detected via the MachineModuleInfo.
2672  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
2673    .addSym(MF->addLandingPad(&MBB));
2674
2675  // If the unwinder does not preserve all registers, ensure that the
2676  // function marks the clobbered registers as used.
2677  const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
2678  if (auto *RegMask = TRI.getCustomEHPadPreservedMask(*MF))
2679    MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask);
2680
2681  LLT Ty = getLLTForType(*LP.getType(), *DL);
2682  Register Undef = MRI->createGenericVirtualRegister(Ty);
2683  MIRBuilder.buildUndef(Undef);
2684
2685  SmallVector<LLT, 2> Tys;
2686  for (Type *Ty : cast<StructType>(LP.getType())->elements())
2687    Tys.push_back(getLLTForType(*Ty, *DL));
2688  assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
2689
2690  // Mark exception register as live in.
2691  Register ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
2692  if (!ExceptionReg)
2693    return false;
2694
2695  MBB.addLiveIn(ExceptionReg);
2696  ArrayRef<Register> ResRegs = getOrCreateVRegs(LP);
2697  MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);
2698
2699  Register SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
2700  if (!SelectorReg)
2701    return false;
2702
2703  MBB.addLiveIn(SelectorReg);
2704  Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
2705  MIRBuilder.buildCopy(PtrVReg, SelectorReg);
2706  MIRBuilder.buildCast(ResRegs[1], PtrVReg);
2707
2708  return true;
2709}
2710
2711bool IRTranslator::translateAlloca(const User &U,
2712                                   MachineIRBuilder &MIRBuilder) {
2713  auto &AI = cast<AllocaInst>(U);
2714
2715  if (AI.isSwiftError())
2716    return true;
2717
2718  if (AI.isStaticAlloca()) {
2719    Register Res = getOrCreateVReg(AI);
2720    int FI = getOrCreateFrameIndex(AI);
2721    MIRBuilder.buildFrameIndex(Res, FI);
2722    return true;
2723  }
2724
2725  // FIXME: support stack probing for Windows.
2726  if (MF->getTarget().getTargetTriple().isOSWindows())
2727    return false;
2728
2729  // Now we're in the harder dynamic case.
2730  Register NumElts = getOrCreateVReg(*AI.getArraySize());
2731  Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
2732  LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
2733  if (MRI->getType(NumElts) != IntPtrTy) {
2734    Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
2735    MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
2736    NumElts = ExtElts;
2737  }
2738
2739  Type *Ty = AI.getAllocatedType();
2740
2741  Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
2742  Register TySize =
2743      getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty)));
2744  MIRBuilder.buildMul(AllocSize, NumElts, TySize);
2745
2746  // Round the size of the allocation up to the stack alignment size
2747  // by add SA-1 to the size. This doesn't overflow because we're computing
2748  // an address inside an alloca.
  Align StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlign();
  auto SAMinusOne = MIRBuilder.buildConstant(IntPtrTy, StackAlign.value() - 1);
  auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne,
                                      MachineInstr::NoUWrap);
  auto AlignCst =
      MIRBuilder.buildConstant(IntPtrTy, ~(uint64_t)(StackAlign.value() - 1));
  auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst);

  Align Alignment = std::max(AI.getAlign(), DL->getPrefTypeAlign(Ty));
  if (Alignment <= StackAlign)
    Alignment = Align(1);
  MIRBuilder.buildDynStackAlloc(getOrCreateVReg(AI), AlignedAlloc, Alignment);

  MF->getFrameInfo().CreateVariableSizedObject(Alignment, &AI);
  assert(MF->getFrameInfo().hasVarSizedObjects());
  return true;
}

bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
  // FIXME: We may need more info about the type. Because of how LLT works,
  // we're completely discarding the i64/double distinction here (amongst
  // others). Fortunately the ABIs I know of where that matters don't use va_arg
  // anyway but that's not guaranteed.
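  // e.g. on a 64-bit target, both va_arg(ap, long) and va_arg(ap, double)
  // become a G_VAARG of s64, so an ABI that treats the two differently
  // cannot be modelled yet.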
  MIRBuilder.buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)},
                        {getOrCreateVReg(*U.getOperand(0)),
                         DL->getABITypeAlign(U.getType()).value()});
  return true;
}

bool IRTranslator::translateUnreachable(const User &U,
                                        MachineIRBuilder &MIRBuilder) {
  if (!MF->getTarget().Options.TrapUnreachable)
    return true;

  auto &UI = cast<UnreachableInst>(U);
  // We may be able to ignore unreachable behind a noreturn call.
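  // e.g. an `unreachable` directly after `call void @abort()` where @abort
  // is known noreturn needs no extra trap.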
  if (MF->getTarget().Options.NoTrapAfterNoreturn) {
    const BasicBlock &BB = *UI.getParent();
    if (&UI != &BB.front()) {
      BasicBlock::const_iterator PredI =
        std::prev(BasicBlock::const_iterator(UI));
      if (const CallInst *Call = dyn_cast<CallInst>(&*PredI)) {
        if (Call->doesNotReturn())
          return true;
      }
    }
  }

  MIRBuilder.buildIntrinsic(Intrinsic::trap, ArrayRef<Register>(), true);
  return true;
}

bool IRTranslator::translateInsertElement(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
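  // e.g. %v = insertelement <1 x i32> poison, i32 %s, i64 0 is just %s.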
  if (cast<FixedVectorType>(U.getType())->getNumElements() == 1)
    return translateCopy(U, *U.getOperand(1), MIRBuilder);

  Register Res = getOrCreateVReg(U);
  Register Val = getOrCreateVReg(*U.getOperand(0));
  Register Elt = getOrCreateVReg(*U.getOperand(1));
  Register Idx = getOrCreateVReg(*U.getOperand(2));
  MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
  return true;
}

bool IRTranslator::translateExtractElement(const User &U,
                                           MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
  if (cast<FixedVectorType>(U.getOperand(0)->getType())->getNumElements() == 1)
    return translateCopy(U, *U.getOperand(0), MIRBuilder);

  Register Res = getOrCreateVReg(U);
  Register Val = getOrCreateVReg(*U.getOperand(0));
  const auto &TLI = *MF->getSubtarget().getTargetLowering();
  unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits();
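  // Normalize the index to the width the target prefers: a constant index is
  // rebuilt at that width, anything else gets a G_ZEXT/G_TRUNC. For example,
  // if getVectorIdxTy is i64, an i32 index %i becomes G_ZEXT %i to s64.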
  Register Idx;
  if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
    if (CI->getBitWidth() != PreferredVecIdxWidth) {
      APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
      auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
      Idx = getOrCreateVReg(*NewIdxCI);
    }
  }
  if (!Idx)
    Idx = getOrCreateVReg(*U.getOperand(1));
  if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
    const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
    Idx = MIRBuilder.buildZExtOrTrunc(VecIdxTy, Idx).getReg(0);
  }
  MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
  return true;
}

bool IRTranslator::translateShuffleVector(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  ArrayRef<int> Mask;
  if (auto *SVI = dyn_cast<ShuffleVectorInst>(&U))
    Mask = SVI->getShuffleMask();
  else
    Mask = cast<ConstantExpr>(U).getShuffleMask();
  ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask);
  MIRBuilder
      .buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)},
                  {getOrCreateVReg(*U.getOperand(0)),
                   getOrCreateVReg(*U.getOperand(1))})
      .addShuffleMask(MaskAlloc);
  return true;
}

bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
  const PHINode &PI = cast<PHINode>(U);

  SmallVector<MachineInstr *, 4> Insts;
  for (auto Reg : getOrCreateVRegs(PI)) {
    auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
    Insts.push_back(MIB.getInstr());
  }

  PendingPHIs.emplace_back(&PI, std::move(Insts));
  return true;
}

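// A rough sketch of the mapping (register names illustrative):
//   %res = cmpxchg ptr %p, i32 %cmp, i32 %new seq_cst seq_cst
// becomes:
//   %old:_(s32), %ok:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS %p, %cmp, %new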
bool IRTranslator::translateAtomicCmpXchg(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  auto Flags = TLI.getAtomicMemOperandFlags(I, *DL);

  auto Res = getOrCreateVRegs(I);
  Register OldValRes = Res[0];
  Register SuccessRes = Res[1];
  Register Addr = getOrCreateVReg(*I.getPointerOperand());
  Register Cmp = getOrCreateVReg(*I.getCompareOperand());
  Register NewVal = getOrCreateVReg(*I.getNewValOperand());

  MIRBuilder.buildAtomicCmpXchgWithSuccess(
      OldValRes, SuccessRes, Addr, Cmp, NewVal,
      *MF->getMachineMemOperand(
          MachinePointerInfo(I.getPointerOperand()), Flags, MRI->getType(Cmp),
          getMemOpAlign(I), I.getAAMetadata(), nullptr, I.getSyncScopeID(),
          I.getSuccessOrdering(), I.getFailureOrdering()));
  return true;
}

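// Each atomicrmw operation maps 1:1 onto a generic opcode below, e.g.
//   atomicrmw add ptr %p, i32 1 seq_cst  -->  G_ATOMICRMW_ADD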
bool IRTranslator::translateAtomicRMW(const User &U,
                                      MachineIRBuilder &MIRBuilder) {
  const AtomicRMWInst &I = cast<AtomicRMWInst>(U);
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  auto Flags = TLI.getAtomicMemOperandFlags(I, *DL);

  Register Res = getOrCreateVReg(I);
  Register Addr = getOrCreateVReg(*I.getPointerOperand());
  Register Val = getOrCreateVReg(*I.getValOperand());

  unsigned Opcode = 0;
  switch (I.getOperation()) {
  default:
    return false;
  case AtomicRMWInst::Xchg:
    Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
    break;
  case AtomicRMWInst::Add:
    Opcode = TargetOpcode::G_ATOMICRMW_ADD;
    break;
  case AtomicRMWInst::Sub:
    Opcode = TargetOpcode::G_ATOMICRMW_SUB;
    break;
  case AtomicRMWInst::And:
    Opcode = TargetOpcode::G_ATOMICRMW_AND;
    break;
  case AtomicRMWInst::Nand:
    Opcode = TargetOpcode::G_ATOMICRMW_NAND;
    break;
  case AtomicRMWInst::Or:
    Opcode = TargetOpcode::G_ATOMICRMW_OR;
    break;
  case AtomicRMWInst::Xor:
    Opcode = TargetOpcode::G_ATOMICRMW_XOR;
    break;
  case AtomicRMWInst::Max:
    Opcode = TargetOpcode::G_ATOMICRMW_MAX;
    break;
  case AtomicRMWInst::Min:
    Opcode = TargetOpcode::G_ATOMICRMW_MIN;
    break;
  case AtomicRMWInst::UMax:
    Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
    break;
  case AtomicRMWInst::UMin:
    Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
    break;
  case AtomicRMWInst::FAdd:
    Opcode = TargetOpcode::G_ATOMICRMW_FADD;
    break;
  case AtomicRMWInst::FSub:
    Opcode = TargetOpcode::G_ATOMICRMW_FSUB;
    break;
  case AtomicRMWInst::FMax:
    Opcode = TargetOpcode::G_ATOMICRMW_FMAX;
    break;
  case AtomicRMWInst::FMin:
    Opcode = TargetOpcode::G_ATOMICRMW_FMIN;
    break;
  case AtomicRMWInst::UIncWrap:
    Opcode = TargetOpcode::G_ATOMICRMW_UINC_WRAP;
    break;
  case AtomicRMWInst::UDecWrap:
    Opcode = TargetOpcode::G_ATOMICRMW_UDEC_WRAP;
    break;
  }

  MIRBuilder.buildAtomicRMW(
      Opcode, Res, Addr, Val,
      *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
                                Flags, MRI->getType(Val), getMemOpAlign(I),
                                I.getAAMetadata(), nullptr, I.getSyncScopeID(),
                                I.getOrdering()));
  return true;
}

bool IRTranslator::translateFence(const User &U,
                                  MachineIRBuilder &MIRBuilder) {
  const FenceInst &Fence = cast<FenceInst>(U);
  MIRBuilder.buildFence(static_cast<unsigned>(Fence.getOrdering()),
                        Fence.getSyncScopeID());
  return true;
}

bool IRTranslator::translateFreeze(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  const ArrayRef<Register> DstRegs = getOrCreateVRegs(U);
  const ArrayRef<Register> SrcRegs = getOrCreateVRegs(*U.getOperand(0));

  assert(DstRegs.size() == SrcRegs.size() &&
         "Freeze with different source and destination type?");

  for (unsigned I = 0; I < DstRegs.size(); ++I) {
    MIRBuilder.buildFreeze(DstRegs[I], SrcRegs[I]);
  }

  return true;
}

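// G_PHIs are created empty in translatePHI and filled in here, once every
// block has been translated and all incoming vregs exist. The end result is
// an ordinary machine PHI, e.g.
//   %res:_(s32) = G_PHI %a(s32), %bb.1, %b(s32), %bb.2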
void IRTranslator::finishPendingPhis() {
#ifndef NDEBUG
  DILocationVerifier Verifier;
  GISelObserverWrapper WrapperObserver(&Verifier);
  RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
#endif // ifndef NDEBUG
  for (auto &Phi : PendingPHIs) {
    const PHINode *PI = Phi.first;
    ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
    MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent();
    EntryBuilder->setDebugLoc(PI->getDebugLoc());
#ifndef NDEBUG
    Verifier.setCurrentInst(PI);
#endif // ifndef NDEBUG

    SmallSet<const MachineBasicBlock *, 16> SeenPreds;
    for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
      auto IRPred = PI->getIncomingBlock(i);
      ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
      for (auto *Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
        if (SeenPreds.count(Pred) || !PhiMBB->isPredecessor(Pred))
          continue;
        SeenPreds.insert(Pred);
        for (unsigned j = 0; j < ValRegs.size(); ++j) {
          MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
          MIB.addUse(ValRegs[j]);
          MIB.addMBB(Pred);
        }
      }
    }
  }
}

bool IRTranslator::translate(const Instruction &Inst) {
  CurBuilder->setDebugLoc(Inst.getDebugLoc());
  CurBuilder->setPCSections(Inst.getMetadata(LLVMContext::MD_pcsections));

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  if (TLI.fallBackToDAGISel(Inst))
    return false;

  switch (Inst.getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
  case Instruction::OPCODE:                                                    \
    return translate##OPCODE(Inst, *CurBuilder.get());
#include "llvm/IR/Instruction.def"
  default:
    return false;
  }
}

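// Constants are materialized in the entry block so that they dominate all
// their uses. e.g. a <4 x i32> zeroinitializer becomes a G_BUILD_VECTOR of
// four G_CONSTANT i32 0 values, all emitted via EntryBuilder.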
bool IRTranslator::translate(const Constant &C, Register Reg) {
  // We only emit constants into the entry block from here. To prevent jumpy
  // debug behaviour, remove the debug line.
  if (auto CurrInstDL = CurBuilder->getDL())
    EntryBuilder->setDebugLoc(DebugLoc());

  if (auto CI = dyn_cast<ConstantInt>(&C))
    EntryBuilder->buildConstant(Reg, *CI);
  else if (auto CF = dyn_cast<ConstantFP>(&C))
    EntryBuilder->buildFConstant(Reg, *CF);
  else if (isa<UndefValue>(C))
    EntryBuilder->buildUndef(Reg);
  else if (isa<ConstantPointerNull>(C))
    EntryBuilder->buildConstant(Reg, 0);
  else if (auto GV = dyn_cast<GlobalValue>(&C))
    EntryBuilder->buildGlobalValue(Reg, GV);
  else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
    if (!isa<FixedVectorType>(CAZ->getType()))
      return false;
    // Return the scalar if it is a <1 x Ty> vector.
    unsigned NumElts = CAZ->getElementCount().getFixedValue();
    if (NumElts == 1)
      return translateCopy(C, *CAZ->getElementValue(0u), *EntryBuilder);
    SmallVector<Register, 4> Ops;
    for (unsigned I = 0; I < NumElts; ++I) {
      Constant &Elt = *CAZ->getElementValue(I);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
    // Return the scalar if it is a <1 x Ty> vector.
    if (CV->getNumElements() == 1)
      return translateCopy(C, *CV->getElementAsConstant(0), *EntryBuilder);
    SmallVector<Register, 4> Ops;
    for (unsigned i = 0; i < CV->getNumElements(); ++i) {
      Constant &Elt = *CV->getElementAsConstant(i);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
    switch (CE->getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
  case Instruction::OPCODE:                                                    \
    return translate##OPCODE(*CE, *EntryBuilder.get());
#include "llvm/IR/Instruction.def"
    default:
      return false;
    }
  } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
    if (CV->getNumOperands() == 1)
      return translateCopy(C, *CV->getOperand(0), *EntryBuilder);
    SmallVector<Register, 4> Ops;
    for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
      Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
    }
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
    EntryBuilder->buildBlockAddress(Reg, BA);
  } else
    return false;

  return true;
}

bool IRTranslator::finalizeBasicBlock(const BasicBlock &BB,
                                      MachineBasicBlock &MBB) {
  for (auto &BTB : SL->BitTestCases) {
    // Emit header first, if it wasn't already emitted.
    if (!BTB.Emitted)
      emitBitTestHeader(BTB, BTB.Parent);

    BranchProbability UnhandledProb = BTB.Prob;
    for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
      UnhandledProb -= BTB.Cases[j].ExtraProb;
      // Set the current basic block to the mbb we wish to insert the code into
      MachineBasicBlock *MBB = BTB.Cases[j].ThisBB;
      // If all cases cover a contiguous range, it is not necessary to jump to
      // the default block after the last bit test fails. This is because the
      // range check during bit test header creation has guaranteed that every
      // case here doesn't go outside the range. In this case, there is no need
      // to perform the last bit test, as it will always be true. Instead, make
      // the second-to-last bit-test fall through to the target of the last bit
      // test, and delete the last bit test.
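      // For example, if the cases cover every value in [0, 31], a value that
      // fails all earlier bit tests must satisfy the final one, so that test
      // is redundant.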

      MachineBasicBlock *NextMBB;
      if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
        // Second-to-last bit-test with contiguous range: fall through to the
        // target of the final bit test.
        NextMBB = BTB.Cases[j + 1].TargetBB;
      } else if (j + 1 == ej) {
        // For the last bit test, fall through to Default.
        NextMBB = BTB.Default;
      } else {
        // Otherwise, fall through to the next bit test.
        NextMBB = BTB.Cases[j + 1].ThisBB;
      }

      emitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j], MBB);

      if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
        // We need to record the replacement phi edge here that normally
        // happens in emitBitTestCase before we delete the case, otherwise the
        // phi edge will be lost.
        addMachineCFGPred({BTB.Parent->getBasicBlock(),
                           BTB.Cases[ej - 1].TargetBB->getBasicBlock()},
                          MBB);
        // Since we're not going to use the final bit test, remove it.
        BTB.Cases.pop_back();
        break;
      }
    }
    // This is the default BB. There are two jumps to it: from the header BB
    // and from the last case BB, unless the latter was skipped.
    CFGEdge HeaderToDefaultEdge = {BTB.Parent->getBasicBlock(),
                                   BTB.Default->getBasicBlock()};
    addMachineCFGPred(HeaderToDefaultEdge, BTB.Parent);
    if (!BTB.ContiguousRange) {
      addMachineCFGPred(HeaderToDefaultEdge, BTB.Cases.back().ThisBB);
    }
  }
  SL->BitTestCases.clear();

  for (auto &JTCase : SL->JTCases) {
    // Emit header first, if it wasn't already emitted.
    if (!JTCase.first.Emitted)
      emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);

    emitJumpTable(JTCase.second, JTCase.second.MBB);
  }
  SL->JTCases.clear();

  for (auto &SwCase : SL->SwitchCases)
    emitSwitchCase(SwCase, &CurBuilder->getMBB(), *CurBuilder);
  SL->SwitchCases.clear();

  // Check if we need to generate stack-protector guard checks.
  StackProtector &SP = getAnalysis<StackProtector>();
  if (SP.shouldEmitSDCheck(BB)) {
    const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
    bool FunctionBasedInstrumentation =
        TLI.getSSPStackGuardCheck(*MF->getFunction().getParent());
    SPDescriptor.initialize(&BB, &MBB, FunctionBasedInstrumentation);
  }
  // Handle stack protector.
  if (SPDescriptor.shouldEmitFunctionBasedCheckStackProtector()) {
    LLVM_DEBUG(dbgs() << "Unimplemented stack protector case\n");
    return false;
  } else if (SPDescriptor.shouldEmitStackProtector()) {
    MachineBasicBlock *ParentMBB = SPDescriptor.getParentMBB();
    MachineBasicBlock *SuccessMBB = SPDescriptor.getSuccessMBB();

    // Find the split point to split the parent mbb. At the same time copy all
    // physical registers used in the tail of parent mbb into virtual registers
    // before the split point and back into physical registers after the split
    // point. This prevents us needing to deal with Live-ins and many other
    // register allocation issues caused by us splitting the parent mbb. The
    // register allocator will clean up said virtual copies later on.
    MachineBasicBlock::iterator SplitPoint = findSplitPointForStackProtector(
        ParentMBB, *MF->getSubtarget().getInstrInfo());

    // Splice the terminator of ParentMBB into SuccessMBB.
    SuccessMBB->splice(SuccessMBB->end(), ParentMBB, SplitPoint,
                       ParentMBB->end());

    // Add compare/jump on neq/jump to the parent BB.
    if (!emitSPDescriptorParent(SPDescriptor, ParentMBB))
      return false;

    // CodeGen Failure MBB if we have not codegened it yet.
    MachineBasicBlock *FailureMBB = SPDescriptor.getFailureMBB();
    if (FailureMBB->empty()) {
      if (!emitSPDescriptorFailure(SPDescriptor, FailureMBB))
        return false;
    }

    // Clear the Per-BB State.
    SPDescriptor.resetPerBBState();
  }
  return true;
}

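// A rough sketch of the sequence appended to ParentBB (register names
// illustrative):
//   %slot:_(p0) = G_FRAME_INDEX %stack.protector
//   %stored     = volatile load from %slot
//   %guard      = LOAD_STACK_GUARD, or a volatile load of the guard variable
//   %ne:_(s1)   = G_ICMP intpred(ne), %guard, %stored
//   G_BRCOND %ne, %failure-bb
//   G_BR %success-bb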
bool IRTranslator::emitSPDescriptorParent(StackProtectorDescriptor &SPD,
                                          MachineBasicBlock *ParentBB) {
  CurBuilder->setInsertPt(*ParentBB, ParentBB->end());
  // First create the loads to the guard/stack slot for the comparison.
  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
  Type *PtrIRTy = Type::getInt8PtrTy(MF->getFunction().getContext());
  const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
  LLT PtrMemTy = getLLTForMVT(TLI.getPointerMemTy(*DL));

  MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
  int FI = MFI.getStackProtectorIndex();

  Register Guard;
  Register StackSlotPtr = CurBuilder->buildFrameIndex(PtrTy, FI).getReg(0);
  const Module &M = *ParentBB->getParent()->getFunction().getParent();
  Align Align = DL->getPrefTypeAlign(Type::getInt8PtrTy(M.getContext()));

  // Generate code to load the content of the guard slot.
  Register GuardVal =
      CurBuilder
          ->buildLoad(PtrMemTy, StackSlotPtr,
                      MachinePointerInfo::getFixedStack(*MF, FI), Align,
                      MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile)
          .getReg(0);

  if (TLI.useStackGuardXorFP()) {
    LLVM_DEBUG(dbgs() << "Stack protector xor'ing with FP not yet implemented");
    return false;
  }

  // Retrieve guard check function, nullptr if instrumentation is inlined.
  if (const Function *GuardCheckFn = TLI.getSSPStackGuardCheck(M)) {
    // This path is currently untestable on GlobalISel, since the only platform
    // that needs this seems to be Windows, and we fall back on that currently.
    // The code still lives here in case that changes.
    // Silence warning about unused variable until the code below that uses
    // 'GuardCheckFn' is enabled.
    (void)GuardCheckFn;
    return false;
#if 0
    // The target provides a guard check function to validate the guard value.
    // Generate a call to that function with the content of the guard slot as
    // argument.
    FunctionType *FnTy = GuardCheckFn->getFunctionType();
    assert(FnTy->getNumParams() == 1 && "Invalid function signature");
    ISD::ArgFlagsTy Flags;
    if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))
      Flags.setInReg();
    CallLowering::ArgInfo GuardArgInfo(
        {GuardVal, FnTy->getParamType(0), {Flags}});

    CallLowering::CallLoweringInfo Info;
    Info.OrigArgs.push_back(GuardArgInfo);
    Info.CallConv = GuardCheckFn->getCallingConv();
    Info.Callee = MachineOperand::CreateGA(GuardCheckFn, 0);
    Info.OrigRet = {Register(), FnTy->getReturnType()};
    if (!CLI->lowerCall(MIRBuilder, Info)) {
      LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector check\n");
      return false;
    }
    return true;
#endif
  }

  // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
  // Otherwise, emit a volatile load to retrieve the stack guard value.
  if (TLI.useLoadStackGuardNode()) {
    Guard =
        MRI->createGenericVirtualRegister(LLT::scalar(PtrTy.getSizeInBits()));
    getStackGuard(Guard, *CurBuilder);
  } else {
    // TODO: test using android subtarget when we support @llvm.thread.pointer.
    const Value *IRGuard = TLI.getSDagStackGuard(M);
    Register GuardPtr = getOrCreateVReg(*IRGuard);

    Guard = CurBuilder
                ->buildLoad(PtrMemTy, GuardPtr,
                            MachinePointerInfo::getFixedStack(*MF, FI), Align,
                            MachineMemOperand::MOLoad |
                                MachineMemOperand::MOVolatile)
                .getReg(0);
  }

  // Perform the comparison.
  auto Cmp =
      CurBuilder->buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Guard, GuardVal);
  // If the guard/stackslot do not equal, branch to failure MBB.
  CurBuilder->buildBrCond(Cmp, *SPD.getFailureMBB());
  // Otherwise branch to success MBB.
  CurBuilder->buildBr(*SPD.getSuccessMBB());
  return true;
}

bool IRTranslator::emitSPDescriptorFailure(StackProtectorDescriptor &SPD,
                                           MachineBasicBlock *FailureBB) {
  CurBuilder->setInsertPt(*FailureBB, FailureBB->end());
  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();

  const RTLIB::Libcall Libcall = RTLIB::STACKPROTECTOR_CHECK_FAIL;
  const char *Name = TLI.getLibcallName(Libcall);

  CallLowering::CallLoweringInfo Info;
  Info.CallConv = TLI.getLibcallCallingConv(Libcall);
  Info.Callee = MachineOperand::CreateES(Name);
  Info.OrigRet = {Register(), Type::getVoidTy(MF->getFunction().getContext()),
                  0};
  if (!CLI->lowerCall(*CurBuilder, Info)) {
    LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector fail\n");
    return false;
  }

  // On PS4/PS5, the "return address" must still be within the calling
  // function, even if it's at the very end, so emit an explicit TRAP here.
  // WebAssembly needs an unreachable instruction after a non-returning call,
  // because the function return type can be different from __stack_chk_fail's
  // return type (void).
  const TargetMachine &TM = MF->getTarget();
  if (TM.getTargetTriple().isPS() || TM.getTargetTriple().isWasm()) {
    LLVM_DEBUG(dbgs() << "Unhandled trap emission for stack protector fail\n");
    return false;
  }
  return true;
}

void IRTranslator::finalizeFunction() {
  // Release the memory used by the different maps we
  // needed during the translation.
  PendingPHIs.clear();
  VMap.reset();
  FrameIndices.clear();
  MachinePreds.clear();
  // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
  // to avoid accessing free'd memory (in runOnMachineFunction) and to avoid
  // destroying it twice (in ~IRTranslator() and ~LLVMContext()).
  EntryBuilder.reset();
  CurBuilder.reset();
  FuncInfo.clear();
  SPDescriptor.resetPerFunctionState();
}

/// Returns true if a BasicBlock \p BB within a variadic function contains a
/// variadic musttail call.
static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB) {
  if (!IsVarArg)
    return false;

  // Walk the block backwards, because tail calls usually only appear at the
  // end of a block.
  return llvm::any_of(llvm::reverse(BB), [](const Instruction &I) {
    const auto *CI = dyn_cast<CallInst>(&I);
    return CI && CI->isMustTailCall();
  });
}

bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
  MF = &CurMF;
  const Function &F = MF->getFunction();
  GISelCSEAnalysisWrapper &Wrapper =
      getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
  // Set the CSEConfig and run the analysis.
  GISelCSEInfo *CSEInfo = nullptr;
  TPC = &getAnalysis<TargetPassConfig>();
  bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
                       ? EnableCSEInIRTranslator
                       : TPC->isGISelCSEEnabled();

  if (EnableCSE) {
    EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
    CSEInfo = &Wrapper.get(TPC->getCSEConfig());
    EntryBuilder->setCSEInfo(CSEInfo);
    CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
    CurBuilder->setCSEInfo(CSEInfo);
  } else {
    EntryBuilder = std::make_unique<MachineIRBuilder>();
    CurBuilder = std::make_unique<MachineIRBuilder>();
  }
  CLI = MF->getSubtarget().getCallLowering();
  CurBuilder->setMF(*MF);
  EntryBuilder->setMF(*MF);
  MRI = &MF->getRegInfo();
  DL = &F.getParent()->getDataLayout();
  ORE = std::make_unique<OptimizationRemarkEmitter>(&F);
  const TargetMachine &TM = MF->getTarget();
  TM.resetTargetOptions(F);
  EnableOpts = OptLevel != CodeGenOpt::None && !skipFunction(F);
  FuncInfo.MF = MF;
  if (EnableOpts) {
    AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    FuncInfo.BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
  } else {
    AA = nullptr;
    FuncInfo.BPI = nullptr;
  }

  AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
      MF->getFunction());
  LibInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  FuncInfo.CanLowerReturn = CLI->checkReturnTypeForCallConv(*MF);

  const auto &TLI = *MF->getSubtarget().getTargetLowering();

  SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);
  SL->init(TLI, TM, *DL);

  assert(PendingPHIs.empty() && "stale PHIs");

  // Targets which want to use big endian can enable it using
  // enableBigEndian().
  if (!DL->isLittleEndian() && !CLI->enableBigEndian()) {
    // Currently we don't properly handle big endian code.
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to translate in big endian mode";
    reportTranslationError(*MF, *TPC, *ORE, R);
  }

  // Release the per-function state when we return, whether we succeeded or
  // not.
  auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });

  // Set up a separate basic-block for the arguments and constants.
  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
  MF->push_back(EntryBB);
  EntryBuilder->setMBB(*EntryBB);

  DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHI()->getDebugLoc();
  SwiftError.setFunction(CurMF);
  SwiftError.createEntriesInEntryBlock(DbgLoc);

  bool IsVarArg = F.isVarArg();
  bool HasMustTailInVarArgFn = false;

  // Create all blocks, in IR order, to preserve the layout.
  for (const BasicBlock &BB : F) {
    auto *&MBB = BBToMBB[&BB];

    MBB = MF->CreateMachineBasicBlock(&BB);
    MF->push_back(MBB);

    if (BB.hasAddressTaken())
      MBB->setAddressTakenIRBlock(const_cast<BasicBlock *>(&BB));

    if (!HasMustTailInVarArgFn)
      HasMustTailInVarArgFn = checkForMustTailInVarArgFn(IsVarArg, BB);
  }

  MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn);

  // Make our arguments/constants entry block fallthrough to the IR entry
  // block.
  EntryBB->addSuccessor(&getMBB(F.front()));

  if (CLI->fallBackToDAGISel(*MF)) {
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower function: " << ore::NV("Prototype", F.getType());
    reportTranslationError(*MF, *TPC, *ORE, R);
    return false;
  }

  // Lower the actual args into this basic block.
  SmallVector<ArrayRef<Register>, 8> VRegArgs;
  for (const Argument &Arg : F.args()) {
    if (DL->getTypeStoreSize(Arg.getType()).isZero())
      continue; // Don't handle zero sized types.
    ArrayRef<Register> VRegs = getOrCreateVRegs(Arg);
    VRegArgs.push_back(VRegs);

    if (Arg.hasSwiftErrorAttr()) {
      assert(VRegs.size() == 1 && "Too many vregs for Swift error");
      SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
    }
  }

  if (!CLI->lowerFormalArguments(*EntryBuilder, F, VRegArgs, FuncInfo)) {
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
    reportTranslationError(*MF, *TPC, *ORE, R);
    return false;
  }

  // Need to visit defs before uses when translating instructions.
  GISelObserverWrapper WrapperObserver;
  if (EnableCSE && CSEInfo)
    WrapperObserver.addObserver(CSEInfo);
  {
    ReversePostOrderTraversal<const Function *> RPOT(&F);
#ifndef NDEBUG
    DILocationVerifier Verifier;
    WrapperObserver.addObserver(&Verifier);
#endif // ifndef NDEBUG
    RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
    RAIIMFObserverInstaller ObsInstall(*MF, WrapperObserver);
    for (const BasicBlock *BB : RPOT) {
      MachineBasicBlock &MBB = getMBB(*BB);
      // Set the insertion point of all the following translations to
      // the end of this basic block.
      CurBuilder->setMBB(MBB);
      HasTailCall = false;
      for (const Instruction &Inst : *BB) {
        // If we translated a tail call in the last step, then we know
        // everything after the call is either a return, or something that is
        // handled by the call itself. (E.g. a lifetime marker or assume
        // intrinsic.) In this case, we should stop translating the block and
        // move on.
        if (HasTailCall)
          break;
#ifndef NDEBUG
        Verifier.setCurrentInst(&Inst);
#endif // ifndef NDEBUG
        if (translate(Inst))
          continue;

        OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                   Inst.getDebugLoc(), BB);
        R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);

        if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
          std::string InstStrStorage;
          raw_string_ostream InstStr(InstStrStorage);
          InstStr << Inst;

          R << ": '" << InstStr.str() << "'";
        }

        reportTranslationError(*MF, *TPC, *ORE, R);
        return false;
      }

      if (!finalizeBasicBlock(*BB, MBB)) {
        OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                   BB->getTerminator()->getDebugLoc(), BB);
        R << "unable to translate basic block";
        reportTranslationError(*MF, *TPC, *ORE, R);
        return false;
      }
    }
#ifndef NDEBUG
    WrapperObserver.removeObserver(&Verifier);
#endif
  }

  finishPendingPhis();

  SwiftError.propagateVRegs();

  // Merge the argument lowering and constants block with its single
  // successor, the LLVM-IR entry block.  We want the basic block to
  // be maximal.
  assert(EntryBB->succ_size() == 1 &&
         "Custom BB used for lowering should have only one successor");
  // Get the successor of the current entry block.
  MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
  assert(NewEntryBB.pred_size() == 1 &&
         "LLVM-IR entry block has a predecessor!?");
  // Move all the instructions from the current entry block to the
  // new entry block.
  NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
                    EntryBB->end());

  // Update the live-in information for the new entry block.
  for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
    NewEntryBB.addLiveIn(LiveIn);
  NewEntryBB.sortUniqueLiveIns();

  // Get rid of the now empty basic block.
  EntryBB->removeSuccessor(&NewEntryBB);
  MF->remove(EntryBB);
  MF->deleteMachineBasicBlock(EntryBB);

  assert(&MF->front() == &NewEntryBB &&
         "New entry wasn't next in the list of basic blocks!");

  // Initialize stack protector information.
  StackProtector &SP = getAnalysis<StackProtector>();
  SP.copyToMachineFrameInfo(MF->getFrameInfo());

  return false;
}