1//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements the IRTranslator class.
10//===----------------------------------------------------------------------===//
11
12#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
13#include "llvm/ADT/PostOrderIterator.h"
14#include "llvm/ADT/STLExtras.h"
15#include "llvm/ADT/ScopeExit.h"
16#include "llvm/ADT/SmallSet.h"
17#include "llvm/ADT/SmallVector.h"
18#include "llvm/Analysis/BranchProbabilityInfo.h"
19#include "llvm/Analysis/OptimizationRemarkEmitter.h"
20#include "llvm/Analysis/ValueTracking.h"
21#include "llvm/CodeGen/Analysis.h"
22#include "llvm/CodeGen/FunctionLoweringInfo.h"
23#include "llvm/CodeGen/GlobalISel/CallLowering.h"
24#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
25#include "llvm/CodeGen/LowLevelType.h"
26#include "llvm/CodeGen/MachineBasicBlock.h"
27#include "llvm/CodeGen/MachineFrameInfo.h"
28#include "llvm/CodeGen/MachineFunction.h"
29#include "llvm/CodeGen/MachineInstrBuilder.h"
30#include "llvm/CodeGen/MachineMemOperand.h"
31#include "llvm/CodeGen/MachineOperand.h"
32#include "llvm/CodeGen/MachineRegisterInfo.h"
33#include "llvm/CodeGen/StackProtector.h"
34#include "llvm/CodeGen/TargetFrameLowering.h"
35#include "llvm/CodeGen/TargetInstrInfo.h"
36#include "llvm/CodeGen/TargetLowering.h"
37#include "llvm/CodeGen/TargetPassConfig.h"
38#include "llvm/CodeGen/TargetRegisterInfo.h"
39#include "llvm/CodeGen/TargetSubtargetInfo.h"
40#include "llvm/IR/BasicBlock.h"
41#include "llvm/IR/CFG.h"
42#include "llvm/IR/Constant.h"
43#include "llvm/IR/Constants.h"
44#include "llvm/IR/DataLayout.h"
45#include "llvm/IR/DebugInfo.h"
46#include "llvm/IR/DerivedTypes.h"
47#include "llvm/IR/Function.h"
48#include "llvm/IR/GetElementPtrTypeIterator.h"
49#include "llvm/IR/InlineAsm.h"
50#include "llvm/IR/InstrTypes.h"
51#include "llvm/IR/Instructions.h"
52#include "llvm/IR/IntrinsicInst.h"
53#include "llvm/IR/Intrinsics.h"
54#include "llvm/IR/LLVMContext.h"
55#include "llvm/IR/Metadata.h"
56#include "llvm/IR/Type.h"
57#include "llvm/IR/User.h"
58#include "llvm/IR/Value.h"
59#include "llvm/InitializePasses.h"
60#include "llvm/MC/MCContext.h"
61#include "llvm/Pass.h"
62#include "llvm/Support/Casting.h"
63#include "llvm/Support/CodeGen.h"
64#include "llvm/Support/Debug.h"
65#include "llvm/Support/ErrorHandling.h"
66#include "llvm/Support/LowLevelTypeImpl.h"
67#include "llvm/Support/MathExtras.h"
68#include "llvm/Support/raw_ostream.h"
69#include "llvm/Target/TargetIntrinsicInfo.h"
70#include "llvm/Target/TargetMachine.h"
71#include <algorithm>
72#include <cassert>
73#include <cstdint>
74#include <iterator>
75#include <string>
76#include <utility>
77#include <vector>
78
79#define DEBUG_TYPE "irtranslator"
80
81using namespace llvm;
82
83static cl::opt<bool>
84    EnableCSEInIRTranslator("enable-cse-in-irtranslator",
85                            cl::desc("Should enable CSE in irtranslator"),
86                            cl::Optional, cl::init(false));
87char IRTranslator::ID = 0;
88
89INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
90                false, false)
91INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
92INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
93INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
94                false, false)
95
96static void reportTranslationError(MachineFunction &MF,
97                                   const TargetPassConfig &TPC,
98                                   OptimizationRemarkEmitter &ORE,
99                                   OptimizationRemarkMissed &R) {
100  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
101
102  // Print the function name explicitly if we don't have a debug location (which
103  // makes the diagnostic less useful) or if we're going to emit a raw error.
104  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
105    R << (" (in function: " + MF.getName() + ")").str();
106
107  if (TPC.isGlobalISelAbortEnabled())
108    report_fatal_error(R.getMsg());
109  else
110    ORE.emit(R);
111}
112
113IRTranslator::IRTranslator() : MachineFunctionPass(ID) { }
114
115#ifndef NDEBUG
116namespace {
117/// Verify that every instruction created has the same DILocation as the
118/// instruction being translated.
119class DILocationVerifier : public GISelChangeObserver {
120  const Instruction *CurrInst = nullptr;
121
122public:
123  DILocationVerifier() = default;
124  ~DILocationVerifier() = default;
125
126  const Instruction *getCurrentInst() const { return CurrInst; }
127  void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }
128
129  void erasingInstr(MachineInstr &MI) override {}
130  void changingInstr(MachineInstr &MI) override {}
131  void changedInstr(MachineInstr &MI) override {}
132
133  void createdInstr(MachineInstr &MI) override {
134    assert(getCurrentInst() && "Inserted instruction without a current MI");
135
136    // Only print the check message if we're actually checking it.
137#ifndef NDEBUG
138    LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
139                      << " was copied to " << MI);
140#endif
141    // We allow insts in the entry block to have a debug loc line of 0 because
142    // they could have originated from constants, and we don't want a jumpy
143    // debug experience.
144    assert((CurrInst->getDebugLoc() == MI.getDebugLoc() ||
145            MI.getDebugLoc().getLine() == 0) &&
146           "Line info was not transferred to all instructions");
147  }
148};
149} // namespace
150#endif // ifndef NDEBUG
151
152
153void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
154  AU.addRequired<StackProtector>();
155  AU.addRequired<TargetPassConfig>();
156  AU.addRequired<GISelCSEAnalysisWrapperPass>();
157  getSelectionDAGFallbackAnalysisUsage(AU);
158  MachineFunctionPass::getAnalysisUsage(AU);
159}
160
161IRTranslator::ValueToVRegInfo::VRegListT &
162IRTranslator::allocateVRegs(const Value &Val) {
163  assert(!VMap.contains(Val) && "Value already allocated in VMap");
164  auto *Regs = VMap.getVRegs(Val);
165  auto *Offsets = VMap.getOffsets(Val);
166  SmallVector<LLT, 4> SplitTys;
167  computeValueLLTs(*DL, *Val.getType(), SplitTys,
168                   Offsets->empty() ? Offsets : nullptr);
169  for (unsigned i = 0; i < SplitTys.size(); ++i)
170    Regs->push_back(0);
171  return *Regs;
172}
173
174ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
175  auto VRegsIt = VMap.findVRegs(Val);
176  if (VRegsIt != VMap.vregs_end())
177    return *VRegsIt->second;
178
179  if (Val.getType()->isVoidTy())
180    return *VMap.getVRegs(Val);
181
182  // Create entry for this type.
183  auto *VRegs = VMap.getVRegs(Val);
184  auto *Offsets = VMap.getOffsets(Val);
185
186  assert(Val.getType()->isSized() &&
187         "Don't know how to create an empty vreg");
188
189  SmallVector<LLT, 4> SplitTys;
190  computeValueLLTs(*DL, *Val.getType(), SplitTys,
191                   Offsets->empty() ? Offsets : nullptr);
192
193  if (!isa<Constant>(Val)) {
194    for (auto Ty : SplitTys)
195      VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
196    return *VRegs;
197  }
198
199  if (Val.getType()->isAggregateType()) {
200    // UndefValue, ConstantAggregateZero
201    auto &C = cast<Constant>(Val);
202    unsigned Idx = 0;
203    while (auto Elt = C.getAggregateElement(Idx++)) {
204      auto EltRegs = getOrCreateVRegs(*Elt);
205      llvm::copy(EltRegs, std::back_inserter(*VRegs));
206    }
207  } else {
208    assert(SplitTys.size() == 1 && "unexpectedly split LLT");
209    VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
210    bool Success = translate(cast<Constant>(Val), VRegs->front());
211    if (!Success) {
212      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
213                                 MF->getFunction().getSubprogram(),
214                                 &MF->getFunction().getEntryBlock());
215      R << "unable to translate constant: " << ore::NV("Type", Val.getType());
216      reportTranslationError(*MF, *TPC, *ORE, R);
217      return *VRegs;
218    }
219  }
220
221  return *VRegs;
222}
223
224int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
225  if (FrameIndices.find(&AI) != FrameIndices.end())
226    return FrameIndices[&AI];
227
228  uint64_t ElementSize = DL->getTypeAllocSize(AI.getAllocatedType());
229  uint64_t Size =
230      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();
231
232  // Always allocate at least one byte.
233  Size = std::max<uint64_t>(Size, 1u);
234
235  unsigned Alignment = AI.getAlignment();
236  if (!Alignment)
237    Alignment = DL->getABITypeAlignment(AI.getAllocatedType());
238
239  int &FI = FrameIndices[&AI];
240  FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
241  return FI;
242}
243
244unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
245  unsigned Alignment = 0;
246  Type *ValTy = nullptr;
247  if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
248    Alignment = SI->getAlignment();
249    ValTy = SI->getValueOperand()->getType();
250  } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
251    Alignment = LI->getAlignment();
252    ValTy = LI->getType();
253  } else if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) {
254    // TODO(PR27168): This instruction has no alignment attribute, but unlike
255    // the default alignment for load/store, the default here is to assume
256    // it has NATURAL alignment, not DataLayout-specified alignment.
257    const DataLayout &DL = AI->getModule()->getDataLayout();
258    Alignment = DL.getTypeStoreSize(AI->getCompareOperand()->getType());
259    ValTy = AI->getCompareOperand()->getType();
260  } else if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) {
261    // TODO(PR27168): This instruction has no alignment attribute, but unlike
262    // the default alignment for load/store, the default here is to assume
263    // it has NATURAL alignment, not DataLayout-specified alignment.
264    const DataLayout &DL = AI->getModule()->getDataLayout();
265    Alignment = DL.getTypeStoreSize(AI->getValOperand()->getType());
266    ValTy = AI->getType();
267  } else {
268    OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
269    R << "unable to translate memop: " << ore::NV("Opcode", &I);
270    reportTranslationError(*MF, *TPC, *ORE, R);
271    return 1;
272  }
273
274  return Alignment ? Alignment : DL->getABITypeAlignment(ValTy);
275}
276
277MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
278  MachineBasicBlock *&MBB = BBToMBB[&BB];
279  assert(MBB && "BasicBlock was not encountered before");
280  return *MBB;
281}
282
283void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
284  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
285  MachinePreds[Edge].push_back(NewPred);
286}
287
288bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
289                                     MachineIRBuilder &MIRBuilder) {
290  // Get or create a virtual register for each value.
291  // Unless the value is a Constant => loadimm cst?
292  // or inline constant each time?
293  // Creation of a virtual register needs to have a size.
294  Register Op0 = getOrCreateVReg(*U.getOperand(0));
295  Register Op1 = getOrCreateVReg(*U.getOperand(1));
296  Register Res = getOrCreateVReg(U);
297  uint16_t Flags = 0;
298  if (isa<Instruction>(U)) {
299    const Instruction &I = cast<Instruction>(U);
300    Flags = MachineInstr::copyFlagsFromInstruction(I);
301  }
302
303  MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags);
304  return true;
305}
306
307bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
308  // -0.0 - X --> G_FNEG
309  if (isa<Constant>(U.getOperand(0)) &&
310      U.getOperand(0) == ConstantFP::getZeroValueForNegation(U.getType())) {
311    Register Op1 = getOrCreateVReg(*U.getOperand(1));
312    Register Res = getOrCreateVReg(U);
313    uint16_t Flags = 0;
314    if (isa<Instruction>(U)) {
315      const Instruction &I = cast<Instruction>(U);
316      Flags = MachineInstr::copyFlagsFromInstruction(I);
317    }
318    // Negate the last operand of the FSUB
319    MIRBuilder.buildInstr(TargetOpcode::G_FNEG, {Res}, {Op1}, Flags);
320    return true;
321  }
322  return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
323}
324
325bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
326  Register Op0 = getOrCreateVReg(*U.getOperand(0));
327  Register Res = getOrCreateVReg(U);
328  uint16_t Flags = 0;
329  if (isa<Instruction>(U)) {
330    const Instruction &I = cast<Instruction>(U);
331    Flags = MachineInstr::copyFlagsFromInstruction(I);
332  }
333  MIRBuilder.buildInstr(TargetOpcode::G_FNEG, {Res}, {Op0}, Flags);
334  return true;
335}
336
337bool IRTranslator::translateCompare(const User &U,
338                                    MachineIRBuilder &MIRBuilder) {
339  auto *CI = dyn_cast<CmpInst>(&U);
340  Register Op0 = getOrCreateVReg(*U.getOperand(0));
341  Register Op1 = getOrCreateVReg(*U.getOperand(1));
342  Register Res = getOrCreateVReg(U);
343  CmpInst::Predicate Pred =
344      CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
345                                    cast<ConstantExpr>(U).getPredicate());
346  if (CmpInst::isIntPredicate(Pred))
347    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
348  else if (Pred == CmpInst::FCMP_FALSE)
349    MIRBuilder.buildCopy(
350        Res, getOrCreateVReg(*Constant::getNullValue(U.getType())));
351  else if (Pred == CmpInst::FCMP_TRUE)
352    MIRBuilder.buildCopy(
353        Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType())));
354  else {
355    assert(CI && "Instruction should be CmpInst");
356    MIRBuilder.buildInstr(TargetOpcode::G_FCMP, {Res}, {Pred, Op0, Op1},
357                          MachineInstr::copyFlagsFromInstruction(*CI));
358  }
359
360  return true;
361}
362
363bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
364  const ReturnInst &RI = cast<ReturnInst>(U);
365  const Value *Ret = RI.getReturnValue();
366  if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
367    Ret = nullptr;
368
369  ArrayRef<Register> VRegs;
370  if (Ret)
371    VRegs = getOrCreateVRegs(*Ret);
372
373  Register SwiftErrorVReg = 0;
374  if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
375    SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
376        &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg());
377  }
378
379  // The target may mess up with the insertion point, but
380  // this is not important as a return is the last instruction
381  // of the block anyway.
382  return CLI->lowerReturn(MIRBuilder, Ret, VRegs, SwiftErrorVReg);
383}
384
385bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
386  const BranchInst &BrInst = cast<BranchInst>(U);
387  unsigned Succ = 0;
388  if (!BrInst.isUnconditional()) {
389    // We want a G_BRCOND to the true BB followed by an unconditional branch.
390    Register Tst = getOrCreateVReg(*BrInst.getCondition());
391    const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
392    MachineBasicBlock &TrueBB = getMBB(TrueTgt);
393    MIRBuilder.buildBrCond(Tst, TrueBB);
394  }
395
396  const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
397  MachineBasicBlock &TgtBB = getMBB(BrTgt);
398  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
399
400  // If the unconditional target is the layout successor, fallthrough.
401  if (!CurBB.isLayoutSuccessor(&TgtBB))
402    MIRBuilder.buildBr(TgtBB);
403
404  // Link successors.
405  for (const BasicBlock *Succ : successors(&BrInst))
406    CurBB.addSuccessor(&getMBB(*Succ));
407  return true;
408}
409
410void IRTranslator::addSuccessorWithProb(MachineBasicBlock *Src,
411                                        MachineBasicBlock *Dst,
412                                        BranchProbability Prob) {
413  if (!FuncInfo.BPI) {
414    Src->addSuccessorWithoutProb(Dst);
415    return;
416  }
417  if (Prob.isUnknown())
418    Prob = getEdgeProbability(Src, Dst);
419  Src->addSuccessor(Dst, Prob);
420}
421
422BranchProbability
423IRTranslator::getEdgeProbability(const MachineBasicBlock *Src,
424                                 const MachineBasicBlock *Dst) const {
425  const BasicBlock *SrcBB = Src->getBasicBlock();
426  const BasicBlock *DstBB = Dst->getBasicBlock();
427  if (!FuncInfo.BPI) {
428    // If BPI is not available, set the default probability as 1 / N, where N is
429    // the number of successors.
430    auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
431    return BranchProbability(1, SuccSize);
432  }
433  return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB);
434}
435
436bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) {
437  using namespace SwitchCG;
438  // Extract cases from the switch.
439  const SwitchInst &SI = cast<SwitchInst>(U);
440  BranchProbabilityInfo *BPI = FuncInfo.BPI;
441  CaseClusterVector Clusters;
442  Clusters.reserve(SI.getNumCases());
443  for (auto &I : SI.cases()) {
444    MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor());
445    assert(Succ && "Could not find successor mbb in mapping");
446    const ConstantInt *CaseVal = I.getCaseValue();
447    BranchProbability Prob =
448        BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
449            : BranchProbability(1, SI.getNumCases() + 1);
450    Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
451  }
452
453  MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest());
454
455  // Cluster adjacent cases with the same destination. We do this at all
456  // optimization levels because it's cheap to do and will make codegen faster
457  // if there are many clusters.
458  sortAndRangeify(Clusters);
459
460  MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent());
461
462  // If there is only the default destination, jump there directly.
463  if (Clusters.empty()) {
464    SwitchMBB->addSuccessor(DefaultMBB);
465    if (DefaultMBB != SwitchMBB->getNextNode())
466      MIB.buildBr(*DefaultMBB);
467    return true;
468  }
469
470  SL->findJumpTables(Clusters, &SI, DefaultMBB, nullptr, nullptr);
471
472  LLVM_DEBUG({
473    dbgs() << "Case clusters: ";
474    for (const CaseCluster &C : Clusters) {
475      if (C.Kind == CC_JumpTable)
476        dbgs() << "JT:";
477      if (C.Kind == CC_BitTests)
478        dbgs() << "BT:";
479
480      C.Low->getValue().print(dbgs(), true);
481      if (C.Low != C.High) {
482        dbgs() << '-';
483        C.High->getValue().print(dbgs(), true);
484      }
485      dbgs() << ' ';
486    }
487    dbgs() << '\n';
488  });
489
490  assert(!Clusters.empty());
491  SwitchWorkList WorkList;
492  CaseClusterIt First = Clusters.begin();
493  CaseClusterIt Last = Clusters.end() - 1;
494  auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
495  WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
496
497  // FIXME: At the moment we don't do any splitting optimizations here like
498  // SelectionDAG does, so this worklist only has one entry.
499  while (!WorkList.empty()) {
500    SwitchWorkListItem W = WorkList.back();
501    WorkList.pop_back();
502    if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
503      return false;
504  }
505  return true;
506}
507
508void IRTranslator::emitJumpTable(SwitchCG::JumpTable &JT,
509                                 MachineBasicBlock *MBB) {
510  // Emit the code for the jump table
511  assert(JT.Reg != -1U && "Should lower JT Header first!");
512  MachineIRBuilder MIB(*MBB->getParent());
513  MIB.setMBB(*MBB);
514  MIB.setDebugLoc(CurBuilder->getDebugLoc());
515
516  Type *PtrIRTy = Type::getInt8PtrTy(MF->getFunction().getContext());
517  const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
518
519  auto Table = MIB.buildJumpTable(PtrTy, JT.JTI);
520  MIB.buildBrJT(Table.getReg(0), JT.JTI, JT.Reg);
521}
522
523bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT,
524                                       SwitchCG::JumpTableHeader &JTH,
525                                       MachineBasicBlock *HeaderBB) {
526  MachineIRBuilder MIB(*HeaderBB->getParent());
527  MIB.setMBB(*HeaderBB);
528  MIB.setDebugLoc(CurBuilder->getDebugLoc());
529
530  const Value &SValue = *JTH.SValue;
531  // Subtract the lowest switch case value from the value being switched on.
532  const LLT SwitchTy = getLLTForType(*SValue.getType(), *DL);
533  Register SwitchOpReg = getOrCreateVReg(SValue);
534  auto FirstCst = MIB.buildConstant(SwitchTy, JTH.First);
535  auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);
536
537  // This value may be smaller or larger than the target's pointer type, and
538  // therefore require extension or truncating.
539  Type *PtrIRTy = SValue.getType()->getPointerTo();
540  const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy));
541  Sub = MIB.buildZExtOrTrunc(PtrScalarTy, Sub);
542
543  JT.Reg = Sub.getReg(0);
544
545  if (JTH.OmitRangeCheck) {
546    if (JT.MBB != HeaderBB->getNextNode())
547      MIB.buildBr(*JT.MBB);
548    return true;
549  }
550
551  // Emit the range check for the jump table, and branch to the default block
552  // for the switch statement if the value being switched on exceeds the
553  // largest case in the switch.
554  auto Cst = getOrCreateVReg(
555      *ConstantInt::get(SValue.getType(), JTH.Last - JTH.First));
556  Cst = MIB.buildZExtOrTrunc(PtrScalarTy, Cst).getReg(0);
557  auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::scalar(1), Sub, Cst);
558
559  auto BrCond = MIB.buildBrCond(Cmp.getReg(0), *JT.Default);
560
561  // Avoid emitting unnecessary branches to the next block.
562  if (JT.MBB != HeaderBB->getNextNode())
563    BrCond = MIB.buildBr(*JT.MBB);
564  return true;
565}
566
567void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
568                                  MachineBasicBlock *SwitchBB,
569                                  MachineIRBuilder &MIB) {
570  Register CondLHS = getOrCreateVReg(*CB.CmpLHS);
571  Register Cond;
572  DebugLoc OldDbgLoc = MIB.getDebugLoc();
573  MIB.setDebugLoc(CB.DbgLoc);
574  MIB.setMBB(*CB.ThisBB);
575
576  if (CB.PredInfo.NoCmp) {
577    // Branch or fall through to TrueBB.
578    addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
579    addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
580                      CB.ThisBB);
581    CB.ThisBB->normalizeSuccProbs();
582    if (CB.TrueBB != CB.ThisBB->getNextNode())
583      MIB.buildBr(*CB.TrueBB);
584    MIB.setDebugLoc(OldDbgLoc);
585    return;
586  }
587
588  const LLT i1Ty = LLT::scalar(1);
589  // Build the compare.
590  if (!CB.CmpMHS) {
591    Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
592    Cond = MIB.buildICmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
593  } else {
594    assert(CB.PredInfo.Pred == CmpInst::ICMP_SLE &&
595           "Can only handle SLE ranges");
596
597    const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
598    const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
599
600    Register CmpOpReg = getOrCreateVReg(*CB.CmpMHS);
601    if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
602      Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
603      Cond =
604          MIB.buildICmp(CmpInst::ICMP_SLE, i1Ty, CmpOpReg, CondRHS).getReg(0);
605    } else {
606      const LLT &CmpTy = MRI->getType(CmpOpReg);
607      auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);
608      auto Diff = MIB.buildConstant(CmpTy, High - Low);
609      Cond = MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, Sub, Diff).getReg(0);
610    }
611  }
612
613  // Update successor info
614  addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
615
616  addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
617                    CB.ThisBB);
618
619  // TrueBB and FalseBB are always different unless the incoming IR is
620  // degenerate. This only happens when running llc on weird IR.
621  if (CB.TrueBB != CB.FalseBB)
622    addSuccessorWithProb(CB.ThisBB, CB.FalseBB, CB.FalseProb);
623  CB.ThisBB->normalizeSuccProbs();
624
625  //  if (SwitchBB->getBasicBlock() != CB.FalseBB->getBasicBlock())
626    addMachineCFGPred({SwitchBB->getBasicBlock(), CB.FalseBB->getBasicBlock()},
627                      CB.ThisBB);
628
629  // If the lhs block is the next block, invert the condition so that we can
630  // fall through to the lhs instead of the rhs block.
631  if (CB.TrueBB == CB.ThisBB->getNextNode()) {
632    std::swap(CB.TrueBB, CB.FalseBB);
633    auto True = MIB.buildConstant(i1Ty, 1);
634    Cond = MIB.buildInstr(TargetOpcode::G_XOR, {i1Ty}, {Cond, True}, None)
635               .getReg(0);
636  }
637
638  MIB.buildBrCond(Cond, *CB.TrueBB);
639  MIB.buildBr(*CB.FalseBB);
640  MIB.setDebugLoc(OldDbgLoc);
641}
642
643bool IRTranslator::lowerJumpTableWorkItem(SwitchCG::SwitchWorkListItem W,
644                                          MachineBasicBlock *SwitchMBB,
645                                          MachineBasicBlock *CurMBB,
646                                          MachineBasicBlock *DefaultMBB,
647                                          MachineIRBuilder &MIB,
648                                          MachineFunction::iterator BBI,
649                                          BranchProbability UnhandledProbs,
650                                          SwitchCG::CaseClusterIt I,
651                                          MachineBasicBlock *Fallthrough,
652                                          bool FallthroughUnreachable) {
653  using namespace SwitchCG;
654  MachineFunction *CurMF = SwitchMBB->getParent();
655  // FIXME: Optimize away range check based on pivot comparisons.
656  JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
657  SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
658  BranchProbability DefaultProb = W.DefaultProb;
659
660  // The jump block hasn't been inserted yet; insert it here.
661  MachineBasicBlock *JumpMBB = JT->MBB;
662  CurMF->insert(BBI, JumpMBB);
663
664  // Since the jump table block is separate from the switch block, we need
665  // to keep track of it as a machine predecessor to the default block,
666  // otherwise we lose the phi edges.
667  addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
668                    CurMBB);
669  addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
670                    JumpMBB);
671
672  auto JumpProb = I->Prob;
673  auto FallthroughProb = UnhandledProbs;
674
675  // If the default statement is a target of the jump table, we evenly
676  // distribute the default probability to successors of CurMBB. Also
677  // update the probability on the edge from JumpMBB to Fallthrough.
678  for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
679                                        SE = JumpMBB->succ_end();
680       SI != SE; ++SI) {
681    if (*SI == DefaultMBB) {
682      JumpProb += DefaultProb / 2;
683      FallthroughProb -= DefaultProb / 2;
684      JumpMBB->setSuccProbability(SI, DefaultProb / 2);
685      JumpMBB->normalizeSuccProbs();
686    } else {
687      // Also record edges from the jump table block to it's successors.
688      addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},
689                        JumpMBB);
690    }
691  }
692
693  // Skip the range check if the fallthrough block is unreachable.
694  if (FallthroughUnreachable)
695    JTH->OmitRangeCheck = true;
696
697  if (!JTH->OmitRangeCheck)
698    addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
699  addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
700  CurMBB->normalizeSuccProbs();
701
702  // The jump table header will be inserted in our current block, do the
703  // range check, and fall through to our fallthrough block.
704  JTH->HeaderBB = CurMBB;
705  JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
706
707  // If we're in the right place, emit the jump table header right now.
708  if (CurMBB == SwitchMBB) {
709    if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
710      return false;
711    JTH->Emitted = true;
712  }
713  return true;
714}
715bool IRTranslator::lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I,
716                                            Value *Cond,
717                                            MachineBasicBlock *Fallthrough,
718                                            bool FallthroughUnreachable,
719                                            BranchProbability UnhandledProbs,
720                                            MachineBasicBlock *CurMBB,
721                                            MachineIRBuilder &MIB,
722                                            MachineBasicBlock *SwitchMBB) {
723  using namespace SwitchCG;
724  const Value *RHS, *LHS, *MHS;
725  CmpInst::Predicate Pred;
726  if (I->Low == I->High) {
727    // Check Cond == I->Low.
728    Pred = CmpInst::ICMP_EQ;
729    LHS = Cond;
730    RHS = I->Low;
731    MHS = nullptr;
732  } else {
733    // Check I->Low <= Cond <= I->High.
734    Pred = CmpInst::ICMP_SLE;
735    LHS = I->Low;
736    MHS = Cond;
737    RHS = I->High;
738  }
739
740  // If Fallthrough is unreachable, fold away the comparison.
741  // The false probability is the sum of all unhandled cases.
742  CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough,
743               CurMBB, MIB.getDebugLoc(), I->Prob, UnhandledProbs);
744
745  emitSwitchCase(CB, SwitchMBB, MIB);
746  return true;
747}
748
749bool IRTranslator::lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W,
750                                       Value *Cond,
751                                       MachineBasicBlock *SwitchMBB,
752                                       MachineBasicBlock *DefaultMBB,
753                                       MachineIRBuilder &MIB) {
754  using namespace SwitchCG;
755  MachineFunction *CurMF = FuncInfo.MF;
756  MachineBasicBlock *NextMBB = nullptr;
757  MachineFunction::iterator BBI(W.MBB);
758  if (++BBI != FuncInfo.MF->end())
759    NextMBB = &*BBI;
760
761  if (EnableOpts) {
762    // Here, we order cases by probability so the most likely case will be
763    // checked first. However, two clusters can have the same probability in
764    // which case their relative ordering is non-deterministic. So we use Low
765    // as a tie-breaker as clusters are guaranteed to never overlap.
766    llvm::sort(W.FirstCluster, W.LastCluster + 1,
767               [](const CaseCluster &a, const CaseCluster &b) {
768                 return a.Prob != b.Prob
769                            ? a.Prob > b.Prob
770                            : a.Low->getValue().slt(b.Low->getValue());
771               });
772
773    // Rearrange the case blocks so that the last one falls through if possible
774    // without changing the order of probabilities.
775    for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) {
776      --I;
777      if (I->Prob > W.LastCluster->Prob)
778        break;
779      if (I->Kind == CC_Range && I->MBB == NextMBB) {
780        std::swap(*I, *W.LastCluster);
781        break;
782      }
783    }
784  }
785
786  // Compute total probability.
787  BranchProbability DefaultProb = W.DefaultProb;
788  BranchProbability UnhandledProbs = DefaultProb;
789  for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
790    UnhandledProbs += I->Prob;
791
792  MachineBasicBlock *CurMBB = W.MBB;
793  for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
794    bool FallthroughUnreachable = false;
795    MachineBasicBlock *Fallthrough;
796    if (I == W.LastCluster) {
797      // For the last cluster, fall through to the default destination.
798      Fallthrough = DefaultMBB;
799      FallthroughUnreachable = isa<UnreachableInst>(
800          DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
801    } else {
802      Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
803      CurMF->insert(BBI, Fallthrough);
804    }
805    UnhandledProbs -= I->Prob;
806
807    switch (I->Kind) {
808    case CC_BitTests: {
809      LLVM_DEBUG(dbgs() << "Switch to bit test optimization unimplemented");
810      return false; // Bit tests currently unimplemented.
811    }
812    case CC_JumpTable: {
813      if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
814                                  UnhandledProbs, I, Fallthrough,
815                                  FallthroughUnreachable)) {
816        LLVM_DEBUG(dbgs() << "Failed to lower jump table");
817        return false;
818      }
819      break;
820    }
821    case CC_Range: {
822      if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough,
823                                    FallthroughUnreachable, UnhandledProbs,
824                                    CurMBB, MIB, SwitchMBB)) {
825        LLVM_DEBUG(dbgs() << "Failed to lower switch range");
826        return false;
827      }
828      break;
829    }
830    }
831    CurMBB = Fallthrough;
832  }
833
834  return true;
835}
836
837bool IRTranslator::translateIndirectBr(const User &U,
838                                       MachineIRBuilder &MIRBuilder) {
839  const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);
840
841  const Register Tgt = getOrCreateVReg(*BrInst.getAddress());
842  MIRBuilder.buildBrIndirect(Tgt);
843
844  // Link successors.
845  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
846  for (const BasicBlock *Succ : successors(&BrInst))
847    CurBB.addSuccessor(&getMBB(*Succ));
848
849  return true;
850}
851
852static bool isSwiftError(const Value *V) {
853  if (auto Arg = dyn_cast<Argument>(V))
854    return Arg->hasSwiftErrorAttr();
855  if (auto AI = dyn_cast<AllocaInst>(V))
856    return AI->isSwiftError();
857  return false;
858}
859
860bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
861  const LoadInst &LI = cast<LoadInst>(U);
862
863  auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
864                               : MachineMemOperand::MONone;
865  Flags |= MachineMemOperand::MOLoad;
866
867  if (DL->getTypeStoreSize(LI.getType()) == 0)
868    return true;
869
870  ArrayRef<Register> Regs = getOrCreateVRegs(LI);
871  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
872  Register Base = getOrCreateVReg(*LI.getPointerOperand());
873
874  Type *OffsetIRTy = DL->getIntPtrType(LI.getPointerOperandType());
875  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
876
877  if (CLI->supportSwiftError() && isSwiftError(LI.getPointerOperand())) {
878    assert(Regs.size() == 1 && "swifterror should be single pointer");
879    Register VReg = SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(),
880                                                    LI.getPointerOperand());
881    MIRBuilder.buildCopy(Regs[0], VReg);
882    return true;
883  }
884
885  const MDNode *Ranges =
886      Regs.size() == 1 ? LI.getMetadata(LLVMContext::MD_range) : nullptr;
887  for (unsigned i = 0; i < Regs.size(); ++i) {
888    Register Addr;
889    MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);
890
891    MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
892    unsigned BaseAlign = getMemOpAlignment(LI);
893    AAMDNodes AAMetadata;
894    LI.getAAMetadata(AAMetadata);
895    auto MMO = MF->getMachineMemOperand(
896        Ptr, Flags, (MRI->getType(Regs[i]).getSizeInBits() + 7) / 8,
897        MinAlign(BaseAlign, Offsets[i] / 8), AAMetadata, Ranges,
898        LI.getSyncScopeID(), LI.getOrdering());
899    MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
900  }
901
902  return true;
903}
904
905bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
906  const StoreInst &SI = cast<StoreInst>(U);
907  auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile
908                               : MachineMemOperand::MONone;
909  Flags |= MachineMemOperand::MOStore;
910
911  if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
912    return true;
913
914  ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand());
915  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
916  Register Base = getOrCreateVReg(*SI.getPointerOperand());
917
918  Type *OffsetIRTy = DL->getIntPtrType(SI.getPointerOperandType());
919  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
920
921  if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) {
922    assert(Vals.size() == 1 && "swifterror should be single pointer");
923
924    Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(),
925                                                    SI.getPointerOperand());
926    MIRBuilder.buildCopy(VReg, Vals[0]);
927    return true;
928  }
929
930  for (unsigned i = 0; i < Vals.size(); ++i) {
931    Register Addr;
932    MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);
933
934    MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
935    unsigned BaseAlign = getMemOpAlignment(SI);
936    AAMDNodes AAMetadata;
937    SI.getAAMetadata(AAMetadata);
938    auto MMO = MF->getMachineMemOperand(
939        Ptr, Flags, (MRI->getType(Vals[i]).getSizeInBits() + 7) / 8,
940        MinAlign(BaseAlign, Offsets[i] / 8), AAMetadata, nullptr,
941        SI.getSyncScopeID(), SI.getOrdering());
942    MIRBuilder.buildStore(Vals[i], Addr, *MMO);
943  }
944  return true;
945}
946
947static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
948  const Value *Src = U.getOperand(0);
949  Type *Int32Ty = Type::getInt32Ty(U.getContext());
950
951  // getIndexedOffsetInType is designed for GEPs, so the first index is the
952  // usual array element rather than looking into the actual aggregate.
953  SmallVector<Value *, 1> Indices;
954  Indices.push_back(ConstantInt::get(Int32Ty, 0));
955
956  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
957    for (auto Idx : EVI->indices())
958      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
959  } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
960    for (auto Idx : IVI->indices())
961      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
962  } else {
963    for (unsigned i = 1; i < U.getNumOperands(); ++i)
964      Indices.push_back(U.getOperand(i));
965  }
966
967  return 8 * static_cast<uint64_t>(
968                 DL.getIndexedOffsetInType(Src->getType(), Indices));
969}
970
971bool IRTranslator::translateExtractValue(const User &U,
972                                         MachineIRBuilder &MIRBuilder) {
973  const Value *Src = U.getOperand(0);
974  uint64_t Offset = getOffsetFromIndices(U, *DL);
975  ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
976  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
977  unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin();
978  auto &DstRegs = allocateVRegs(U);
979
980  for (unsigned i = 0; i < DstRegs.size(); ++i)
981    DstRegs[i] = SrcRegs[Idx++];
982
983  return true;
984}
985
986bool IRTranslator::translateInsertValue(const User &U,
987                                        MachineIRBuilder &MIRBuilder) {
988  const Value *Src = U.getOperand(0);
989  uint64_t Offset = getOffsetFromIndices(U, *DL);
990  auto &DstRegs = allocateVRegs(U);
991  ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
992  ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
993  ArrayRef<Register> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
994  auto InsertedIt = InsertedRegs.begin();
995
996  for (unsigned i = 0; i < DstRegs.size(); ++i) {
997    if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
998      DstRegs[i] = *InsertedIt++;
999    else
1000      DstRegs[i] = SrcRegs[i];
1001  }
1002
1003  return true;
1004}
1005
1006bool IRTranslator::translateSelect(const User &U,
1007                                   MachineIRBuilder &MIRBuilder) {
1008  Register Tst = getOrCreateVReg(*U.getOperand(0));
1009  ArrayRef<Register> ResRegs = getOrCreateVRegs(U);
1010  ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
1011  ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2));
1012
1013  const SelectInst &SI = cast<SelectInst>(U);
1014  uint16_t Flags = 0;
1015  if (const CmpInst *Cmp = dyn_cast<CmpInst>(SI.getCondition()))
1016    Flags = MachineInstr::copyFlagsFromInstruction(*Cmp);
1017
1018  for (unsigned i = 0; i < ResRegs.size(); ++i) {
1019    MIRBuilder.buildInstr(TargetOpcode::G_SELECT, {ResRegs[i]},
1020                          {Tst, Op0Regs[i], Op1Regs[i]}, Flags);
1021  }
1022
1023  return true;
1024}
1025
1026bool IRTranslator::translateBitCast(const User &U,
1027                                    MachineIRBuilder &MIRBuilder) {
1028  // If we're bitcasting to the source type, we can reuse the source vreg.
1029  if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
1030      getLLTForType(*U.getType(), *DL)) {
1031    Register SrcReg = getOrCreateVReg(*U.getOperand(0));
1032    auto &Regs = *VMap.getVRegs(U);
1033    // If we already assigned a vreg for this bitcast, we can't change that.
1034    // Emit a copy to satisfy the users we already emitted.
1035    if (!Regs.empty())
1036      MIRBuilder.buildCopy(Regs[0], SrcReg);
1037    else {
1038      Regs.push_back(SrcReg);
1039      VMap.getOffsets(U)->push_back(0);
1040    }
1041    return true;
1042  }
1043  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
1044}
1045
1046bool IRTranslator::translateCast(unsigned Opcode, const User &U,
1047                                 MachineIRBuilder &MIRBuilder) {
1048  Register Op = getOrCreateVReg(*U.getOperand(0));
1049  Register Res = getOrCreateVReg(U);
1050  MIRBuilder.buildInstr(Opcode, {Res}, {Op});
1051  return true;
1052}
1053
1054bool IRTranslator::translateGetElementPtr(const User &U,
1055                                          MachineIRBuilder &MIRBuilder) {
1056  // FIXME: support vector GEPs.
1057  if (U.getType()->isVectorTy())
1058    return false;
1059
1060  Value &Op0 = *U.getOperand(0);
1061  Register BaseReg = getOrCreateVReg(Op0);
1062  Type *PtrIRTy = Op0.getType();
1063  LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
1064  Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
1065  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1066
1067  int64_t Offset = 0;
1068  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
1069       GTI != E; ++GTI) {
1070    const Value *Idx = GTI.getOperand();
1071    if (StructType *StTy = GTI.getStructTypeOrNull()) {
1072      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
1073      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
1074      continue;
1075    } else {
1076      uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());
1077
1078      // If this is a scalar constant or a splat vector of constants,
1079      // handle it quickly.
1080      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
1081        Offset += ElementSize * CI->getSExtValue();
1082        continue;
1083      }
1084
1085      if (Offset != 0) {
1086        LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1087        auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset);
1088        BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, OffsetMIB.getReg(0))
1089                      .getReg(0);
1090        Offset = 0;
1091      }
1092
1093      Register IdxReg = getOrCreateVReg(*Idx);
1094      if (MRI->getType(IdxReg) != OffsetTy)
1095        IdxReg = MIRBuilder.buildSExtOrTrunc(OffsetTy, IdxReg).getReg(0);
1096
1097      // N = N + Idx * ElementSize;
1098      // Avoid doing it for ElementSize of 1.
1099      Register GepOffsetReg;
1100      if (ElementSize != 1) {
1101        auto ElementSizeMIB = MIRBuilder.buildConstant(
1102            getLLTForType(*OffsetIRTy, *DL), ElementSize);
1103        GepOffsetReg =
1104            MIRBuilder.buildMul(OffsetTy, ElementSizeMIB, IdxReg).getReg(0);
1105      } else
1106        GepOffsetReg = IdxReg;
1107
1108      BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, GepOffsetReg).getReg(0);
1109    }
1110  }
1111
1112  if (Offset != 0) {
1113    auto OffsetMIB =
1114        MIRBuilder.buildConstant(getLLTForType(*OffsetIRTy, *DL), Offset);
1115    MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0));
1116    return true;
1117  }
1118
1119  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
1120  return true;
1121}
1122
1123bool IRTranslator::translateMemFunc(const CallInst &CI,
1124                                    MachineIRBuilder &MIRBuilder,
1125                                    Intrinsic::ID ID) {
1126
1127  // If the source is undef, then just emit a nop.
1128  if (isa<UndefValue>(CI.getArgOperand(1)))
1129    return true;
1130
1131  ArrayRef<Register> Res;
1132  auto ICall = MIRBuilder.buildIntrinsic(ID, Res, true);
1133  for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI)
1134    ICall.addUse(getOrCreateVReg(**AI));
1135
1136  unsigned DstAlign = 0, SrcAlign = 0;
1137  unsigned IsVol =
1138      cast<ConstantInt>(CI.getArgOperand(CI.getNumArgOperands() - 1))
1139          ->getZExtValue();
1140
1141  if (auto *MCI = dyn_cast<MemCpyInst>(&CI)) {
1142    DstAlign = std::max<unsigned>(MCI->getDestAlignment(), 1);
1143    SrcAlign = std::max<unsigned>(MCI->getSourceAlignment(), 1);
1144  } else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) {
1145    DstAlign = std::max<unsigned>(MMI->getDestAlignment(), 1);
1146    SrcAlign = std::max<unsigned>(MMI->getSourceAlignment(), 1);
1147  } else {
1148    auto *MSI = cast<MemSetInst>(&CI);
1149    DstAlign = std::max<unsigned>(MSI->getDestAlignment(), 1);
1150  }
1151
1152  // We need to propagate the tail call flag from the IR inst as an argument.
1153  // Otherwise, we have to pessimize and assume later that we cannot tail call
1154  // any memory intrinsics.
1155  ICall.addImm(CI.isTailCall() ? 1 : 0);
1156
1157  // Create mem operands to store the alignment and volatile info.
1158  auto VolFlag = IsVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
1159  ICall.addMemOperand(MF->getMachineMemOperand(
1160      MachinePointerInfo(CI.getArgOperand(0)),
1161      MachineMemOperand::MOStore | VolFlag, 1, DstAlign));
1162  if (ID != Intrinsic::memset)
1163    ICall.addMemOperand(MF->getMachineMemOperand(
1164        MachinePointerInfo(CI.getArgOperand(1)),
1165        MachineMemOperand::MOLoad | VolFlag, 1, SrcAlign));
1166
1167  return true;
1168}
1169
1170void IRTranslator::getStackGuard(Register DstReg,
1171                                 MachineIRBuilder &MIRBuilder) {
1172  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
1173  MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
1174  auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD);
1175  MIB.addDef(DstReg);
1176
1177  auto &TLI = *MF->getSubtarget().getTargetLowering();
1178  Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
1179  if (!Global)
1180    return;
1181
1182  MachinePointerInfo MPInfo(Global);
1183  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
1184               MachineMemOperand::MODereferenceable;
1185  MachineMemOperand *MemRef =
1186      MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
1187                               DL->getPointerABIAlignment(0).value());
1188  MIB.setMemRefs({MemRef});
1189}
1190
1191bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
1192                                              MachineIRBuilder &MIRBuilder) {
1193  ArrayRef<Register> ResRegs = getOrCreateVRegs(CI);
1194  MIRBuilder.buildInstr(Op)
1195      .addDef(ResRegs[0])
1196      .addDef(ResRegs[1])
1197      .addUse(getOrCreateVReg(*CI.getOperand(0)))
1198      .addUse(getOrCreateVReg(*CI.getOperand(1)));
1199
1200  return true;
1201}
1202
1203unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
1204  switch (ID) {
1205    default:
1206      break;
1207    case Intrinsic::bswap:
1208      return TargetOpcode::G_BSWAP;
1209  case Intrinsic::bitreverse:
1210      return TargetOpcode::G_BITREVERSE;
1211    case Intrinsic::ceil:
1212      return TargetOpcode::G_FCEIL;
1213    case Intrinsic::cos:
1214      return TargetOpcode::G_FCOS;
1215    case Intrinsic::ctpop:
1216      return TargetOpcode::G_CTPOP;
1217    case Intrinsic::exp:
1218      return TargetOpcode::G_FEXP;
1219    case Intrinsic::exp2:
1220      return TargetOpcode::G_FEXP2;
1221    case Intrinsic::fabs:
1222      return TargetOpcode::G_FABS;
1223    case Intrinsic::copysign:
1224      return TargetOpcode::G_FCOPYSIGN;
1225    case Intrinsic::minnum:
1226      return TargetOpcode::G_FMINNUM;
1227    case Intrinsic::maxnum:
1228      return TargetOpcode::G_FMAXNUM;
1229    case Intrinsic::minimum:
1230      return TargetOpcode::G_FMINIMUM;
1231    case Intrinsic::maximum:
1232      return TargetOpcode::G_FMAXIMUM;
1233    case Intrinsic::canonicalize:
1234      return TargetOpcode::G_FCANONICALIZE;
1235    case Intrinsic::floor:
1236      return TargetOpcode::G_FFLOOR;
1237    case Intrinsic::fma:
1238      return TargetOpcode::G_FMA;
1239    case Intrinsic::log:
1240      return TargetOpcode::G_FLOG;
1241    case Intrinsic::log2:
1242      return TargetOpcode::G_FLOG2;
1243    case Intrinsic::log10:
1244      return TargetOpcode::G_FLOG10;
1245    case Intrinsic::nearbyint:
1246      return TargetOpcode::G_FNEARBYINT;
1247    case Intrinsic::pow:
1248      return TargetOpcode::G_FPOW;
1249    case Intrinsic::rint:
1250      return TargetOpcode::G_FRINT;
1251    case Intrinsic::round:
1252      return TargetOpcode::G_INTRINSIC_ROUND;
1253    case Intrinsic::sin:
1254      return TargetOpcode::G_FSIN;
1255    case Intrinsic::sqrt:
1256      return TargetOpcode::G_FSQRT;
1257    case Intrinsic::trunc:
1258      return TargetOpcode::G_INTRINSIC_TRUNC;
1259    case Intrinsic::readcyclecounter:
1260      return TargetOpcode::G_READCYCLECOUNTER;
1261  }
1262  return Intrinsic::not_intrinsic;
1263}
1264
1265bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
1266                                            Intrinsic::ID ID,
1267                                            MachineIRBuilder &MIRBuilder) {
1268
1269  unsigned Op = getSimpleIntrinsicOpcode(ID);
1270
1271  // Is this a simple intrinsic?
1272  if (Op == Intrinsic::not_intrinsic)
1273    return false;
1274
1275  // Yes. Let's translate it.
1276  SmallVector<llvm::SrcOp, 4> VRegs;
1277  for (auto &Arg : CI.arg_operands())
1278    VRegs.push_back(getOrCreateVReg(*Arg));
1279
1280  MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
1281                        MachineInstr::copyFlagsFromInstruction(CI));
1282  return true;
1283}
1284
1285bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
1286                                           MachineIRBuilder &MIRBuilder) {
1287
1288  // If this is a simple intrinsic (that is, we just need to add a def of
1289  // a vreg, and uses for each arg operand, then translate it.
1290  if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
1291    return true;
1292
1293  switch (ID) {
1294  default:
1295    break;
1296  case Intrinsic::lifetime_start:
1297  case Intrinsic::lifetime_end: {
1298    // No stack colouring in O0, discard region information.
1299    if (MF->getTarget().getOptLevel() == CodeGenOpt::None)
1300      return true;
1301
1302    unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
1303                                                  : TargetOpcode::LIFETIME_END;
1304
1305    // Get the underlying objects for the location passed on the lifetime
1306    // marker.
1307    SmallVector<const Value *, 4> Allocas;
1308    GetUnderlyingObjects(CI.getArgOperand(1), Allocas, *DL);
1309
1310    // Iterate over each underlying object, creating lifetime markers for each
1311    // static alloca. Quit if we find a non-static alloca.
1312    for (const Value *V : Allocas) {
1313      const AllocaInst *AI = dyn_cast<AllocaInst>(V);
1314      if (!AI)
1315        continue;
1316
1317      if (!AI->isStaticAlloca())
1318        return true;
1319
1320      MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI));
1321    }
1322    return true;
1323  }
1324  case Intrinsic::dbg_declare: {
1325    const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
1326    assert(DI.getVariable() && "Missing variable");
1327
1328    const Value *Address = DI.getAddress();
1329    if (!Address || isa<UndefValue>(Address)) {
1330      LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
1331      return true;
1332    }
1333
1334    assert(DI.getVariable()->isValidLocationForIntrinsic(
1335               MIRBuilder.getDebugLoc()) &&
1336           "Expected inlined-at fields to agree");
1337    auto AI = dyn_cast<AllocaInst>(Address);
1338    if (AI && AI->isStaticAlloca()) {
1339      // Static allocas are tracked at the MF level, no need for DBG_VALUE
1340      // instructions (in fact, they get ignored if they *do* exist).
1341      MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(),
1342                             getOrCreateFrameIndex(*AI), DI.getDebugLoc());
1343    } else {
1344      // A dbg.declare describes the address of a source variable, so lower it
1345      // into an indirect DBG_VALUE.
1346      MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address),
1347                                       DI.getVariable(), DI.getExpression());
1348    }
1349    return true;
1350  }
1351  case Intrinsic::dbg_label: {
1352    const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
1353    assert(DI.getLabel() && "Missing label");
1354
1355    assert(DI.getLabel()->isValidLocationForIntrinsic(
1356               MIRBuilder.getDebugLoc()) &&
1357           "Expected inlined-at fields to agree");
1358
1359    MIRBuilder.buildDbgLabel(DI.getLabel());
1360    return true;
1361  }
1362  case Intrinsic::vaend:
1363    // No target I know of cares about va_end. Certainly no in-tree target
1364    // does. Simplest intrinsic ever!
1365    return true;
1366  case Intrinsic::vastart: {
1367    auto &TLI = *MF->getSubtarget().getTargetLowering();
1368    Value *Ptr = CI.getArgOperand(0);
1369    unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;
1370
1371    // FIXME: Get alignment
1372    MIRBuilder.buildInstr(TargetOpcode::G_VASTART)
1373        .addUse(getOrCreateVReg(*Ptr))
1374        .addMemOperand(MF->getMachineMemOperand(
1375            MachinePointerInfo(Ptr), MachineMemOperand::MOStore, ListSize, 1));
1376    return true;
1377  }
1378  case Intrinsic::dbg_value: {
1379    // This form of DBG_VALUE is target-independent.
1380    const DbgValueInst &DI = cast<DbgValueInst>(CI);
1381    const Value *V = DI.getValue();
1382    assert(DI.getVariable()->isValidLocationForIntrinsic(
1383               MIRBuilder.getDebugLoc()) &&
1384           "Expected inlined-at fields to agree");
1385    if (!V) {
1386      // Currently the optimizer can produce this; insert an undef to
1387      // help debugging.  Probably the optimizer should not do this.
1388      MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression());
1389    } else if (const auto *CI = dyn_cast<Constant>(V)) {
1390      MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
1391    } else {
1392      for (Register Reg : getOrCreateVRegs(*V)) {
1393        // FIXME: This does not handle register-indirect values at offset 0. The
1394        // direct/indirect thing shouldn't really be handled by something as
1395        // implicit as reg+noreg vs reg+imm in the first place, but it seems
1396        // pretty baked in right now.
1397        MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression());
1398      }
1399    }
1400    return true;
1401  }
1402  case Intrinsic::uadd_with_overflow:
1403    return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
1404  case Intrinsic::sadd_with_overflow:
1405    return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
1406  case Intrinsic::usub_with_overflow:
1407    return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
1408  case Intrinsic::ssub_with_overflow:
1409    return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
1410  case Intrinsic::umul_with_overflow:
1411    return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
1412  case Intrinsic::smul_with_overflow:
1413    return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
1414  case Intrinsic::fmuladd: {
1415    const TargetMachine &TM = MF->getTarget();
1416    const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
1417    Register Dst = getOrCreateVReg(CI);
1418    Register Op0 = getOrCreateVReg(*CI.getArgOperand(0));
1419    Register Op1 = getOrCreateVReg(*CI.getArgOperand(1));
1420    Register Op2 = getOrCreateVReg(*CI.getArgOperand(2));
1421    if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
1422        TLI.isFMAFasterThanFMulAndFAdd(*MF,
1423                                       TLI.getValueType(*DL, CI.getType()))) {
1424      // TODO: Revisit this to see if we should move this part of the
1425      // lowering to the combiner.
1426      MIRBuilder.buildInstr(TargetOpcode::G_FMA, {Dst}, {Op0, Op1, Op2},
1427                            MachineInstr::copyFlagsFromInstruction(CI));
1428    } else {
1429      LLT Ty = getLLTForType(*CI.getType(), *DL);
1430      auto FMul = MIRBuilder.buildInstr(TargetOpcode::G_FMUL, {Ty}, {Op0, Op1},
1431                                        MachineInstr::copyFlagsFromInstruction(CI));
1432      MIRBuilder.buildInstr(TargetOpcode::G_FADD, {Dst}, {FMul, Op2},
1433                            MachineInstr::copyFlagsFromInstruction(CI));
1434    }
1435    return true;
1436  }
1437  case Intrinsic::memcpy:
1438  case Intrinsic::memmove:
1439  case Intrinsic::memset:
1440    return translateMemFunc(CI, MIRBuilder, ID);
1441  case Intrinsic::eh_typeid_for: {
1442    GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
1443    Register Reg = getOrCreateVReg(CI);
1444    unsigned TypeID = MF->getTypeIDFor(GV);
1445    MIRBuilder.buildConstant(Reg, TypeID);
1446    return true;
1447  }
1448  case Intrinsic::objectsize:
1449    llvm_unreachable("llvm.objectsize.* should have been lowered already");
1450
1451  case Intrinsic::is_constant:
1452    llvm_unreachable("llvm.is.constant.* should have been lowered already");
1453
1454  case Intrinsic::stackguard:
1455    getStackGuard(getOrCreateVReg(CI), MIRBuilder);
1456    return true;
1457  case Intrinsic::stackprotector: {
1458    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
1459    Register GuardVal = MRI->createGenericVirtualRegister(PtrTy);
1460    getStackGuard(GuardVal, MIRBuilder);
1461
1462    AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
1463    int FI = getOrCreateFrameIndex(*Slot);
1464    MF->getFrameInfo().setStackProtectorIndex(FI);
1465
1466    MIRBuilder.buildStore(
1467        GuardVal, getOrCreateVReg(*Slot),
1468        *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
1469                                  MachineMemOperand::MOStore |
1470                                      MachineMemOperand::MOVolatile,
1471                                  PtrTy.getSizeInBits() / 8, 8));
1472    return true;
1473  }
1474  case Intrinsic::stacksave: {
1475    // Save the stack pointer to the location provided by the intrinsic.
1476    Register Reg = getOrCreateVReg(CI);
1477    Register StackPtr = MF->getSubtarget()
1478                            .getTargetLowering()
1479                            ->getStackPointerRegisterToSaveRestore();
1480
1481    // If the target doesn't specify a stack pointer, then fall back.
1482    if (!StackPtr)
1483      return false;
1484
1485    MIRBuilder.buildCopy(Reg, StackPtr);
1486    return true;
1487  }
1488  case Intrinsic::stackrestore: {
1489    // Restore the stack pointer from the location provided by the intrinsic.
1490    Register Reg = getOrCreateVReg(*CI.getArgOperand(0));
1491    Register StackPtr = MF->getSubtarget()
1492                            .getTargetLowering()
1493                            ->getStackPointerRegisterToSaveRestore();
1494
1495    // If the target doesn't specify a stack pointer, then fall back.
1496    if (!StackPtr)
1497      return false;
1498
1499    MIRBuilder.buildCopy(StackPtr, Reg);
1500    return true;
1501  }
1502  case Intrinsic::cttz:
1503  case Intrinsic::ctlz: {
1504    ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));
1505    bool isTrailing = ID == Intrinsic::cttz;
1506    unsigned Opcode = isTrailing
1507                          ? Cst->isZero() ? TargetOpcode::G_CTTZ
1508                                          : TargetOpcode::G_CTTZ_ZERO_UNDEF
1509                          : Cst->isZero() ? TargetOpcode::G_CTLZ
1510                                          : TargetOpcode::G_CTLZ_ZERO_UNDEF;
1511    MIRBuilder.buildInstr(Opcode)
1512        .addDef(getOrCreateVReg(CI))
1513        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
1514    return true;
1515  }
1516  case Intrinsic::invariant_start: {
1517    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
1518    Register Undef = MRI->createGenericVirtualRegister(PtrTy);
1519    MIRBuilder.buildUndef(Undef);
1520    return true;
1521  }
1522  case Intrinsic::invariant_end:
1523    return true;
1524  case Intrinsic::assume:
1525  case Intrinsic::var_annotation:
1526  case Intrinsic::sideeffect:
1527    // Discard annotate attributes, assumptions, and artificial side-effects.
1528    return true;
1529  case Intrinsic::read_register: {
1530    Value *Arg = CI.getArgOperand(0);
1531    MIRBuilder.buildInstr(TargetOpcode::G_READ_REGISTER)
1532      .addDef(getOrCreateVReg(CI))
1533      .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()));
1534    return true;
1535  }
1536  }
1537  return false;
1538}
1539
1540bool IRTranslator::translateInlineAsm(const CallInst &CI,
1541                                      MachineIRBuilder &MIRBuilder) {
1542  const InlineAsm &IA = cast<InlineAsm>(*CI.getCalledValue());
1543  if (!IA.getConstraintString().empty())
1544    return false;
1545
1546  unsigned ExtraInfo = 0;
1547  if (IA.hasSideEffects())
1548    ExtraInfo |= InlineAsm::Extra_HasSideEffects;
1549  if (IA.getDialect() == InlineAsm::AD_Intel)
1550    ExtraInfo |= InlineAsm::Extra_AsmDialect;
1551
1552  MIRBuilder.buildInstr(TargetOpcode::INLINEASM)
1553    .addExternalSymbol(IA.getAsmString().c_str())
1554    .addImm(ExtraInfo);
1555
1556  return true;
1557}
1558
1559bool IRTranslator::translateCallSite(const ImmutableCallSite &CS,
1560                                     MachineIRBuilder &MIRBuilder) {
1561  const Instruction &I = *CS.getInstruction();
1562  ArrayRef<Register> Res = getOrCreateVRegs(I);
1563
1564  SmallVector<ArrayRef<Register>, 8> Args;
1565  Register SwiftInVReg = 0;
1566  Register SwiftErrorVReg = 0;
1567  for (auto &Arg : CS.args()) {
1568    if (CLI->supportSwiftError() && isSwiftError(Arg)) {
1569      assert(SwiftInVReg == 0 && "Expected only one swift error argument");
1570      LLT Ty = getLLTForType(*Arg->getType(), *DL);
1571      SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
1572      MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
1573                                            &I, &MIRBuilder.getMBB(), Arg));
1574      Args.emplace_back(makeArrayRef(SwiftInVReg));
1575      SwiftErrorVReg =
1576          SwiftError.getOrCreateVRegDefAt(&I, &MIRBuilder.getMBB(), Arg);
1577      continue;
1578    }
1579    Args.push_back(getOrCreateVRegs(*Arg));
1580  }
1581
1582  // We don't set HasCalls on MFI here yet because call lowering may decide to
1583  // optimize into tail calls. Instead, we defer that to selection where a final
1584  // scan is done to check if any instructions are calls.
1585  bool Success =
1586      CLI->lowerCall(MIRBuilder, CS, Res, Args, SwiftErrorVReg,
1587                     [&]() { return getOrCreateVReg(*CS.getCalledValue()); });
1588
1589  // Check if we just inserted a tail call.
1590  if (Success) {
1591    assert(!HasTailCall && "Can't tail call return twice from block?");
1592    const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
1593    HasTailCall = TII->isTailCall(*std::prev(MIRBuilder.getInsertPt()));
1594  }
1595
1596  return Success;
1597}
1598
1599bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
1600  const CallInst &CI = cast<CallInst>(U);
1601  auto TII = MF->getTarget().getIntrinsicInfo();
1602  const Function *F = CI.getCalledFunction();
1603
1604  // FIXME: support Windows dllimport function calls.
1605  if (F && (F->hasDLLImportStorageClass() ||
1606            (MF->getTarget().getTargetTriple().isOSWindows() &&
1607             F->hasExternalWeakLinkage())))
1608    return false;
1609
1610  // FIXME: support control flow guard targets.
1611  if (CI.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget))
1612    return false;
1613
1614  if (CI.isInlineAsm())
1615    return translateInlineAsm(CI, MIRBuilder);
1616
1617  Intrinsic::ID ID = Intrinsic::not_intrinsic;
1618  if (F && F->isIntrinsic()) {
1619    ID = F->getIntrinsicID();
1620    if (TII && ID == Intrinsic::not_intrinsic)
1621      ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
1622  }
1623
1624  if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic)
1625    return translateCallSite(&CI, MIRBuilder);
1626
1627  assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");
1628
1629  if (translateKnownIntrinsic(CI, ID, MIRBuilder))
1630    return true;
1631
1632  ArrayRef<Register> ResultRegs;
1633  if (!CI.getType()->isVoidTy())
1634    ResultRegs = getOrCreateVRegs(CI);
1635
1636  // Ignore the callsite attributes. Backend code is most likely not expecting
1637  // an intrinsic to sometimes have side effects and sometimes not.
1638  MachineInstrBuilder MIB =
1639      MIRBuilder.buildIntrinsic(ID, ResultRegs, !F->doesNotAccessMemory());
1640  if (isa<FPMathOperator>(CI))
1641    MIB->copyIRFlags(CI);
1642
1643  for (auto &Arg : enumerate(CI.arg_operands())) {
1644    // Some intrinsics take metadata parameters. Reject them.
1645    if (isa<MetadataAsValue>(Arg.value()))
1646      return false;
1647
1648    // If this is required to be an immediate, don't materialize it in a
1649    // register.
1650    if (CI.paramHasAttr(Arg.index(), Attribute::ImmArg)) {
1651      if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg.value())) {
1652        // imm arguments are more convenient than cimm (and realistically
1653        // probably sufficient), so use them.
1654        assert(CI->getBitWidth() <= 64 &&
1655               "large intrinsic immediates not handled");
1656        MIB.addImm(CI->getSExtValue());
1657      } else {
1658        MIB.addFPImm(cast<ConstantFP>(Arg.value()));
1659      }
1660    } else {
1661      ArrayRef<Register> VRegs = getOrCreateVRegs(*Arg.value());
1662      if (VRegs.size() > 1)
1663        return false;
1664      MIB.addUse(VRegs[0]);
1665    }
1666  }
1667
1668  // Add a MachineMemOperand if it is a target mem intrinsic.
1669  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
1670  TargetLowering::IntrinsicInfo Info;
1671  // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
1672  if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
1673    MaybeAlign Align = Info.align;
1674    if (!Align)
1675      Align = MaybeAlign(
1676          DL->getABITypeAlignment(Info.memVT.getTypeForEVT(F->getContext())));
1677
1678    uint64_t Size = Info.memVT.getStoreSize();
1679    MIB.addMemOperand(MF->getMachineMemOperand(
1680        MachinePointerInfo(Info.ptrVal), Info.flags, Size, Align->value()));
1681  }
1682
1683  return true;
1684}
1685
1686bool IRTranslator::translateInvoke(const User &U,
1687                                   MachineIRBuilder &MIRBuilder) {
1688  const InvokeInst &I = cast<InvokeInst>(U);
1689  MCContext &Context = MF->getContext();
1690
1691  const BasicBlock *ReturnBB = I.getSuccessor(0);
1692  const BasicBlock *EHPadBB = I.getSuccessor(1);
1693
1694  const Value *Callee = I.getCalledValue();
1695  const Function *Fn = dyn_cast<Function>(Callee);
1696  if (isa<InlineAsm>(Callee))
1697    return false;
1698
1699  // FIXME: support invoking patchpoint and statepoint intrinsics.
1700  if (Fn && Fn->isIntrinsic())
1701    return false;
1702
1703  // FIXME: support whatever these are.
1704  if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
1705    return false;
1706
1707  // FIXME: support control flow guard targets.
1708  if (I.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget))
1709    return false;
1710
1711  // FIXME: support Windows exception handling.
1712  if (!isa<LandingPadInst>(EHPadBB->front()))
1713    return false;
1714
1715  // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
1716  // the region covered by the try.
1717  MCSymbol *BeginSymbol = Context.createTempSymbol();
1718  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);
1719
1720  if (!translateCallSite(&I, MIRBuilder))
1721    return false;
1722
1723  MCSymbol *EndSymbol = Context.createTempSymbol();
1724  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);
1725
1726  // FIXME: track probabilities.
1727  MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
1728                    &ReturnMBB = getMBB(*ReturnBB);
1729  MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
1730  MIRBuilder.getMBB().addSuccessor(&ReturnMBB);
1731  MIRBuilder.getMBB().addSuccessor(&EHPadMBB);
1732  MIRBuilder.buildBr(ReturnMBB);
1733
1734  return true;
1735}
1736
1737bool IRTranslator::translateCallBr(const User &U,
1738                                   MachineIRBuilder &MIRBuilder) {
1739  // FIXME: Implement this.
1740  return false;
1741}
1742
1743bool IRTranslator::translateLandingPad(const User &U,
1744                                       MachineIRBuilder &MIRBuilder) {
1745  const LandingPadInst &LP = cast<LandingPadInst>(U);
1746
1747  MachineBasicBlock &MBB = MIRBuilder.getMBB();
1748
1749  MBB.setIsEHPad();
1750
1751  // If there aren't registers to copy the values into (e.g., during SjLj
1752  // exceptions), then don't bother.
1753  auto &TLI = *MF->getSubtarget().getTargetLowering();
1754  const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
1755  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
1756      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
1757    return true;
1758
1759  // If landingpad's return type is token type, we don't create DAG nodes
1760  // for its exception pointer and selector value. The extraction of exception
1761  // pointer or selector value from token type landingpads is not currently
1762  // supported.
1763  if (LP.getType()->isTokenTy())
1764    return true;
1765
1766  // Add a label to mark the beginning of the landing pad.  Deletion of the
1767  // landing pad can thus be detected via the MachineModuleInfo.
1768  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
1769    .addSym(MF->addLandingPad(&MBB));
1770
1771  LLT Ty = getLLTForType(*LP.getType(), *DL);
1772  Register Undef = MRI->createGenericVirtualRegister(Ty);
1773  MIRBuilder.buildUndef(Undef);
1774
1775  SmallVector<LLT, 2> Tys;
1776  for (Type *Ty : cast<StructType>(LP.getType())->elements())
1777    Tys.push_back(getLLTForType(*Ty, *DL));
1778  assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
1779
1780  // Mark exception register as live in.
1781  Register ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
1782  if (!ExceptionReg)
1783    return false;
1784
1785  MBB.addLiveIn(ExceptionReg);
1786  ArrayRef<Register> ResRegs = getOrCreateVRegs(LP);
1787  MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);
1788
1789  Register SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
1790  if (!SelectorReg)
1791    return false;
1792
1793  MBB.addLiveIn(SelectorReg);
1794  Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
1795  MIRBuilder.buildCopy(PtrVReg, SelectorReg);
1796  MIRBuilder.buildCast(ResRegs[1], PtrVReg);
1797
1798  return true;
1799}
1800
1801bool IRTranslator::translateAlloca(const User &U,
1802                                   MachineIRBuilder &MIRBuilder) {
1803  auto &AI = cast<AllocaInst>(U);
1804
1805  if (AI.isSwiftError())
1806    return true;
1807
1808  if (AI.isStaticAlloca()) {
1809    Register Res = getOrCreateVReg(AI);
1810    int FI = getOrCreateFrameIndex(AI);
1811    MIRBuilder.buildFrameIndex(Res, FI);
1812    return true;
1813  }
1814
1815  // FIXME: support stack probing for Windows.
1816  if (MF->getTarget().getTargetTriple().isOSWindows())
1817    return false;
1818
1819  // Now we're in the harder dynamic case.
1820  Type *Ty = AI.getAllocatedType();
1821  unsigned Align =
1822      std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI.getAlignment());
1823
1824  Register NumElts = getOrCreateVReg(*AI.getArraySize());
1825
1826  Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
1827  LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
1828  if (MRI->getType(NumElts) != IntPtrTy) {
1829    Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
1830    MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
1831    NumElts = ExtElts;
1832  }
1833
1834  Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
1835  Register TySize =
1836      getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty)));
1837  MIRBuilder.buildMul(AllocSize, NumElts, TySize);
1838
1839  unsigned StackAlign =
1840      MF->getSubtarget().getFrameLowering()->getStackAlignment();
1841  if (Align <= StackAlign)
1842    Align = 0;
1843
1844  // Round the size of the allocation up to the stack alignment size
1845  // by add SA-1 to the size. This doesn't overflow because we're computing
1846  // an address inside an alloca.
1847  auto SAMinusOne = MIRBuilder.buildConstant(IntPtrTy, StackAlign - 1);
1848  auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne,
1849                                      MachineInstr::NoUWrap);
1850  auto AlignCst =
1851      MIRBuilder.buildConstant(IntPtrTy, ~(uint64_t)(StackAlign - 1));
1852  auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst);
1853
1854  MIRBuilder.buildDynStackAlloc(getOrCreateVReg(AI), AlignedAlloc, Align);
1855
1856  MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, &AI);
1857  assert(MF->getFrameInfo().hasVarSizedObjects());
1858  return true;
1859}
1860
1861bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
1862  // FIXME: We may need more info about the type. Because of how LLT works,
1863  // we're completely discarding the i64/double distinction here (amongst
1864  // others). Fortunately the ABIs I know of where that matters don't use va_arg
1865  // anyway but that's not guaranteed.
1866  MIRBuilder.buildInstr(TargetOpcode::G_VAARG)
1867    .addDef(getOrCreateVReg(U))
1868    .addUse(getOrCreateVReg(*U.getOperand(0)))
1869    .addImm(DL->getABITypeAlignment(U.getType()));
1870  return true;
1871}
1872
1873bool IRTranslator::translateInsertElement(const User &U,
1874                                          MachineIRBuilder &MIRBuilder) {
1875  // If it is a <1 x Ty> vector, use the scalar as it is
1876  // not a legal vector type in LLT.
1877  if (U.getType()->getVectorNumElements() == 1) {
1878    Register Elt = getOrCreateVReg(*U.getOperand(1));
1879    auto &Regs = *VMap.getVRegs(U);
1880    if (Regs.empty()) {
1881      Regs.push_back(Elt);
1882      VMap.getOffsets(U)->push_back(0);
1883    } else {
1884      MIRBuilder.buildCopy(Regs[0], Elt);
1885    }
1886    return true;
1887  }
1888
1889  Register Res = getOrCreateVReg(U);
1890  Register Val = getOrCreateVReg(*U.getOperand(0));
1891  Register Elt = getOrCreateVReg(*U.getOperand(1));
1892  Register Idx = getOrCreateVReg(*U.getOperand(2));
1893  MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
1894  return true;
1895}
1896
1897bool IRTranslator::translateExtractElement(const User &U,
1898                                           MachineIRBuilder &MIRBuilder) {
1899  // If it is a <1 x Ty> vector, use the scalar as it is
1900  // not a legal vector type in LLT.
1901  if (U.getOperand(0)->getType()->getVectorNumElements() == 1) {
1902    Register Elt = getOrCreateVReg(*U.getOperand(0));
1903    auto &Regs = *VMap.getVRegs(U);
1904    if (Regs.empty()) {
1905      Regs.push_back(Elt);
1906      VMap.getOffsets(U)->push_back(0);
1907    } else {
1908      MIRBuilder.buildCopy(Regs[0], Elt);
1909    }
1910    return true;
1911  }
1912  Register Res = getOrCreateVReg(U);
1913  Register Val = getOrCreateVReg(*U.getOperand(0));
1914  const auto &TLI = *MF->getSubtarget().getTargetLowering();
1915  unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits();
1916  Register Idx;
1917  if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
1918    if (CI->getBitWidth() != PreferredVecIdxWidth) {
1919      APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth);
1920      auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
1921      Idx = getOrCreateVReg(*NewIdxCI);
1922    }
1923  }
1924  if (!Idx)
1925    Idx = getOrCreateVReg(*U.getOperand(1));
1926  if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
1927    const LLT &VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
1928    Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx)->getOperand(0).getReg();
1929  }
1930  MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
1931  return true;
1932}
1933
1934bool IRTranslator::translateShuffleVector(const User &U,
1935                                          MachineIRBuilder &MIRBuilder) {
1936  SmallVector<int, 8> Mask;
1937  ShuffleVectorInst::getShuffleMask(cast<Constant>(U.getOperand(2)), Mask);
1938  ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask);
1939  MIRBuilder.buildInstr(TargetOpcode::G_SHUFFLE_VECTOR)
1940      .addDef(getOrCreateVReg(U))
1941      .addUse(getOrCreateVReg(*U.getOperand(0)))
1942      .addUse(getOrCreateVReg(*U.getOperand(1)))
1943      .addShuffleMask(MaskAlloc);
1944  return true;
1945}
1946
1947bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
1948  const PHINode &PI = cast<PHINode>(U);
1949
1950  SmallVector<MachineInstr *, 4> Insts;
1951  for (auto Reg : getOrCreateVRegs(PI)) {
1952    auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
1953    Insts.push_back(MIB.getInstr());
1954  }
1955
1956  PendingPHIs.emplace_back(&PI, std::move(Insts));
1957  return true;
1958}
1959
1960bool IRTranslator::translateAtomicCmpXchg(const User &U,
1961                                          MachineIRBuilder &MIRBuilder) {
1962  const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);
1963
1964  if (I.isWeak())
1965    return false;
1966
1967  auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
1968                              : MachineMemOperand::MONone;
1969  Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
1970
1971  Type *ResType = I.getType();
1972  Type *ValType = ResType->Type::getStructElementType(0);
1973
1974  auto Res = getOrCreateVRegs(I);
1975  Register OldValRes = Res[0];
1976  Register SuccessRes = Res[1];
1977  Register Addr = getOrCreateVReg(*I.getPointerOperand());
1978  Register Cmp = getOrCreateVReg(*I.getCompareOperand());
1979  Register NewVal = getOrCreateVReg(*I.getNewValOperand());
1980
1981  AAMDNodes AAMetadata;
1982  I.getAAMetadata(AAMetadata);
1983
1984  MIRBuilder.buildAtomicCmpXchgWithSuccess(
1985      OldValRes, SuccessRes, Addr, Cmp, NewVal,
1986      *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
1987                                Flags, DL->getTypeStoreSize(ValType),
1988                                getMemOpAlignment(I), AAMetadata, nullptr,
1989                                I.getSyncScopeID(), I.getSuccessOrdering(),
1990                                I.getFailureOrdering()));
1991  return true;
1992}
1993
1994bool IRTranslator::translateAtomicRMW(const User &U,
1995                                      MachineIRBuilder &MIRBuilder) {
1996  const AtomicRMWInst &I = cast<AtomicRMWInst>(U);
1997
1998  auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
1999                              : MachineMemOperand::MONone;
2000  Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
2001
2002  Type *ResType = I.getType();
2003
2004  Register Res = getOrCreateVReg(I);
2005  Register Addr = getOrCreateVReg(*I.getPointerOperand());
2006  Register Val = getOrCreateVReg(*I.getValOperand());
2007
2008  unsigned Opcode = 0;
2009  switch (I.getOperation()) {
2010  default:
2011    return false;
2012  case AtomicRMWInst::Xchg:
2013    Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
2014    break;
2015  case AtomicRMWInst::Add:
2016    Opcode = TargetOpcode::G_ATOMICRMW_ADD;
2017    break;
2018  case AtomicRMWInst::Sub:
2019    Opcode = TargetOpcode::G_ATOMICRMW_SUB;
2020    break;
2021  case AtomicRMWInst::And:
2022    Opcode = TargetOpcode::G_ATOMICRMW_AND;
2023    break;
2024  case AtomicRMWInst::Nand:
2025    Opcode = TargetOpcode::G_ATOMICRMW_NAND;
2026    break;
2027  case AtomicRMWInst::Or:
2028    Opcode = TargetOpcode::G_ATOMICRMW_OR;
2029    break;
2030  case AtomicRMWInst::Xor:
2031    Opcode = TargetOpcode::G_ATOMICRMW_XOR;
2032    break;
2033  case AtomicRMWInst::Max:
2034    Opcode = TargetOpcode::G_ATOMICRMW_MAX;
2035    break;
2036  case AtomicRMWInst::Min:
2037    Opcode = TargetOpcode::G_ATOMICRMW_MIN;
2038    break;
2039  case AtomicRMWInst::UMax:
2040    Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
2041    break;
2042  case AtomicRMWInst::UMin:
2043    Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
2044    break;
2045  case AtomicRMWInst::FAdd:
2046    Opcode = TargetOpcode::G_ATOMICRMW_FADD;
2047    break;
2048  case AtomicRMWInst::FSub:
2049    Opcode = TargetOpcode::G_ATOMICRMW_FSUB;
2050    break;
2051  }
2052
2053  AAMDNodes AAMetadata;
2054  I.getAAMetadata(AAMetadata);
2055
2056  MIRBuilder.buildAtomicRMW(
2057      Opcode, Res, Addr, Val,
2058      *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
2059                                Flags, DL->getTypeStoreSize(ResType),
2060                                getMemOpAlignment(I), AAMetadata,
2061                                nullptr, I.getSyncScopeID(), I.getOrdering()));
2062  return true;
2063}
2064
2065bool IRTranslator::translateFence(const User &U,
2066                                  MachineIRBuilder &MIRBuilder) {
2067  const FenceInst &Fence = cast<FenceInst>(U);
2068  MIRBuilder.buildFence(static_cast<unsigned>(Fence.getOrdering()),
2069                        Fence.getSyncScopeID());
2070  return true;
2071}
2072
2073void IRTranslator::finishPendingPhis() {
2074#ifndef NDEBUG
2075  DILocationVerifier Verifier;
2076  GISelObserverWrapper WrapperObserver(&Verifier);
2077  RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
2078#endif // ifndef NDEBUG
2079  for (auto &Phi : PendingPHIs) {
2080    const PHINode *PI = Phi.first;
2081    ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
2082    MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent();
2083    EntryBuilder->setDebugLoc(PI->getDebugLoc());
2084#ifndef NDEBUG
2085    Verifier.setCurrentInst(PI);
2086#endif // ifndef NDEBUG
2087
2088    SmallSet<const MachineBasicBlock *, 16> SeenPreds;
2089    for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
2090      auto IRPred = PI->getIncomingBlock(i);
2091      ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
2092      for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
2093        if (SeenPreds.count(Pred) || !PhiMBB->isPredecessor(Pred))
2094          continue;
2095        SeenPreds.insert(Pred);
2096        for (unsigned j = 0; j < ValRegs.size(); ++j) {
2097          MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
2098          MIB.addUse(ValRegs[j]);
2099          MIB.addMBB(Pred);
2100        }
2101      }
2102    }
2103  }
2104}
2105
2106bool IRTranslator::valueIsSplit(const Value &V,
2107                                SmallVectorImpl<uint64_t> *Offsets) {
2108  SmallVector<LLT, 4> SplitTys;
2109  if (Offsets && !Offsets->empty())
2110    Offsets->clear();
2111  computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets);
2112  return SplitTys.size() > 1;
2113}
2114
2115bool IRTranslator::translate(const Instruction &Inst) {
2116  CurBuilder->setDebugLoc(Inst.getDebugLoc());
2117  // We only emit constants into the entry block from here. To prevent jumpy
2118  // debug behaviour set the line to 0.
2119  if (const DebugLoc &DL = Inst.getDebugLoc())
2120    EntryBuilder->setDebugLoc(
2121        DebugLoc::get(0, 0, DL.getScope(), DL.getInlinedAt()));
2122  else
2123    EntryBuilder->setDebugLoc(DebugLoc());
2124
2125  switch (Inst.getOpcode()) {
2126#define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
2127  case Instruction::OPCODE:                                                    \
2128    return translate##OPCODE(Inst, *CurBuilder.get());
2129#include "llvm/IR/Instruction.def"
2130  default:
2131    return false;
2132  }
2133}
2134
2135bool IRTranslator::translate(const Constant &C, Register Reg) {
2136  if (auto CI = dyn_cast<ConstantInt>(&C))
2137    EntryBuilder->buildConstant(Reg, *CI);
2138  else if (auto CF = dyn_cast<ConstantFP>(&C))
2139    EntryBuilder->buildFConstant(Reg, *CF);
2140  else if (isa<UndefValue>(C))
2141    EntryBuilder->buildUndef(Reg);
2142  else if (isa<ConstantPointerNull>(C)) {
2143    // As we are trying to build a constant val of 0 into a pointer,
2144    // insert a cast to make them correct with respect to types.
2145    unsigned NullSize = DL->getTypeSizeInBits(C.getType());
2146    auto *ZeroTy = Type::getIntNTy(C.getContext(), NullSize);
2147    auto *ZeroVal = ConstantInt::get(ZeroTy, 0);
2148    Register ZeroReg = getOrCreateVReg(*ZeroVal);
2149    EntryBuilder->buildCast(Reg, ZeroReg);
2150  } else if (auto GV = dyn_cast<GlobalValue>(&C))
2151    EntryBuilder->buildGlobalValue(Reg, GV);
2152  else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
2153    if (!CAZ->getType()->isVectorTy())
2154      return false;
2155    // Return the scalar if it is a <1 x Ty> vector.
2156    if (CAZ->getNumElements() == 1)
2157      return translate(*CAZ->getElementValue(0u), Reg);
2158    SmallVector<Register, 4> Ops;
2159    for (unsigned i = 0; i < CAZ->getNumElements(); ++i) {
2160      Constant &Elt = *CAZ->getElementValue(i);
2161      Ops.push_back(getOrCreateVReg(Elt));
2162    }
2163    EntryBuilder->buildBuildVector(Reg, Ops);
2164  } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
2165    // Return the scalar if it is a <1 x Ty> vector.
2166    if (CV->getNumElements() == 1)
2167      return translate(*CV->getElementAsConstant(0), Reg);
2168    SmallVector<Register, 4> Ops;
2169    for (unsigned i = 0; i < CV->getNumElements(); ++i) {
2170      Constant &Elt = *CV->getElementAsConstant(i);
2171      Ops.push_back(getOrCreateVReg(Elt));
2172    }
2173    EntryBuilder->buildBuildVector(Reg, Ops);
2174  } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
2175    switch(CE->getOpcode()) {
2176#define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
2177  case Instruction::OPCODE:                                                    \
2178    return translate##OPCODE(*CE, *EntryBuilder.get());
2179#include "llvm/IR/Instruction.def"
2180    default:
2181      return false;
2182    }
2183  } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
2184    if (CV->getNumOperands() == 1)
2185      return translate(*CV->getOperand(0), Reg);
2186    SmallVector<Register, 4> Ops;
2187    for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
2188      Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
2189    }
2190    EntryBuilder->buildBuildVector(Reg, Ops);
2191  } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
2192    EntryBuilder->buildBlockAddress(Reg, BA);
2193  } else
2194    return false;
2195
2196  return true;
2197}
2198
2199void IRTranslator::finalizeBasicBlock() {
2200  for (auto &JTCase : SL->JTCases) {
2201    // Emit header first, if it wasn't already emitted.
2202    if (!JTCase.first.Emitted)
2203      emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);
2204
2205    emitJumpTable(JTCase.second, JTCase.second.MBB);
2206  }
2207  SL->JTCases.clear();
2208}
2209
2210void IRTranslator::finalizeFunction() {
2211  // Release the memory used by the different maps we
2212  // needed during the translation.
2213  PendingPHIs.clear();
2214  VMap.reset();
2215  FrameIndices.clear();
2216  MachinePreds.clear();
2217  // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
2218  // to avoid accessing free���d memory (in runOnMachineFunction) and to avoid
2219  // destroying it twice (in ~IRTranslator() and ~LLVMContext())
2220  EntryBuilder.reset();
2221  CurBuilder.reset();
2222  FuncInfo.clear();
2223}
2224
2225/// Returns true if a BasicBlock \p BB within a variadic function contains a
2226/// variadic musttail call.
2227static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB) {
2228  if (!IsVarArg)
2229    return false;
2230
2231  // Walk the block backwards, because tail calls usually only appear at the end
2232  // of a block.
2233  return std::any_of(BB.rbegin(), BB.rend(), [](const Instruction &I) {
2234    const auto *CI = dyn_cast<CallInst>(&I);
2235    return CI && CI->isMustTailCall();
2236  });
2237}
2238
2239bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
2240  MF = &CurMF;
2241  const Function &F = MF->getFunction();
2242  if (F.empty())
2243    return false;
2244  GISelCSEAnalysisWrapper &Wrapper =
2245      getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
2246  // Set the CSEConfig and run the analysis.
2247  GISelCSEInfo *CSEInfo = nullptr;
2248  TPC = &getAnalysis<TargetPassConfig>();
2249  bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
2250                       ? EnableCSEInIRTranslator
2251                       : TPC->isGISelCSEEnabled();
2252
2253  if (EnableCSE) {
2254    EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
2255    CSEInfo = &Wrapper.get(TPC->getCSEConfig());
2256    EntryBuilder->setCSEInfo(CSEInfo);
2257    CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
2258    CurBuilder->setCSEInfo(CSEInfo);
2259  } else {
2260    EntryBuilder = std::make_unique<MachineIRBuilder>();
2261    CurBuilder = std::make_unique<MachineIRBuilder>();
2262  }
2263  CLI = MF->getSubtarget().getCallLowering();
2264  CurBuilder->setMF(*MF);
2265  EntryBuilder->setMF(*MF);
2266  MRI = &MF->getRegInfo();
2267  DL = &F.getParent()->getDataLayout();
2268  ORE = std::make_unique<OptimizationRemarkEmitter>(&F);
2269  FuncInfo.MF = MF;
2270  FuncInfo.BPI = nullptr;
2271  const auto &TLI = *MF->getSubtarget().getTargetLowering();
2272  const TargetMachine &TM = MF->getTarget();
2273  SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);
2274  SL->init(TLI, TM, *DL);
2275
2276  EnableOpts = TM.getOptLevel() != CodeGenOpt::None && !skipFunction(F);
2277
2278  assert(PendingPHIs.empty() && "stale PHIs");
2279
2280  if (!DL->isLittleEndian()) {
2281    // Currently we don't properly handle big endian code.
2282    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
2283                               F.getSubprogram(), &F.getEntryBlock());
2284    R << "unable to translate in big endian mode";
2285    reportTranslationError(*MF, *TPC, *ORE, R);
2286  }
2287
2288  // Release the per-function state when we return, whether we succeeded or not.
2289  auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });
2290
2291  // Setup a separate basic-block for the arguments and constants
2292  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
2293  MF->push_back(EntryBB);
2294  EntryBuilder->setMBB(*EntryBB);
2295
2296  DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHI()->getDebugLoc();
2297  SwiftError.setFunction(CurMF);
2298  SwiftError.createEntriesInEntryBlock(DbgLoc);
2299
2300  bool IsVarArg = F.isVarArg();
2301  bool HasMustTailInVarArgFn = false;
2302
2303  // Create all blocks, in IR order, to preserve the layout.
2304  for (const BasicBlock &BB: F) {
2305    auto *&MBB = BBToMBB[&BB];
2306
2307    MBB = MF->CreateMachineBasicBlock(&BB);
2308    MF->push_back(MBB);
2309
2310    if (BB.hasAddressTaken())
2311      MBB->setHasAddressTaken();
2312
2313    if (!HasMustTailInVarArgFn)
2314      HasMustTailInVarArgFn = checkForMustTailInVarArgFn(IsVarArg, BB);
2315  }
2316
2317  MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn);
2318
2319  // Make our arguments/constants entry block fallthrough to the IR entry block.
2320  EntryBB->addSuccessor(&getMBB(F.front()));
2321
2322  // Lower the actual args into this basic block.
2323  SmallVector<ArrayRef<Register>, 8> VRegArgs;
2324  for (const Argument &Arg: F.args()) {
2325    if (DL->getTypeStoreSize(Arg.getType()) == 0)
2326      continue; // Don't handle zero sized types.
2327    ArrayRef<Register> VRegs = getOrCreateVRegs(Arg);
2328    VRegArgs.push_back(VRegs);
2329
2330    if (Arg.hasSwiftErrorAttr()) {
2331      assert(VRegs.size() == 1 && "Too many vregs for Swift error");
2332      SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
2333    }
2334  }
2335
2336  if (!CLI->lowerFormalArguments(*EntryBuilder.get(), F, VRegArgs)) {
2337    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
2338                               F.getSubprogram(), &F.getEntryBlock());
2339    R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
2340    reportTranslationError(*MF, *TPC, *ORE, R);
2341    return false;
2342  }
2343
2344  // Need to visit defs before uses when translating instructions.
2345  GISelObserverWrapper WrapperObserver;
2346  if (EnableCSE && CSEInfo)
2347    WrapperObserver.addObserver(CSEInfo);
2348  {
2349    ReversePostOrderTraversal<const Function *> RPOT(&F);
2350#ifndef NDEBUG
2351    DILocationVerifier Verifier;
2352    WrapperObserver.addObserver(&Verifier);
2353#endif // ifndef NDEBUG
2354    RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
2355    for (const BasicBlock *BB : RPOT) {
2356      MachineBasicBlock &MBB = getMBB(*BB);
2357      // Set the insertion point of all the following translations to
2358      // the end of this basic block.
2359      CurBuilder->setMBB(MBB);
2360      HasTailCall = false;
2361      for (const Instruction &Inst : *BB) {
2362        // If we translated a tail call in the last step, then we know
2363        // everything after the call is either a return, or something that is
2364        // handled by the call itself. (E.g. a lifetime marker or assume
2365        // intrinsic.) In this case, we should stop translating the block and
2366        // move on.
2367        if (HasTailCall)
2368          break;
2369#ifndef NDEBUG
2370        Verifier.setCurrentInst(&Inst);
2371#endif // ifndef NDEBUG
2372        if (translate(Inst))
2373          continue;
2374
2375        OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
2376                                   Inst.getDebugLoc(), BB);
2377        R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);
2378
2379        if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
2380          std::string InstStrStorage;
2381          raw_string_ostream InstStr(InstStrStorage);
2382          InstStr << Inst;
2383
2384          R << ": '" << InstStr.str() << "'";
2385        }
2386
2387        reportTranslationError(*MF, *TPC, *ORE, R);
2388        return false;
2389      }
2390
2391      finalizeBasicBlock();
2392    }
2393#ifndef NDEBUG
2394    WrapperObserver.removeObserver(&Verifier);
2395#endif
2396  }
2397
2398  finishPendingPhis();
2399
2400  SwiftError.propagateVRegs();
2401
2402  // Merge the argument lowering and constants block with its single
2403  // successor, the LLVM-IR entry block.  We want the basic block to
2404  // be maximal.
2405  assert(EntryBB->succ_size() == 1 &&
2406         "Custom BB used for lowering should have only one successor");
2407  // Get the successor of the current entry block.
2408  MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
2409  assert(NewEntryBB.pred_size() == 1 &&
2410         "LLVM-IR entry block has a predecessor!?");
2411  // Move all the instruction from the current entry block to the
2412  // new entry block.
2413  NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
2414                    EntryBB->end());
2415
2416  // Update the live-in information for the new entry block.
2417  for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
2418    NewEntryBB.addLiveIn(LiveIn);
2419  NewEntryBB.sortUniqueLiveIns();
2420
2421  // Get rid of the now empty basic block.
2422  EntryBB->removeSuccessor(&NewEntryBB);
2423  MF->remove(EntryBB);
2424  MF->DeleteMachineBasicBlock(EntryBB);
2425
2426  assert(&MF->front() == &NewEntryBB &&
2427         "New entry wasn't next in the list of basic block!");
2428
2429  // Initialize stack protector information.
2430  StackProtector &SP = getAnalysis<StackProtector>();
2431  SP.copyToMachineFrameInfo(MF->getFrameInfo());
2432
2433  return false;
2434}
2435