//===- FastISel.cpp - Implementation of the FastISel class ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported.  It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time.  For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support.  In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated.  Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time.  Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators.  More complicated operations currently require
// target-specific code.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/FastISel.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "isel"

// FIXME: Remove this after the feature has proven reliable.
static cl::opt<bool> SinkLocalValues("fast-isel-sink-local-values",
                                     cl::init(true), cl::Hidden,
                                     cl::desc("Sink local values in FastISel"));

STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
                                         "target-independent selector");
STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
                                    "target-specific selector");
STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");

/// Set the current block to which generated machine instructions will be
/// appended.
void FastISel::startNewBlock() {
  assert(LocalValueMap.empty() &&
         "local values should be cleared after finishing a BB");

  // Instructions are appended to FuncInfo.MBB. If the basic block already
  // contains labels or copies, use the last instruction as the last local
  // value.
  EmitStartPt = nullptr;
  if (!FuncInfo.MBB->empty())
    EmitStartPt = &FuncInfo.MBB->back();
  LastLocalValue = EmitStartPt;
}

/// Flush the local CSE map and sink anything we can.
void FastISel::finishBasicBlock() { flushLocalValueMap(); }

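/// Lower the incoming function arguments via the target hook
/// fastLowerArguments and record the resulting registers in ValueMap so that
/// non-entry blocks can refer to them. Returns false to fall back to the
/// SelectionDAG argument lowering.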
bool FastISel::lowerArguments() {
  if (!FuncInfo.CanLowerReturn)
    // Fallback to SDISel argument lowering code to deal with sret pointer
    // parameter.
    return false;

  if (!fastLowerArguments())
    return false;

  // Enter arguments into ValueMap for uses in non-entry BBs.
  for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(),
                                    E = FuncInfo.Fn->arg_end();
       I != E; ++I) {
    DenseMap<const Value *, Register>::iterator VI = LocalValueMap.find(&*I);
    assert(VI != LocalValueMap.end() && "Missed an argument?");
    FuncInfo.ValueMap[&*I] = VI->second;
  }
  return true;
}

/// Return the defined register if this instruction defines exactly one
/// virtual register and uses no other virtual registers. Otherwise return 0.
static Register findSinkableLocalRegDef(MachineInstr &MI) {
  Register RegDef;
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg())
      continue;
    if (MO.isDef()) {
      if (RegDef)
        return 0;
      RegDef = MO.getReg();
    } else if (MO.getReg().isVirtual()) {
      // This is another use of a vreg. Don't try to sink it.
      return Register();
    }
  }
  return RegDef;
}

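/// Flush the local value map: sink or DCE the local value materializations
/// emitted since the last flush, then reset the local-value insertion state
/// for the next region.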
void FastISel::flushLocalValueMap() {
  // Try to sink local values down to their first use so that we can give them
  // a better debug location. This has the side effect of shrinking local value
  // live ranges, which helps out fast regalloc.
  if (SinkLocalValues && LastLocalValue != EmitStartPt) {
    // Sink local value materialization instructions between EmitStartPt and
    // LastLocalValue. Visit them bottom-up, starting from LastLocalValue, to
    // avoid inserting into the range that we're iterating over.
    MachineBasicBlock::reverse_iterator RE =
        EmitStartPt ? MachineBasicBlock::reverse_iterator(EmitStartPt)
                    : FuncInfo.MBB->rend();
    MachineBasicBlock::reverse_iterator RI(LastLocalValue);

    InstOrderMap OrderMap;
    for (; RI != RE;) {
      MachineInstr &LocalMI = *RI;
      ++RI;
      bool Store = true;
      if (!LocalMI.isSafeToMove(nullptr, Store))
        continue;
      Register DefReg = findSinkableLocalRegDef(LocalMI);
      if (DefReg == 0)
        continue;

      sinkLocalValueMaterialization(LocalMI, DefReg, OrderMap);
    }
  }

  LocalValueMap.clear();
  LastLocalValue = EmitStartPt;
  recomputeInsertPt();
  SavedInsertPt = FuncInfo.InsertPt;
  LastFlushPoint = FuncInfo.InsertPt;
}

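/// Return true if \p DefReg is the source register of any pending PHI node
/// update in \p FuncInfo.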
static bool isRegUsedByPhiNodes(Register DefReg,
                                FunctionLoweringInfo &FuncInfo) {
  for (auto &P : FuncInfo.PHINodesToUpdate)
    if (P.second == DefReg)
      return true;
  return false;
}

static bool isTerminatingEHLabel(MachineBasicBlock *MBB, MachineInstr &MI) {
  // Ignore non-EH labels.
  if (!MI.isEHLabel())
    return false;

  // Any EH label outside a landing pad must be for an invoke. Consider it a
  // terminator.
  if (!MBB->isEHPad())
    return true;

  // If this is a landingpad, the first non-phi instruction will be an
  // EH_LABEL. Don't consider that label to be a terminator.
  return MI.getIterator() != MBB->getFirstNonPHI();
}

/// Build a map of instruction orders. Return the first terminator and its
/// order. Consider EH_LABEL instructions to be terminators as well, since
/// local values for phis after invokes must be materialized before the call.
void FastISel::InstOrderMap::initialize(
    MachineBasicBlock *MBB, MachineBasicBlock::iterator LastFlushPoint) {
  unsigned Order = 0;
  for (MachineInstr &I : *MBB) {
    if (!FirstTerminator &&
        (I.isTerminator() || isTerminatingEHLabel(MBB, I))) {
      FirstTerminator = &I;
      FirstTerminatorOrder = Order;
    }
    Orders[&I] = Order++;

    // We don't need to order instructions past the last flush point.
    if (I.getIterator() == LastFlushPoint)
      break;
  }
}

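/// Sink the given local value materialization instruction down to its first
/// use in the block, or erase it if it turns out to be dead and is not needed
/// for a successor PHI node.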
void FastISel::sinkLocalValueMaterialization(MachineInstr &LocalMI,
                                             Register DefReg,
                                             InstOrderMap &OrderMap) {
  // If this register is used by a register fixup, MRI will not contain all
  // the uses until after register fixups, so don't attempt to sink or DCE
  // this instruction. Register fixups typically come from no-op cast
  // instructions, which replace the cast instruction vreg with the local
  // value vreg.
  if (FuncInfo.RegsWithFixups.count(DefReg))
    return;

  // We can DCE this instruction if there are no uses and it wasn't
  // materialized for a successor PHI node.
  bool UsedByPHI = isRegUsedByPhiNodes(DefReg, FuncInfo);
  if (!UsedByPHI && MRI.use_nodbg_empty(DefReg)) {
    if (EmitStartPt == &LocalMI)
      EmitStartPt = EmitStartPt->getPrevNode();
    LLVM_DEBUG(dbgs() << "removing dead local value materialization "
                      << LocalMI);
    OrderMap.Orders.erase(&LocalMI);
    LocalMI.eraseFromParent();
    return;
  }

  // Number the instructions if we haven't yet so we can efficiently find the
  // earliest use.
  if (OrderMap.Orders.empty())
    OrderMap.initialize(FuncInfo.MBB, LastFlushPoint);

  // Find the first user in the BB.
  MachineInstr *FirstUser = nullptr;
  unsigned FirstOrder = std::numeric_limits<unsigned>::max();
  for (MachineInstr &UseInst : MRI.use_nodbg_instructions(DefReg)) {
    auto I = OrderMap.Orders.find(&UseInst);
    assert(I != OrderMap.Orders.end() &&
           "local value used by instruction outside local region");
    unsigned UseOrder = I->second;
    if (UseOrder < FirstOrder) {
      FirstOrder = UseOrder;
      FirstUser = &UseInst;
    }
  }

  // The insertion point will be the first terminator or the first user,
  // whichever came first. If there was no terminator, this must be a
  // fallthrough block and the insertion point is the end of the block.
  MachineBasicBlock::instr_iterator SinkPos;
  if (UsedByPHI && OrderMap.FirstTerminatorOrder < FirstOrder) {
    FirstOrder = OrderMap.FirstTerminatorOrder;
    SinkPos = OrderMap.FirstTerminator->getIterator();
  } else if (FirstUser) {
    SinkPos = FirstUser->getIterator();
  } else {
    assert(UsedByPHI && "must be users if not used by a phi");
    SinkPos = FuncInfo.MBB->instr_end();
  }

  // Collect all DBG_VALUEs before the new insertion position so that we can
  // sink them.
  SmallVector<MachineInstr *, 1> DbgValues;
  for (MachineInstr &DbgVal : MRI.use_instructions(DefReg)) {
    if (!DbgVal.isDebugValue())
      continue;
    unsigned UseOrder = OrderMap.Orders[&DbgVal];
    if (UseOrder < FirstOrder)
      DbgValues.push_back(&DbgVal);
  }

  // Sink LocalMI before SinkPos and assign it the same DebugLoc.
  LLVM_DEBUG(dbgs() << "sinking local value to first use " << LocalMI);
  FuncInfo.MBB->remove(&LocalMI);
  FuncInfo.MBB->insert(SinkPos, &LocalMI);
  if (SinkPos != FuncInfo.MBB->end())
    LocalMI.setDebugLoc(SinkPos->getDebugLoc());

  // Sink any debug values that we've collected.
  for (MachineInstr *DI : DbgValues) {
    FuncInfo.MBB->remove(DI);
    FuncInfo.MBB->insert(SinkPos, DI);
  }
}

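/// Return true if the given value dies at its (single) use in this basic
/// block, so the use may be marked as a kill. Constants, arguments, and
/// values that fast-isel coalesces (no-op casts, all-zero GEPs) are never
/// considered trivially killed.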
bool FastISel::hasTrivialKill(const Value *V) {
  // Don't consider constants or arguments to have trivial kills.
  const Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  // No-op casts are trivially coalesced by fast-isel.
  if (const auto *Cast = dyn_cast<CastInst>(I))
    if (Cast->isNoopCast(DL) && !hasTrivialKill(Cast->getOperand(0)))
      return false;

  // Even if the value has only one use in the LLVM IR, it is possible that
  // FastISel might fold the use into another instruction, in which case there
  // is more than one use at the Machine Instruction level.
  Register Reg = lookUpRegForValue(V);
  if (Reg && !MRI.use_empty(Reg))
    return false;

  // GEPs with all zero indices are trivially coalesced by fast-isel.
  if (const auto *GEP = dyn_cast<GetElementPtrInst>(I))
    if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0)))
      return false;

  // Only instructions with a single use in the same basic block are considered
  // to have trivial kills.
  return I->hasOneUse() &&
         !(I->getOpcode() == Instruction::BitCast ||
           I->getOpcode() == Instruction::PtrToInt ||
           I->getOpcode() == Instruction::IntToPtr) &&
         cast<Instruction>(*I->user_begin())->getParent() == I->getParent();
}

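/// Return the virtual register that holds the given IR value, creating or
/// materializing one if necessary. Returns an invalid register if the value
/// has a type that fast-isel cannot handle.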
Register FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return Register();

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Handle integer promotions, though, because they're common and easy.
    if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return Register();
  }

  // Look up the value to see if we already have a register for it.
  Register Reg = lookUpRegForValue(V);
  if (Reg)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local value area.
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}

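/// Target-independent fallback for materializing a constant (integer, FP,
/// null pointer, alloca address, undef, or constant expression) into a
/// register. Returns an invalid register on failure.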
Register FastISel::materializeConstant(const Value *V, MVT VT) {
  Register Reg;
  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V))
    Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
  else if (isa<ConstantPointerNull>(V))
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg =
        getRegForValue(Constant::getNullValue(DL.getIntPtrType(V->getType())));
  else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
    if (CF->isNullValue())
      Reg = fastMaterializeFloatZero(CF);
    else
      // Try to emit the constant directly.
      Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy(DL);
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      APSInt SIntVal(IntBitWidth, /*isUnsigned=*/false);
      bool isExact;
      (void)Flt.convertToInteger(SIntVal, APFloat::rmTowardZero, &isExact);
      if (isExact) {
        Register IntegerReg =
            getRegForValue(ConstantInt::get(V->getContext(), SIntVal));
        if (IntegerReg)
          Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg,
                           /*Kill=*/false);
      }
    }
  } else if (const auto *Op = dyn_cast<Operator>(V)) {
    if (!selectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !fastSelectInstruction(cast<Instruction>(Op)))
        return 0;
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }
  return Reg;
}

/// Helper for getRegForValue. This function is called when the value isn't
/// already available in a register and must be materialized with new
/// instructions.
Register FastISel::materializeRegForValue(const Value *V, MVT VT) {
  Register Reg;
  // Give the target-specific code a try first.
  if (isa<Constant>(V))
    Reg = fastMaterializeConstant(cast<Constant>(V));

  // If target-specific code couldn't or didn't want to handle the value, then
  // give target-independent code a try.
  if (!Reg)
    Reg = materializeConstant(V, VT);

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }
  return Reg;
}

Register FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, Register>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}

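/// Record that the given IR value now lives in \p Reg (spanning \p NumRegs
/// consecutive registers). If the value already had registers assigned,
/// arrange for the old registers to be replaced via RegFixups.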
void FastISel::updateValueMap(const Value *I, Register Reg, unsigned NumRegs) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return;
  }

  Register &AssignedReg = FuncInfo.ValueMap[I];
  if (!AssignedReg)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    for (unsigned i = 0; i < NumRegs; i++) {
      FuncInfo.RegFixups[AssignedReg + i] = Reg + i;
      FuncInfo.RegsWithFixups.insert(Reg + i);
    }

    AssignedReg = Reg;
  }
}

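/// Materialize a GEP index operand and sign-extend or truncate it to pointer
/// width. Returns the register and whether the register value is killed.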
std::pair<Register, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
  Register IdxN = getRegForValue(Idx);
  if (!IdxN)
    // Unhandled operand. Halt "fast" selection and bail.
    return std::pair<Register, bool>(Register(), false);

  bool IdxNIsKill = hasTrivialKill(Idx);

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  MVT PtrVT = TLI.getPointerTy(DL);
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN,
                      IdxNIsKill);
    IdxNIsKill = true;
  } else if (IdxVT.bitsGT(PtrVT)) {
    IdxN =
        fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  return std::pair<Register, bool>(IdxN, IdxNIsKill);
}

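/// Reset the insertion point to just after the last local value, or to the
/// first non-PHI instruction otherwise, and then skip past any EH_LABELs
/// which must remain at the start of the block.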
void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();

  // Now skip past any EH_LABELs, which must remain at the beginning.
  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
         FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
    ++FuncInfo.InsertPt;
}

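/// Erase the machine instructions in the half-open range [I, E), updating any
/// of the cached block-local pointers that refer into it, and recompute the
/// insertion point.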
void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
                              MachineBasicBlock::iterator E) {
  assert(I.isValid() && E.isValid() && std::distance(I, E) > 0 &&
         "Invalid iterator!");
  while (I != E) {
    if (LastFlushPoint == I)
      LastFlushPoint = E;
    if (SavedInsertPt == I)
      SavedInsertPt = E;
    if (EmitStartPt == I)
      EmitStartPt = E.isValid() ? &*E : nullptr;
    if (LastLocalValue == I)
      LastLocalValue = E.isValid() ? &*E : nullptr;

    MachineInstr *Dead = &*I;
    ++I;
    Dead->eraseFromParent();
    ++NumFastIselDead;
  }
  recomputeInsertPt();
}

FastISel::SavePoint FastISel::enterLocalValueArea() {
  MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
  DebugLoc OldDL = DbgLoc;
  recomputeInsertPt();
  DbgLoc = DebugLoc();
  SavePoint SP = {OldInsertPt, OldDL};
  return SP;
}

void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = &*std::prev(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt.InsertPt;
  DbgLoc = OldInsertPt.DL;
}

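/// Select a binary operator by emitting the target instruction for the given
/// ISD opcode, folding constant operands into an immediate form where
/// possible. Returns false to fall back to SelectionDAG.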
bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 && (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
                          ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  // Check if the first operand is a constant, and handle it as "ri".  At -O0,
  // we don't have anything that canonicalizes operand order.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
    if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
      Register Op1 = getRegForValue(I->getOperand(1));
      if (!Op1)
        return false;
      bool Op1IsKill = hasTrivialKill(I->getOperand(1));

      Register ResultReg =
          fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, Op1IsKill,
                       CI->getZExtValue(), VT.getSimpleVT());
      if (!ResultReg)
        return false;

      // We successfully emitted code for the given LLVM Instruction.
      updateValueMap(I, ResultReg);
      return true;
    }

  Register Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // Check if the second operand is a constant and handle it appropriately.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    uint64_t Imm = CI->getSExtValue();

    // Transform "sdiv exact X, 8" -> "sra X, 3".
    if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
        cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
      Imm = Log2_64(Imm);
      ISDOpcode = ISD::SRA;
    }

    // Transform "urem x, pow2" -> "and x, pow2-1".
    if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
        isPowerOf2_64(Imm)) {
      --Imm;
      ISDOpcode = ISD::AND;
    }

    Register ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
                                      Op0IsKill, Imm, VT.getSimpleVT());
    if (!ResultReg)
      return false;

    // We successfully emitted code for the given LLVM Instruction.
    updateValueMap(I, ResultReg);
    return true;
  }

  Register Op1 = getRegForValue(I->getOperand(1));
  if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool Op1IsKill = hasTrivialKill(I->getOperand(1));

  // Now we have both operands in registers. Emit the instruction.
  Register ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode, Op0, Op0IsKill, Op1, Op1IsKill);
  if (!ResultReg)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, ResultReg);
  return true;
}

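/// Select a getelementptr by walking its indices and accumulating constant
/// offsets into as few ADD instructions as possible; variable indices are
/// scaled by the element size and added separately.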
bool FastISel::selectGetElementPtr(const User *I) {
  Register N = getRegForValue(I->getOperand(0));
  if (!N) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // FIXME: The code below does not handle vector GEPs. Halt "fast" selection
  // and bail.
  if (isa<VectorType>(I->getType()))
    return false;

  bool NIsKill = hasTrivialKill(I->getOperand(0));

  // Keep a running tab of the total offset to coalesce multiple N = N + Offset
  // into a single N = N + TotalOffset.
  uint64_t TotalOffs = 0;
  // FIXME: What's a good SWAG number for MaxOffs?
  uint64_t MaxOffs = 2048;
  MVT VT = TLI.getPointerTy(DL);
  for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          NIsKill = true;
          TotalOffs = 0;
        }
      }
    } else {
      Type *Ty = GTI.getIndexedType();

      // If this is a constant subscript, handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero())
          continue;
        // N = N + Offset
        uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
        TotalOffs += DL.getTypeAllocSize(Ty) * IdxN;
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          NIsKill = true;
          TotalOffs = 0;
        }
        continue;
      }
      if (TotalOffs) {
        N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
        if (!N) // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
        TotalOffs = 0;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = DL.getTypeAllocSize(Ty);
      std::pair<Register, bool> Pair = getRegForGEPIndex(Idx);
      Register IdxN = Pair.first;
      bool IdxNIsKill = Pair.second;
      if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
        if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
          return false;
        IdxNIsKill = true;
      }
      N = fastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
      if (!N) // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }
  if (TotalOffs) {
    N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
    if (!N) // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, N);
  return true;
}

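/// Append the live-variable operands of a stackmap or patchpoint intrinsic,
/// starting at operand \p StartIdx, encoding constants, frame indices, and
/// registers in the form StackMaps expects.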
bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
                                   const CallInst *CI, unsigned StartIdx) {
  for (unsigned i = StartIdx, e = CI->getNumArgOperands(); i != e; ++i) {
    Value *Val = CI->getArgOperand(i);
    // Check for constants and encode them with a StackMaps::ConstantOp prefix.
    if (const auto *C = dyn_cast<ConstantInt>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
    } else if (isa<ConstantPointerNull>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(0));
    } else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
      // Values coming from a stack location also require a special encoding,
      // but that is added later on by the target specific frame index
      // elimination implementation.
      auto SI = FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end())
        Ops.push_back(MachineOperand::CreateFI(SI->second));
      else
        return false;
    } else {
      Register Reg = getRegForValue(Val);
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
    }
  }
  return true;
}

bool FastISel::selectStackmap(const CallInst *I) {
  // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
  //                                  [live variables...])
  assert(I->getCalledFunction()->getReturnType()->isVoidTy() &&
         "Stackmap cannot return a value.");

  // The stackmap intrinsic only records the live variables (the arguments
  // passed to it) and emits NOPS (if requested). Unlike the patchpoint
  // intrinsic, this won't be lowered to a function call. This means we don't
  // have to worry about calling conventions and target-specific lowering code.
  // Instead we perform the call lowering right here.
  //
  // CALLSEQ_START(0, 0...)
  // STACKMAP(id, nbytes, ...)
  // CALLSEQ_END(0, 0)
  //
  SmallVector<MachineOperand, 32> Ops;

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Push live variables for the stack map (skipping the first two arguments
  // <id> and <numBytes>).
  if (!addStackMapLiveVars(Ops, I, 2))
    return false;

  // We are not adding any register mask info here, because the stackmap
  // doesn't clobber anything.

  // Add scratch registers as implicit def and early clobber.
  CallingConv::ID CC = I->getCallingConv();
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
        /*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  auto Builder =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown));
  const MCInstrDesc &MCID = Builder.getInstr()->getDesc();
  for (unsigned I = 0, E = MCID.getNumOperands(); I < E; ++I)
    Builder.addImm(0);

  // Issue STACKMAP.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(TargetOpcode::STACKMAP));
  for (auto const &MO : Ops)
    MIB.add(MO);

  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
      .addImm(0)
      .addImm(0);

  // Inform the Frame Information that we have a stackmap in this function.
  FuncInfo.MF->getFrameInfo().setHasStackMap();

  return true;
}

/// Lower an argument list according to the target calling convention.
///
/// This is a helper for lowering intrinsics that follow a target calling
/// convention or require stack pointer adjustment. Only a subset of the
/// intrinsic's operands need to participate in the calling convention.
bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
                                 unsigned NumArgs, const Value *Callee,
                                 bool ForceRetVoidTy, CallLoweringInfo &CLI) {
  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs; ArgI != ArgE; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(CI, ArgI);
    Args.push_back(Entry);
  }

  Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
                               : CI->getType();
  CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);

  return lowerCallTo(CLI);
}

FastISel::CallLoweringInfo &FastISel::CallLoweringInfo::setCallee(
    const DataLayout &DL, MCContext &Ctx, CallingConv::ID CC, Type *ResultTy,
    StringRef Target, ArgListTy &&ArgsList, unsigned FixedArgs) {
  SmallString<32> MangledName;
  Mangler::getNameWithPrefix(MangledName, Target, DL);
  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
  return setCallee(CC, ResultTy, Sym, std::move(ArgsList), FixedArgs);
}

bool FastISel::selectPatchpoint(const CallInst *I) {
  // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
  //                                                 i32 <numBytes>,
  //                                                 i8* <target>,
  //                                                 i32 <numArgs>,
  //                                                 [Args...],
  //                                                 [live variables...])
  CallingConv::ID CC = I->getCallingConv();
  bool IsAnyRegCC = CC == CallingConv::AnyReg;
  bool HasDef = !I->getType()->isVoidTy();
  Value *Callee =
      I->getOperand(PatchPointOpers::TargetPos)->stripPointerCasts();

  // Get the real number of arguments participating in the call <numArgs>
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
         "Expected a constant integer.");
  const auto *NumArgsVal =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
  unsigned NumArgs = NumArgsVal->getZExtValue();

  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
  // This includes all meta-operands up to but not including CC.
  unsigned NumMetaOpers = PatchPointOpers::CCPos;
  assert(I->getNumArgOperands() >= NumMetaOpers + NumArgs &&
         "Not enough arguments provided to the patchpoint intrinsic");

  // For AnyRegCC the arguments are lowered later on manually.
  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
  CallLoweringInfo CLI;
  CLI.setIsPatchPoint();
  if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
    return false;

  assert(CLI.Call && "No call instruction specified.");

  SmallVector<MachineOperand, 32> Ops;

  // Add an explicit result reg if we use the anyreg calling convention.
  if (IsAnyRegCC && HasDef) {
    assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
    CLI.ResultReg = createResultReg(TLI.getRegClassFor(MVT::i64));
    CLI.NumResultRegs = 1;
    Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*isDef=*/true));
  }

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Add the call target.
  if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) {
    uint64_t CalleeConstAddr =
      cast<ConstantInt>(C->getOperand(0))->getZExtValue();
    Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
  } else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
    if (C->getOpcode() == Instruction::IntToPtr) {
      uint64_t CalleeConstAddr =
        cast<ConstantInt>(C->getOperand(0))->getZExtValue();
      Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
    } else
      llvm_unreachable("Unsupported ConstantExpr.");
  } else if (const auto *GV = dyn_cast<GlobalValue>(Callee)) {
    Ops.push_back(MachineOperand::CreateGA(GV, 0));
  } else if (isa<ConstantPointerNull>(Callee))
    Ops.push_back(MachineOperand::CreateImm(0));
  else
    llvm_unreachable("Unsupported callee address.");

  // Adjust <numArgs> to account for any arguments that have been passed on
  // the stack instead.
  unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
  Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));

  // Add the calling convention
  Ops.push_back(MachineOperand::CreateImm((unsigned)CC));

  // Add the arguments we omitted previously. The register allocator should
  // place these in any free register.
  if (IsAnyRegCC) {
    for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
      Register Reg = getRegForValue(I->getArgOperand(i));
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
    }
  }

  // Push the arguments from the call instruction.
  for (auto Reg : CLI.OutRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));

  // Push live variables for the stack map.
  if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
    return false;

  // Push the register mask info.
  Ops.push_back(MachineOperand::CreateRegMask(
      TRI.getCallPreservedMask(*FuncInfo.MF, CC)));

  // Add scratch registers as implicit def and early clobber.
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
        /*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));

  // Add implicit defs (return values).
  for (auto Reg : CLI.InRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/true,
                                            /*isImp=*/true));

  // Insert the patchpoint instruction before the call generated by the target.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, DbgLoc,
                                    TII.get(TargetOpcode::PATCHPOINT));

  for (auto &MO : Ops)
    MIB.add(MO);

  MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  // Delete the original call instruction.
  CLI.Call->eraseFromParent();

  // Inform the Frame Information that we have a patchpoint in this function.
  FuncInfo.MF->getFrameInfo().setHasPatchPoint();

  if (CLI.NumResultRegs)
    updateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);
  return true;
}

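/// Lower llvm.xray.customevent by emitting a PATCHABLE_EVENT_CALL pseudo
/// instruction; on targets other than x86_64 Linux the intrinsic is simply
/// dropped.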
bool FastISel::selectXRayCustomEvent(const CallInst *I) {
  const auto &Triple = TM.getTargetTriple();
  if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
    return true; // don't do anything to this instruction.
  SmallVector<MachineOperand, 8> Ops;
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
                                          /*isDef=*/false));
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::PATCHABLE_EVENT_CALL));
  for (auto &MO : Ops)
    MIB.add(MO);

  // Insert the Patchable Event Call instruction, which gets lowered properly.
  return true;
}

bool FastISel::selectXRayTypedEvent(const CallInst *I) {
  const auto &Triple = TM.getTargetTriple();
  if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
    return true; // don't do anything to this instruction.
  SmallVector<MachineOperand, 8> Ops;
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(2)),
                                          /*isDef=*/false));
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::PATCHABLE_TYPED_EVENT_CALL));
  for (auto &MO : Ops)
    MIB.add(MO);

  // Insert the Patchable Typed Event Call instruction, which gets lowered
  // properly.
  return true;
}

/// Returns an AttributeList representing the attributes applied to the return
/// value of the given call.
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
  SmallVector<Attribute::AttrKind, 2> Attrs;
  if (CLI.RetSExt)
    Attrs.push_back(Attribute::SExt);
  if (CLI.RetZExt)
    Attrs.push_back(Attribute::ZExt);
  if (CLI.IsInReg)
    Attrs.push_back(Attribute::InReg);

  return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
                            Attrs);
}

bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
                           unsigned NumArgs) {
  MCContext &Ctx = MF->getContext();
  SmallString<32> MangledName;
  Mangler::getNameWithPrefix(MangledName, SymName, DL);
  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
  return lowerCallTo(CI, Sym, NumArgs);
}

bool FastISel::lowerCallTo(const CallInst *CI, MCSymbol *Symbol,
                           unsigned NumArgs) {
  FunctionType *FTy = CI->getFunctionType();
  Type *RetTy = CI->getType();

  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  // Attributes for args start at offset 1, after the return attribute.
  for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(CI, ArgI);
    Args.push_back(Entry);
  }
  TLI.markLibCallAttributes(MF, CI->getCallingConv(), Args);

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FTy, Symbol, std::move(Args), *CI, NumArgs);

  return lowerCallTo(CLI);
}

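/// Lower a call described by \p CLI: compute the return-value and argument
/// flags, delegate the actual call emission to the target's fastLowerCall
/// hook, and record the result registers.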
bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
  // Handle the incoming return values from the call.
  CLI.clearIns();
  SmallVector<EVT, 4> RetTys;
  ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);

  SmallVector<ISD::OutputArg, 4> Outs;
  GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);

  bool CanLowerReturn = TLI.CanLowerReturn(
      CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());

  // FIXME: sret demotion isn't supported yet - bail out.
  if (!CanLowerReturn)
    return false;

  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
    EVT VT = RetTys[I];
    MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
    unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      ISD::InputArg MyFlags;
      MyFlags.VT = RegisterVT;
      MyFlags.ArgVT = VT;
      MyFlags.Used = CLI.IsReturnValueUsed;
      if (CLI.RetSExt)
        MyFlags.Flags.setSExt();
      if (CLI.RetZExt)
        MyFlags.Flags.setZExt();
      if (CLI.IsInReg)
        MyFlags.Flags.setInReg();
      CLI.Ins.push_back(MyFlags);
    }
  }

  // Handle all of the outgoing arguments.
  CLI.clearOuts();
  for (auto &Arg : CLI.getArgs()) {
    Type *FinalType = Arg.Ty;
    if (Arg.IsByVal)
      FinalType = cast<PointerType>(Arg.Ty)->getElementType();
    bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
        FinalType, CLI.CallConv, CLI.IsVarArg);

    ISD::ArgFlagsTy Flags;
    if (Arg.IsZExt)
      Flags.setZExt();
    if (Arg.IsSExt)
      Flags.setSExt();
    if (Arg.IsInReg)
      Flags.setInReg();
    if (Arg.IsSRet)
      Flags.setSRet();
    if (Arg.IsSwiftSelf)
      Flags.setSwiftSelf();
    if (Arg.IsSwiftError)
      Flags.setSwiftError();
    if (Arg.IsCFGuardTarget)
      Flags.setCFGuardTarget();
    if (Arg.IsByVal)
      Flags.setByVal();
    if (Arg.IsInAlloca) {
      Flags.setInAlloca();
      // Set the byval flag for CCAssignFn callbacks that don't know about
      // inalloca. This way we can know how many bytes we should've allocated
      // and how many bytes a callee cleanup function will pop.  If we port
      // inalloca to more targets, we'll have to add custom inalloca handling in
      // the various CC lowering callbacks.
      Flags.setByVal();
    }
    if (Arg.IsPreallocated) {
      Flags.setPreallocated();
      // Set the byval flag for CCAssignFn callbacks that don't know about
      // preallocated. This way we can know how many bytes we should've
      // allocated and how many bytes a callee cleanup function will pop.  If we
      // port preallocated to more targets, we'll have to add custom
      // preallocated handling in the various CC lowering callbacks.
      Flags.setByVal();
    }
    if (Arg.IsByVal || Arg.IsInAlloca || Arg.IsPreallocated) {
      PointerType *Ty = cast<PointerType>(Arg.Ty);
      Type *ElementTy = Ty->getElementType();
      unsigned FrameSize =
          DL.getTypeAllocSize(Arg.ByValType ? Arg.ByValType : ElementTy);

      // For ByVal, alignment should come from FE. BE will guess if this info
      // is not there, but there are cases it cannot get right.
      MaybeAlign FrameAlign = Arg.Alignment;
      if (!FrameAlign)
        FrameAlign = Align(TLI.getByValTypeAlignment(ElementTy, DL));
      Flags.setByValSize(FrameSize);
      Flags.setByValAlign(*FrameAlign);
    }
    if (Arg.IsNest)
      Flags.setNest();
    if (NeedsRegBlock)
      Flags.setInConsecutiveRegs();
    Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));

    CLI.OutVals.push_back(Arg.Val);
    CLI.OutFlags.push_back(Flags);
  }

  if (!fastLowerCall(CLI))
    return false;

  // Set all unused physreg defs as dead.
  assert(CLI.Call && "No call instruction specified.");
  CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  if (CLI.NumResultRegs && CLI.CB)
    updateValueMap(CLI.CB, CLI.ResultReg, CLI.NumResultRegs);

  // Set labels for heapallocsite call.
  if (CLI.CB)
    if (MDNode *MD = CLI.CB->getMetadata("heapallocsite"))
      CLI.Call->setHeapAllocMarker(*MF, MD);

  return true;
}

bool FastISel::lowerCall(const CallInst *CI) {
  FunctionType *FuncTy = CI->getFunctionType();
  Type *RetTy = CI->getType();

  ArgListTy Args;
  ArgListEntry Entry;
  Args.reserve(CI->arg_size());

  for (auto i = CI->arg_begin(), e = CI->arg_end(); i != e; ++i) {
    Value *V = *i;

    // Skip empty types
    if (V->getType()->isEmptyTy())
      continue;

    Entry.Val = V;
    Entry.Ty = V->getType();

    // Skip the first return-type Attribute to get to params.
    Entry.setAttributes(CI, i - CI->arg_begin());
    Args.push_back(Entry);
  }

  // Check if target-independent constraints permit a tail call here.
  // Target-dependent constraints are checked within fastLowerCall.
  bool IsTailCall = CI->isTailCall();
  if (IsTailCall && !isInTailCallPosition(*CI, TM))
    IsTailCall = false;
  if (IsTailCall && MF->getFunction()
                            .getFnAttribute("disable-tail-calls")
                            .getValueAsString() == "true")
    IsTailCall = false;

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FuncTy, CI->getCalledOperand(), std::move(Args), *CI)
      .setTailCall(IsTailCall);

  return lowerCallTo(CLI);
}

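/// Select a call instruction: handle simple (constraint-free) inline asm
/// directly, dispatch intrinsics to selectIntrinsicCall, and lower ordinary
/// calls after flushing the local value map.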
bool FastISel::selectCall(const User *I) {
  const CallInst *Call = cast<CallInst>(I);

  // Handle simple inline asms.
  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledOperand())) {
    // If the inline asm has side effects, then make sure that no local value
    // lives across by flushing the local value map.
    if (IA->hasSideEffects())
      flushLocalValueMap();

    // Don't attempt to handle constraints.
    if (!IA->getConstraintString().empty())
      return false;

    unsigned ExtraInfo = 0;
    if (IA->hasSideEffects())
      ExtraInfo |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      ExtraInfo |= InlineAsm::Extra_IsAlignStack;
    if (Call->isConvergent())
      ExtraInfo |= InlineAsm::Extra_IsConvergent;
    ExtraInfo |= IA->getDialect() * InlineAsm::Extra_AsmDialect;

    MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                      TII.get(TargetOpcode::INLINEASM));
    MIB.addExternalSymbol(IA->getAsmString().c_str());
    MIB.addImm(ExtraInfo);

    const MDNode *SrcLoc = Call->getMetadata("srcloc");
    if (SrcLoc)
      MIB.addMetadata(SrcLoc);

    return true;
  }

  // Handle intrinsic function calls.
  if (const auto *II = dyn_cast<IntrinsicInst>(Call))
    return selectIntrinsicCall(II);

  // Usually, it does not make sense to initialize a value,
  // make an unrelated function call and use the value, because
  // it tends to be spilled on the stack. So, we move the pointer
  // to the last local value to the beginning of the block, so that
  // all the values which have already been materialized
  // appear after the call. It also makes sense to skip intrinsics
  // since they tend to be inlined.
  flushLocalValueMap();

  return lowerCall(Call);
}

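/// Lower the intrinsic calls that fast-isel knows how to handle directly,
/// such as debug intrinsics and lifetime markers; returns false when the
/// intrinsic must be handled elsewhere.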
1365bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
1366  switch (II->getIntrinsicID()) {
1367  default:
1368    break;
1369  // At -O0 we don't care about the lifetime intrinsics.
1370  case Intrinsic::lifetime_start:
1371  case Intrinsic::lifetime_end:
1372  // The donothing intrinsic does, well, nothing.
1373  case Intrinsic::donothing:
1374  // Neither does the sideeffect intrinsic.
1375  case Intrinsic::sideeffect:
1376  // Neither does the assume intrinsic; it's also OK not to codegen its operand.
1377  case Intrinsic::assume:
1378    return true;
1379  case Intrinsic::dbg_declare: {
1380    const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
1381    assert(DI->getVariable() && "Missing variable");
1382    if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
1383      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI
1384                        << " (!hasDebugInfo)\n");
1385      return true;
1386    }
1387
1388    const Value *Address = DI->getAddress();
1389    if (!Address || isa<UndefValue>(Address)) {
1390      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI
1391                        << " (bad/undef address)\n");
1392      return true;
1393    }
1394
1395    // Byval arguments with frame indices were already handled after argument
1396    // lowering and before isel.
1397    const auto *Arg =
1398        dyn_cast<Argument>(Address->stripInBoundsConstantOffsets());
1399    if (Arg && FuncInfo.getArgumentFrameIndex(Arg) != INT_MAX)
1400      return true;
1401
1402    Optional<MachineOperand> Op;
1403    if (Register Reg = lookUpRegForValue(Address))
1404      Op = MachineOperand::CreateReg(Reg, false);
1405
1406    // If we have a VLA that has a "use" in a metadata node that's then used
1407    // here but it has no other uses, then we have a problem. E.g.,
1408    //
1409    //   int foo (const int *x) {
1410    //     char a[*x];
1411    //     return 0;
1412    //   }
1413    //
1414    // If we assign 'a' a vreg and fast isel later on has to use the selection
1415    // DAG isel, it will want to copy the value to the vreg. However, there are
1416    // no uses, which runs counter to what selection DAG isel expects.
1417    if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
1418        (!isa<AllocaInst>(Address) ||
1419         !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
1420      Op = MachineOperand::CreateReg(FuncInfo.InitializeRegForValue(Address),
1421                                     false);
1422
1423    if (Op) {
1424      assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
1425             "Expected inlined-at fields to agree");
1426      // A dbg.declare describes the address of a source variable, so lower it
1427      // into an indirect DBG_VALUE.
1428      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1429              TII.get(TargetOpcode::DBG_VALUE), /*IsIndirect*/ true,
1430              *Op, DI->getVariable(), DI->getExpression());
1431    } else {
1432      // We can't yet handle anything else here because it would require
1433      // generating code, thus altering codegen because of debug info.
1434      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI
1435                        << " (no materialized reg for address)\n");
1436    }
1437    return true;
1438  }
1439  case Intrinsic::dbg_value: {
1440    // This form of DBG_VALUE is target-independent.
1441    const DbgValueInst *DI = cast<DbgValueInst>(II);
1442    const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
1443    const Value *V = DI->getValue();
1444    assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
1445           "Expected inlined-at fields to agree");
1446    if (!V || isa<UndefValue>(V)) {
1447      // Currently the optimizer can produce this; insert an undef to
1448      // help debugging.
1449      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, false, 0U,
1450              DI->getVariable(), DI->getExpression());
1451    } else if (const auto *CI = dyn_cast<ConstantInt>(V)) {
1452      if (CI->getBitWidth() > 64)
1453        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1454            .addCImm(CI)
1455            .addImm(0U)
1456            .addMetadata(DI->getVariable())
1457            .addMetadata(DI->getExpression());
1458      else
1459        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1460            .addImm(CI->getZExtValue())
1461            .addImm(0U)
1462            .addMetadata(DI->getVariable())
1463            .addMetadata(DI->getExpression());
1464    } else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
1465      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1466          .addFPImm(CF)
1467          .addImm(0U)
1468          .addMetadata(DI->getVariable())
1469          .addMetadata(DI->getExpression());
1470    } else if (Register Reg = lookUpRegForValue(V)) {
1471      // FIXME: This does not handle register-indirect values at offset 0.
1472      bool IsIndirect = false;
1473      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect, Reg,
1474              DI->getVariable(), DI->getExpression());
1475    } else {
1476      // We don't know how to handle other cases, so we drop.
1477      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1478    }
1479    return true;
1480  }
1481  case Intrinsic::dbg_label: {
1482    const DbgLabelInst *DI = cast<DbgLabelInst>(II);
1483    assert(DI->getLabel() && "Missing label");
1484    if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
1485      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1486      return true;
1487    }
1488
1489    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1490            TII.get(TargetOpcode::DBG_LABEL)).addMetadata(DI->getLabel());
1491    return true;
1492  }
1493  case Intrinsic::objectsize:
1494    llvm_unreachable("llvm.objectsize.* should have been lowered already");
1495
1496  case Intrinsic::is_constant:
1497    llvm_unreachable("llvm.is.constant.* should have been lowered already");
1498
1499  case Intrinsic::launder_invariant_group:
1500  case Intrinsic::strip_invariant_group:
1501  case Intrinsic::expect: {
1502    Register ResultReg = getRegForValue(II->getArgOperand(0));
1503    if (!ResultReg)
1504      return false;
1505    updateValueMap(II, ResultReg);
1506    return true;
1507  }
1508  case Intrinsic::experimental_stackmap:
1509    return selectStackmap(II);
1510  case Intrinsic::experimental_patchpoint_void:
1511  case Intrinsic::experimental_patchpoint_i64:
1512    return selectPatchpoint(II);
1513
1514  case Intrinsic::xray_customevent:
1515    return selectXRayCustomEvent(II);
1516  case Intrinsic::xray_typedevent:
1517    return selectXRayTypedEvent(II);
1518  }
1519
1520  return fastLowerIntrinsicCall(II);
1521}
1522
1523bool FastISel::selectCast(const User *I, unsigned Opcode) {
1524  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1525  EVT DstVT = TLI.getValueType(DL, I->getType());
1526
1527  if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
1528      !DstVT.isSimple())
1529    // Unhandled type. Halt "fast" selection and bail.
1530    return false;
1531
1532  // Check if the destination type is legal.
1533  if (!TLI.isTypeLegal(DstVT))
1534    return false;
1535
1536  // Check if the source operand is legal.
1537  if (!TLI.isTypeLegal(SrcVT))
1538    return false;
1539
1540  Register InputReg = getRegForValue(I->getOperand(0));
1541  if (!InputReg)
1542    // Unhandled operand.  Halt "fast" selection and bail.
1543    return false;
1544
1545  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));
1546
1547  Register ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
1548                                  Opcode, InputReg, InputRegIsKill);
1549  if (!ResultReg)
1550    return false;
1551
1552  updateValueMap(I, ResultReg);
1553  return true;
1554}
1555
1556bool FastISel::selectBitCast(const User *I) {
1557  // If the bitcast doesn't change the type, just use the operand value.
1558  if (I->getType() == I->getOperand(0)->getType()) {
1559    Register Reg = getRegForValue(I->getOperand(0));
1560    if (!Reg)
1561      return false;
1562    updateValueMap(I, Reg);
1563    return true;
1564  }
1565
1566  // Bitcasts of other values become reg-reg copies or BITCAST operators.
1567  EVT SrcEVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1568  EVT DstEVT = TLI.getValueType(DL, I->getType());
1569  if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
1570      !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
1571    // Unhandled type. Halt "fast" selection and bail.
1572    return false;
1573
1574  MVT SrcVT = SrcEVT.getSimpleVT();
1575  MVT DstVT = DstEVT.getSimpleVT();
1576  Register Op0 = getRegForValue(I->getOperand(0));
1577  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
1578    return false;
1579  bool Op0IsKill = hasTrivialKill(I->getOperand(0));
1580
1581  // First, try to perform the bitcast by inserting a reg-reg copy.
1582  Register ResultReg;
1583  if (SrcVT == DstVT) {
1584    const TargetRegisterClass *SrcClass = TLI.getRegClassFor(SrcVT);
1585    const TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT);
1586    // Don't attempt a cross-class copy. It will likely fail.
1587    if (SrcClass == DstClass) {
1588      ResultReg = createResultReg(DstClass);
1589      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1590              TII.get(TargetOpcode::COPY), ResultReg).addReg(Op0);
1591    }
1592  }
1593
1594  // If the reg-reg copy failed, select a BITCAST opcode.
1595  if (!ResultReg)
1596    ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill);
1597
1598  if (!ResultReg)
1599    return false;
1600
1601  updateValueMap(I, ResultReg);
1602  return true;
1603}
1604
1605bool FastISel::selectFreeze(const User *I) {
1606  Register Reg = getRegForValue(I->getOperand(0));
1607  if (!Reg)
1608    // Unhandled operand.
1609    return false;
1610
1611  EVT ETy = TLI.getValueType(DL, I->getOperand(0)->getType());
1612  if (ETy == MVT::Other || !TLI.isTypeLegal(ETy))
1613    // Unhandled type, bail out.
1614    return false;
1615
1616  MVT Ty = ETy.getSimpleVT();
1617  const TargetRegisterClass *TyRegClass = TLI.getRegClassFor(Ty);
1618  Register ResultReg = createResultReg(TyRegClass);
1619  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1620          TII.get(TargetOpcode::COPY), ResultReg).addReg(Reg);
1621
1622  updateValueMap(I, ResultReg);
1623  return true;
1624}
1625
1626// Remove local value instructions starting from the instruction after
1627// SavedLastLocalValue to the current function insert point.
1628void FastISel::removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue) {
1630  MachineInstr *CurLastLocalValue = getLastLocalValue();
1631  if (CurLastLocalValue != SavedLastLocalValue) {
1632    // Find the first local value instruction to be deleted.
1633    // This is the instruction after SavedLastLocalValue if it is non-NULL.
1634    // Otherwise it's the first instruction in the block.
1635    MachineBasicBlock::iterator FirstDeadInst(SavedLastLocalValue);
1636    if (SavedLastLocalValue)
1637      ++FirstDeadInst;
1638    else
1639      FirstDeadInst = FuncInfo.MBB->getFirstNonPHI();
1640    setLastLocalValue(SavedLastLocalValue);
1641    removeDeadCode(FirstDeadInst, FuncInfo.InsertPt);
1642  }
1643}
1644
1645bool FastISel::selectInstruction(const Instruction *I) {
1646  MachineInstr *SavedLastLocalValue = getLastLocalValue();
1647  // Just before the terminator instruction, insert instructions to
1648  // feed PHI nodes in successor blocks.
1649  if (I->isTerminator()) {
1650    if (!handlePHINodesInSuccessorBlocks(I->getParent())) {
1651      // PHI node handling may have generated local value instructions,
1652      // even though it failed to handle all PHI nodes.
1653      // We remove these instructions because SelectionDAGISel will generate
1654      // them again.
1655      removeDeadLocalValueCode(SavedLastLocalValue);
1656      return false;
1657    }
1658  }
1659
1660  // FastISel does not handle any operand bundles except OB_funclet.
1661  if (auto *Call = dyn_cast<CallBase>(I))
1662    for (unsigned i = 0, e = Call->getNumOperandBundles(); i != e; ++i)
1663      if (Call->getOperandBundleAt(i).getTagID() != LLVMContext::OB_funclet)
1664        return false;
1665
1666  DbgLoc = I->getDebugLoc();
1667
1668  SavedInsertPt = FuncInfo.InsertPt;
1669
1670  if (const auto *Call = dyn_cast<CallInst>(I)) {
1671    const Function *F = Call->getCalledFunction();
1672    LibFunc Func;
1673
1674    // As a special case, don't handle calls to builtin library functions that
1675    // may be translated directly to target instructions.
1676    if (F && !F->hasLocalLinkage() && F->hasName() &&
1677        LibInfo->getLibFunc(F->getName(), Func) &&
1678        LibInfo->hasOptimizedCodeGen(Func))
1679      return false;
1680
1681    // Don't handle Intrinsic::trap if a trap function is specified.
1682    if (F && F->getIntrinsicID() == Intrinsic::trap &&
1683        Call->hasFnAttr("trap-func-name"))
1684      return false;
1685  }
1686
1687  // First, try doing target-independent selection.
1688  if (!SkipTargetIndependentISel) {
1689    if (selectOperator(I, I->getOpcode())) {
1690      ++NumFastIselSuccessIndependent;
1691      DbgLoc = DebugLoc();
1692      return true;
1693    }
1694    // Remove dead code.
1695    recomputeInsertPt();
1696    if (SavedInsertPt != FuncInfo.InsertPt)
1697      removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
1698    SavedInsertPt = FuncInfo.InsertPt;
1699  }
1700  // Next, try calling the target to attempt to handle the instruction.
1701  if (fastSelectInstruction(I)) {
1702    ++NumFastIselSuccessTarget;
1703    DbgLoc = DebugLoc();
1704    return true;
1705  }
1706  // Remove dead code.
1707  recomputeInsertPt();
1708  if (SavedInsertPt != FuncInfo.InsertPt)
1709    removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
1710
1711  DbgLoc = DebugLoc();
1712  // Undo phi node updates, because they will be added again by SelectionDAG.
1713  if (I->isTerminator()) {
1714    // PHI node handling may have generated local value instructions.
1715    // We remove them because SelectionDAGISel will generate them again.
1716    removeDeadLocalValueCode(SavedLastLocalValue);
1717    FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
1718  }
1719  return false;
1720}
1721
1722/// Emit an unconditional branch to the given block, unless it is the immediate
1723/// (fall-through) successor, and update the CFG.
1724void FastISel::fastEmitBranch(MachineBasicBlock *MSucc,
1725                              const DebugLoc &DbgLoc) {
1726  if (FuncInfo.MBB->getBasicBlock()->sizeWithoutDebug() > 1 &&
1727      FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
1728    // This is the unconditional fall-through case, which needs no
1729    // instructions; if the branch were the only non-debug instruction in
1730    // the block, we would emit it anyway (below) for more accurate line info.
1731  } else {
1732    // The unconditional branch case.
1733    TII.insertBranch(*FuncInfo.MBB, MSucc, nullptr,
1734                     SmallVector<MachineOperand, 0>(), DbgLoc);
1735  }
1736  if (FuncInfo.BPI) {
1737    auto BranchProbability = FuncInfo.BPI->getEdgeProbability(
1738        FuncInfo.MBB->getBasicBlock(), MSucc->getBasicBlock());
1739    FuncInfo.MBB->addSuccessor(MSucc, BranchProbability);
1740  } else
1741    FuncInfo.MBB->addSuccessorWithoutProb(MSucc);
1742}
1743
1744void FastISel::finishCondBranch(const BasicBlock *BranchBB,
1745                                MachineBasicBlock *TrueMBB,
1746                                MachineBasicBlock *FalseMBB) {
1747  // Add TrueMBB as a successor unless it is equal to FalseMBB: this can
1748  // happen in degenerate IR, and MachineIR forbids having a block appear
1749  // twice in the successor/predecessor lists.
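  // A minimal example of such degenerate IR, where both edges of a conditional
  // branch target the same block:
  //
  //   br i1 %cond, label %bb, label %bb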
1750  if (TrueMBB != FalseMBB) {
1751    if (FuncInfo.BPI) {
1752      auto BranchProbability =
1753          FuncInfo.BPI->getEdgeProbability(BranchBB, TrueMBB->getBasicBlock());
1754      FuncInfo.MBB->addSuccessor(TrueMBB, BranchProbability);
1755    } else
1756      FuncInfo.MBB->addSuccessorWithoutProb(TrueMBB);
1757  }
1758
1759  fastEmitBranch(FalseMBB, DbgLoc);
1760}
1761
1762/// Emit an FNeg operation.
1763bool FastISel::selectFNeg(const User *I, const Value *In) {
1764  Register OpReg = getRegForValue(In);
1765  if (!OpReg)
1766    return false;
1767  bool OpRegIsKill = hasTrivialKill(In);
1768
1769  // If the target has ISD::FNEG, use it.
1770  EVT VT = TLI.getValueType(DL, I->getType());
1771  Register ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
1772                                  OpReg, OpRegIsKill);
1773  if (ResultReg) {
1774    updateValueMap(I, ResultReg);
1775    return true;
1776  }
1777
1778  // Bitcast the value to integer, twiddle the sign bit with xor,
1779  // and then bitcast it back to floating-point.
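  // For example (illustrative), negating an f32 value amounts to:
  //   bitcast f32 -> i32, xor with 0x80000000 (the sign bit), bitcast back.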
1780  if (VT.getSizeInBits() > 64)
1781    return false;
1782  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
1783  if (!TLI.isTypeLegal(IntVT))
1784    return false;
1785
1786  Register IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
1787                               ISD::BITCAST, OpReg, OpRegIsKill);
1788  if (!IntReg)
1789    return false;
1790
1791  Register IntResultReg = fastEmit_ri_(
1792      IntVT.getSimpleVT(), ISD::XOR, IntReg, /*IsKill=*/true,
1793      UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
1794  if (!IntResultReg)
1795    return false;
1796
1797  ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
1798                         IntResultReg, /*IsKill=*/true);
1799  if (!ResultReg)
1800    return false;
1801
1802  updateValueMap(I, ResultReg);
1803  return true;
1804}
1805
1806bool FastISel::selectExtractValue(const User *U) {
1807  const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
1808  if (!EVI)
1809    return false;
1810
1811  // Make sure we only try to handle extracts with a legal result.  But also
1812  // allow i1 because it's easy.
1813  EVT RealVT = TLI.getValueType(DL, EVI->getType(), /*AllowUnknown=*/true);
1814  if (!RealVT.isSimple())
1815    return false;
1816  MVT VT = RealVT.getSimpleVT();
1817  if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
1818    return false;
1819
1820  const Value *Op0 = EVI->getOperand(0);
1821  Type *AggTy = Op0->getType();
1822
1823  // Get the base result register.
1824  unsigned ResultReg;
1825  DenseMap<const Value *, Register>::iterator I = FuncInfo.ValueMap.find(Op0);
1826  if (I != FuncInfo.ValueMap.end())
1827    ResultReg = I->second;
1828  else if (isa<Instruction>(Op0))
1829    ResultReg = FuncInfo.InitializeRegForValue(Op0);
1830  else
1831    return false; // fast-isel can't handle aggregate constants at the moment
1832
1833  // Get the actual result register, which is an offset from the base register.
1834  unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());
1835
1836  SmallVector<EVT, 4> AggValueVTs;
1837  ComputeValueVTs(TLI, DL, AggTy, AggValueVTs);
1838
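  // For example (illustrative): for "extractvalue {i64, i32} %agg, 1" the
  // result is the base register advanced by however many registers the target
  // uses for the leading i64 field.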
1839  for (unsigned i = 0; i < VTIndex; i++)
1840    ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);
1841
1842  updateValueMap(EVI, ResultReg);
1843  return true;
1844}
1845
1846bool FastISel::selectOperator(const User *I, unsigned Opcode) {
1847  switch (Opcode) {
1848  case Instruction::Add:
1849    return selectBinaryOp(I, ISD::ADD);
1850  case Instruction::FAdd:
1851    return selectBinaryOp(I, ISD::FADD);
1852  case Instruction::Sub:
1853    return selectBinaryOp(I, ISD::SUB);
1854  case Instruction::FSub: {
1855    // FNeg is currently represented in LLVM IR as a special case of FSub.
1856    Value *X;
1857    if (match(I, m_FNeg(m_Value(X))))
1858       return selectFNeg(I, X);
1859    return selectBinaryOp(I, ISD::FSUB);
1860  }
1861  case Instruction::Mul:
1862    return selectBinaryOp(I, ISD::MUL);
1863  case Instruction::FMul:
1864    return selectBinaryOp(I, ISD::FMUL);
1865  case Instruction::SDiv:
1866    return selectBinaryOp(I, ISD::SDIV);
1867  case Instruction::UDiv:
1868    return selectBinaryOp(I, ISD::UDIV);
1869  case Instruction::FDiv:
1870    return selectBinaryOp(I, ISD::FDIV);
1871  case Instruction::SRem:
1872    return selectBinaryOp(I, ISD::SREM);
1873  case Instruction::URem:
1874    return selectBinaryOp(I, ISD::UREM);
1875  case Instruction::FRem:
1876    return selectBinaryOp(I, ISD::FREM);
1877  case Instruction::Shl:
1878    return selectBinaryOp(I, ISD::SHL);
1879  case Instruction::LShr:
1880    return selectBinaryOp(I, ISD::SRL);
1881  case Instruction::AShr:
1882    return selectBinaryOp(I, ISD::SRA);
1883  case Instruction::And:
1884    return selectBinaryOp(I, ISD::AND);
1885  case Instruction::Or:
1886    return selectBinaryOp(I, ISD::OR);
1887  case Instruction::Xor:
1888    return selectBinaryOp(I, ISD::XOR);
1889
1890  case Instruction::FNeg:
1891    return selectFNeg(I, I->getOperand(0));
1892
1893  case Instruction::GetElementPtr:
1894    return selectGetElementPtr(I);
1895
1896  case Instruction::Br: {
1897    const BranchInst *BI = cast<BranchInst>(I);
1898
1899    if (BI->isUnconditional()) {
1900      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
1901      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
1902      fastEmitBranch(MSucc, BI->getDebugLoc());
1903      return true;
1904    }
1905
1906    // Conditional branches are not handled yet.
1907    // Halt "fast" selection and bail.
1908    return false;
1909  }
1910
1911  case Instruction::Unreachable:
1912    if (TM.Options.TrapUnreachable)
1913      return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
1914    else
1915      return true;
1916
1917  case Instruction::Alloca:
1918    // FunctionLowering has the static-sized case covered.
1919    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
1920      return true;
1921
1922    // Dynamic-sized alloca is not handled yet.
1923    return false;
1924
1925  case Instruction::Call:
1926    // On AIX, call lowering uses the DAG-ISEL path currently so that the
1927    // callee of the direct function call instruction will be mapped to the
1928    // symbol for the function's entry point, which is distinct from the
1929    // function descriptor symbol. The latter is the symbol whose XCOFF symbol
1930    // name is the C-linkage name of the source level function.
1931    if (TM.getTargetTriple().isOSAIX())
1932      return false;
1933    return selectCall(I);
1934
1935  case Instruction::BitCast:
1936    return selectBitCast(I);
1937
1938  case Instruction::FPToSI:
1939    return selectCast(I, ISD::FP_TO_SINT);
1940  case Instruction::ZExt:
1941    return selectCast(I, ISD::ZERO_EXTEND);
1942  case Instruction::SExt:
1943    return selectCast(I, ISD::SIGN_EXTEND);
1944  case Instruction::Trunc:
1945    return selectCast(I, ISD::TRUNCATE);
1946  case Instruction::SIToFP:
1947    return selectCast(I, ISD::SINT_TO_FP);
1948
1949  case Instruction::IntToPtr: // Deliberate fall-through.
1950  case Instruction::PtrToInt: {
1951    EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1952    EVT DstVT = TLI.getValueType(DL, I->getType());
1953    if (DstVT.bitsGT(SrcVT))
1954      return selectCast(I, ISD::ZERO_EXTEND);
1955    if (DstVT.bitsLT(SrcVT))
1956      return selectCast(I, ISD::TRUNCATE);
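    // Same size: for example (illustrative), "ptrtoint i8* %p to i64" on a
    // 64-bit target simply reuses the pointer's register.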
1957    Register Reg = getRegForValue(I->getOperand(0));
1958    if (!Reg)
1959      return false;
1960    updateValueMap(I, Reg);
1961    return true;
1962  }
1963
1964  case Instruction::ExtractValue:
1965    return selectExtractValue(I);
1966
1967  case Instruction::Freeze:
1968    return selectFreeze(I);
1969
1970  case Instruction::PHI:
1971    llvm_unreachable("FastISel shouldn't visit PHI nodes!");
1972
1973  default:
1974    // Unhandled instruction. Halt "fast" selection and bail.
1975    return false;
1976  }
1977}
1978
1979FastISel::FastISel(FunctionLoweringInfo &FuncInfo,
1980                   const TargetLibraryInfo *LibInfo,
1981                   bool SkipTargetIndependentISel)
1982    : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
1983      MFI(FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
1984      TM(FuncInfo.MF->getTarget()), DL(MF->getDataLayout()),
1985      TII(*MF->getSubtarget().getInstrInfo()),
1986      TLI(*MF->getSubtarget().getTargetLowering()),
1987      TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
1988      SkipTargetIndependentISel(SkipTargetIndependentISel),
1989      LastLocalValue(nullptr), EmitStartPt(nullptr) {}
1990
1991FastISel::~FastISel() = default;
1992
1993bool FastISel::fastLowerArguments() { return false; }
1994
1995bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }
1996
1997bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
1998  return false;
1999}
2000
2001unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }
2002
2003unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/,
2004                              bool /*Op0IsKill*/) {
2005  return 0;
2006}
2007
2008unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
2009                               bool /*Op0IsKill*/, unsigned /*Op1*/,
2010                               bool /*Op1IsKill*/) {
2011  return 0;
2012}
2013
2014unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
2015  return 0;
2016}
2017
2018unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
2019                              const ConstantFP * /*FPImm*/) {
2020  return 0;
2021}
2022
2023unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
2024                               bool /*Op0IsKill*/, uint64_t /*Imm*/) {
2025  return 0;
2026}
2027
2028/// This method is a wrapper of fastEmit_ri. It first tries to emit an
2029/// instruction with an immediate operand using fastEmit_ri.
2030/// If that fails, it materializes the immediate into a register and tries
2031/// fastEmit_rr instead.
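///
/// For example (illustrative): for an (add x, 42) where the target has no
/// add-with-immediate pattern, 42 is materialized into its own register and
/// the add is then emitted register-register via fastEmit_rr.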
2032Register FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
2033                                bool Op0IsKill, uint64_t Imm, MVT ImmType) {
2034  // If this is a multiply by a power of two, emit this as a shift left.
2035  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
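    // mul x, 8 -> shl x, 3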
2036    Opcode = ISD::SHL;
2037    Imm = Log2_64(Imm);
2038  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
2039    // div x, 8 -> srl x, 3
2040    Opcode = ISD::SRL;
2041    Imm = Log2_64(Imm);
2042  }
2043
2044  // Horrible hack (to be removed): check to make sure shift amounts are
2045  // in range.
2046  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
2047      Imm >= VT.getSizeInBits())
2048    return 0;
2049
2050  // First check if immediate type is legal. If not, we can't use the ri form.
2051  Register ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
2052  if (ResultReg)
2053    return ResultReg;
2054  Register MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
2055  bool IsImmKill = true;
2056  if (!MaterialReg) {
2057    // This is a bit ugly/slow, but failing here means falling out of
2058    // fast-isel, which would be very slow.
2059    IntegerType *ITy =
2060        IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits());
2061    MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
2062    if (!MaterialReg)
2063      return 0;
2064    // FIXME: If the materialized register here has no uses yet then this
2065    // will be the first use and we should be able to mark it as killed.
2066    // However, the local value area for materializing constant expressions
2067    // grows down, not up, which means that any constant expressions we generate
2068    // later which also use 'Imm' could be after this instruction and therefore
2069    // after this kill.
2070    IsImmKill = false;
2071  }
2072  return fastEmit_rr(VT, VT, Opcode, Op0, Op0IsKill, MaterialReg, IsImmKill);
2073}
2074
2075Register FastISel::createResultReg(const TargetRegisterClass *RC) {
2076  return MRI.createVirtualRegister(RC);
2077}
2078
2079Register FastISel::constrainOperandRegClass(const MCInstrDesc &II, Register Op,
2080                                            unsigned OpNum) {
2081  if (Op.isVirtual()) {
2082    const TargetRegisterClass *RegClass =
2083        TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
2084    if (!MRI.constrainRegClass(Op, RegClass)) {
2085      // If it's not legal to COPY between the register classes, something
2086      // has gone very wrong before we got here.
2087      Register NewOp = createResultReg(RegClass);
2088      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2089              TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
2090      return NewOp;
2091    }
2092  }
2093  return Op;
2094}
2095
2096Register FastISel::fastEmitInst_(unsigned MachineInstOpcode,
2097                                 const TargetRegisterClass *RC) {
2098  Register ResultReg = createResultReg(RC);
2099  const MCInstrDesc &II = TII.get(MachineInstOpcode);
2100
2101  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg);
2102  return ResultReg;
2103}
2104
2105Register FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
2106                                  const TargetRegisterClass *RC, unsigned Op0,
2107                                  bool Op0IsKill) {
2108  const MCInstrDesc &II = TII.get(MachineInstOpcode);
2109
2110  Register ResultReg = createResultReg(RC);
2111  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2112
2113  if (II.getNumDefs() >= 1)
2114    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
2115        .addReg(Op0, getKillRegState(Op0IsKill));
2116  else {
2117    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
2118        .addReg(Op0, getKillRegState(Op0IsKill));
2119    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2120            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
2121  }
2122
2123  return ResultReg;
2124}
2125
2126Register FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
2127                                   const TargetRegisterClass *RC, unsigned Op0,
2128                                   bool Op0IsKill, unsigned Op1,
2129                                   bool Op1IsKill) {
2130  const MCInstrDesc &II = TII.get(MachineInstOpcode);
2131
2132  Register ResultReg = createResultReg(RC);
2133  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2134  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
2135
2136  if (II.getNumDefs() >= 1)
2137    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
2138        .addReg(Op0, getKillRegState(Op0IsKill))
2139        .addReg(Op1, getKillRegState(Op1IsKill));
2140  else {
2141    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
2142        .addReg(Op0, getKillRegState(Op0IsKill))
2143        .addReg(Op1, getKillRegState(Op1IsKill));
2144    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2145            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
2146  }
2147  return ResultReg;
2148}
2149
2150Register FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
2151                                    const TargetRegisterClass *RC, unsigned Op0,
2152                                    bool Op0IsKill, unsigned Op1,
2153                                    bool Op1IsKill, unsigned Op2,
2154                                    bool Op2IsKill) {
2155  const MCInstrDesc &II = TII.get(MachineInstOpcode);
2156
2157  Register ResultReg = createResultReg(RC);
2158  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2159  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
2160  Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);
2161
2162  if (II.getNumDefs() >= 1)
2163    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
2164        .addReg(Op0, getKillRegState(Op0IsKill))
2165        .addReg(Op1, getKillRegState(Op1IsKill))
2166        .addReg(Op2, getKillRegState(Op2IsKill));
2167  else {
2168    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
2169        .addReg(Op0, getKillRegState(Op0IsKill))
2170        .addReg(Op1, getKillRegState(Op1IsKill))
2171        .addReg(Op2, getKillRegState(Op2IsKill));
2172    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2173            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
2174  }
2175  return ResultReg;
2176}
2177
2178Register FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
2179                                   const TargetRegisterClass *RC, unsigned Op0,
2180                                   bool Op0IsKill, uint64_t Imm) {
2181  const MCInstrDesc &II = TII.get(MachineInstOpcode);
2182
2183  Register ResultReg = createResultReg(RC);
2184  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2185
2186  if (II.getNumDefs() >= 1)
2187    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
2188        .addReg(Op0, getKillRegState(Op0IsKill))
2189        .addImm(Imm);
2190  else {
2191    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
2192        .addReg(Op0, getKillRegState(Op0IsKill))
2193        .addImm(Imm);
2194    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2195            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
2196  }
2197  return ResultReg;
2198}
2199
2200Register FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
2201                                    const TargetRegisterClass *RC, unsigned Op0,
2202                                    bool Op0IsKill, uint64_t Imm1,
2203                                    uint64_t Imm2) {
2204  const MCInstrDesc &II = TII.get(MachineInstOpcode);
2205
2206  Register ResultReg = createResultReg(RC);
2207  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2208
2209  if (II.getNumDefs() >= 1)
2210    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
2211        .addReg(Op0, getKillRegState(Op0IsKill))
2212        .addImm(Imm1)
2213        .addImm(Imm2);
2214  else {
2215    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
2216        .addReg(Op0, getKillRegState(Op0IsKill))
2217        .addImm(Imm1)
2218        .addImm(Imm2);
2219    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2220            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
2221  }
2222  return ResultReg;
2223}
2224
2225Register FastISel::fastEmitInst_f(unsigned MachineInstOpcode,
2226                                  const TargetRegisterClass *RC,
2227                                  const ConstantFP *FPImm) {
2228  const MCInstrDesc &II = TII.get(MachineInstOpcode);
2229
2230  Register ResultReg = createResultReg(RC);
2231
2232  if (II.getNumDefs() >= 1)
2233    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
2234        .addFPImm(FPImm);
2235  else {
2236    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
2237        .addFPImm(FPImm);
2238    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2239            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
2240  }
2241  return ResultReg;
2242}
2243
2244Register FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
2245                                    const TargetRegisterClass *RC, unsigned Op0,
2246                                    bool Op0IsKill, unsigned Op1,
2247                                    bool Op1IsKill, uint64_t Imm) {
2248  const MCInstrDesc &II = TII.get(MachineInstOpcode);
2249
2250  Register ResultReg = createResultReg(RC);
2251  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2252  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
2253
2254  if (II.getNumDefs() >= 1)
2255    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
2256        .addReg(Op0, getKillRegState(Op0IsKill))
2257        .addReg(Op1, getKillRegState(Op1IsKill))
2258        .addImm(Imm);
2259  else {
2260    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
2261        .addReg(Op0, getKillRegState(Op0IsKill))
2262        .addReg(Op1, getKillRegState(Op1IsKill))
2263        .addImm(Imm);
2264    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2265            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
2266  }
2267  return ResultReg;
2268}
2269
2270Register FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
2271                                  const TargetRegisterClass *RC, uint64_t Imm) {
2272  Register ResultReg = createResultReg(RC);
2273  const MCInstrDesc &II = TII.get(MachineInstOpcode);
2274
2275  if (II.getNumDefs() >= 1)
2276    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
2277        .addImm(Imm);
2278  else {
2279    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm);
2280    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2281            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
2282  }
2283  return ResultReg;
2284}
2285
2286Register FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
2287                                              bool Op0IsKill, uint32_t Idx) {
2288  Register ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
2289  assert(Register::isVirtualRegister(Op0) &&
2290         "Cannot yet extract from physregs");
2291  const TargetRegisterClass *RC = MRI.getRegClass(Op0);
2292  MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
2293  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
2294          ResultReg).addReg(Op0, getKillRegState(Op0IsKill), Idx);
2295  return ResultReg;
2296}
2297
2298/// Emit MachineInstrs to compute the value of Op0 with all but the least
2299/// significant bit set to zero.
2300Register FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
2301  return fastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
2302}
2303
2304/// Handle PHI nodes in successor blocks.  Emit code to ensure constants are
2305/// copied into registers when needed.  Remember the virtual registers that
2306/// need to be added to the Machine PHI nodes as input.  We cannot just
2307/// directly add them, because expansion might result in multiple MBBs for one
2308/// BB.  As such, the start of the BB might correspond to a different MBB than
2309/// the end.
2310bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
2311  const Instruction *TI = LLVMBB->getTerminator();
2312
2313  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
2314  FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();
2315
2316  // Check successor nodes' PHI nodes that expect a constant to be available
2317  // from this block.
2318  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
2319    const BasicBlock *SuccBB = TI->getSuccessor(succ);
2320    if (!isa<PHINode>(SuccBB->begin()))
2321      continue;
2322    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
2323
2324    // If this terminator has multiple identical successors (common for
2325    // switches), only handle each succ once.
2326    if (!SuccsHandled.insert(SuccMBB).second)
2327      continue;
2328
2329    MachineBasicBlock::iterator MBBI = SuccMBB->begin();
2330
2331    // At this point we know that there is a 1-1 correspondence between LLVM PHI
2332    // nodes and Machine PHI nodes, but the incoming operands have not been
2333    // emitted yet.
2334    for (const PHINode &PN : SuccBB->phis()) {
2335      // Ignore dead phi's.
2336      if (PN.use_empty())
2337        continue;
2338
2339      // Only handle legal types. Two interesting things to note here. First,
2340      // by bailing out early, we may leave behind some dead instructions,
2341      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
2342      // own moves. Second, this check is necessary because FastISel doesn't
2343      // use CreateRegs to create registers, so it always creates
2344      // exactly one register for each non-void instruction.
2345      EVT VT = TLI.getValueType(DL, PN.getType(), /*AllowUnknown=*/true);
2346      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
2347        // Handle integer promotions, though, because they're common and easy.
2348        if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
2349          FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
2350          return false;
2351        }
2352      }
2353
2354      const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
2355
2356      // Set the DebugLoc for the copy. Prefer the location of the operand
2357      // if there is one; use the location of the PHI otherwise.
2358      DbgLoc = PN.getDebugLoc();
2359      if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
2360        DbgLoc = Inst->getDebugLoc();
2361
2362      Register Reg = getRegForValue(PHIOp);
2363      if (!Reg) {
2364        FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
2365        return false;
2366      }
2367      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(&*MBBI++, Reg));
2368      DbgLoc = DebugLoc();
2369    }
2370  }
2371
2372  return true;
2373}
2374
2375bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
2376  assert(LI->hasOneUse() &&
2377         "tryToFoldLoad expected a LoadInst with a single use");
2378  // We know that the load has a single use, but don't know what it is.  If it
2379  // isn't one of the folded instructions, then we can't succeed here.  Handle
2380  // this by scanning the single-use users of the load until we get to FoldInst.
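  // For example (illustrative), the chain might look like load -> zext ->
  // FoldInst, with each intermediate value having exactly one use.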
2381  unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.
2382
2383  const Instruction *TheUser = LI->user_back();
2384  while (TheUser != FoldInst && // Scan up until we find FoldInst.
2385         // Stay in the right block.
2386         TheUser->getParent() == FoldInst->getParent() &&
2387         --MaxUsers) { // Don't scan too far.
2388    // If there are multiple or no uses of this instruction, then bail out.
2389    if (!TheUser->hasOneUse())
2390      return false;
2391
2392    TheUser = TheUser->user_back();
2393  }
2394
2395  // If we didn't find the fold instruction, then we failed to collapse the
2396  // sequence.
2397  if (TheUser != FoldInst)
2398    return false;
2399
2400  // Don't try to fold volatile loads.  Target has to deal with alignment
2401  // constraints.
2402  if (LI->isVolatile())
2403    return false;
2404
2405  // Figure out which vreg this is going into.  If there is no assigned vreg yet
2406  // then there actually was no reference to it.  Perhaps the load is referenced
2407  // by a dead instruction.
2408  Register LoadReg = getRegForValue(LI);
2409  if (!LoadReg)
2410    return false;
2411
2412  // We can't fold if this vreg has no uses or more than one use.  Multiple uses
2413  // may mean that the instruction got lowered to multiple MIs, or the use of
2414  // the loaded value ended up being multiple operands of the result.
2415  if (!MRI.hasOneUse(LoadReg))
2416    return false;
2417
2418  MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
2419  MachineInstr *User = RI->getParent();
2420
2421  // Set the insertion point properly.  Folding the load can cause generation of
2422  // other random instructions (like sign extends) for addressing modes; make
2423  // sure they get inserted in a logical place before the new instruction.
2424  FuncInfo.InsertPt = User;
2425  FuncInfo.MBB = User->getParent();
2426
2427  // Ask the target to try folding the load.
2428  return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
2429}
2430
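/// For illustration (hypothetical IR, names made up), the pattern checked for
/// below is a GEP whose index is an add with a constant right-hand side:
///
///   %sum = add i64 %i, 4
///   %ptr = getelementptr i32, i32* %base, i64 %sum
///
/// so that the caller can fold the constant 4 into the address computation.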
2431bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
2432  // Must be an add.
2433  if (!isa<AddOperator>(Add))
2434    return false;
2435  // Type size needs to match.
2436  if (DL.getTypeSizeInBits(GEP->getType()) !=
2437      DL.getTypeSizeInBits(Add->getType()))
2438    return false;
2439  // Must be in the same basic block.
2440  if (isa<Instruction>(Add) &&
2441      FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB)
2442    return false;
2443  // Must have a constant operand.
2444  return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
2445}
2446
2447MachineMemOperand *
2448FastISel::createMachineMemOperandFor(const Instruction *I) const {
2449  const Value *Ptr;
2450  Type *ValTy;
2451  MaybeAlign Alignment;
2452  MachineMemOperand::Flags Flags;
2453  bool IsVolatile;
2454
2455  if (const auto *LI = dyn_cast<LoadInst>(I)) {
2456    Alignment = LI->getAlign();
2457    IsVolatile = LI->isVolatile();
2458    Flags = MachineMemOperand::MOLoad;
2459    Ptr = LI->getPointerOperand();
2460    ValTy = LI->getType();
2461  } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
2462    Alignment = SI->getAlign();
2463    IsVolatile = SI->isVolatile();
2464    Flags = MachineMemOperand::MOStore;
2465    Ptr = SI->getPointerOperand();
2466    ValTy = SI->getValueOperand()->getType();
2467  } else
2468    return nullptr;
2469
2470  bool IsNonTemporal = I->hasMetadata(LLVMContext::MD_nontemporal);
2471  bool IsInvariant = I->hasMetadata(LLVMContext::MD_invariant_load);
2472  bool IsDereferenceable = I->hasMetadata(LLVMContext::MD_dereferenceable);
2473  const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);
2474
2475  AAMDNodes AAInfo;
2476  I->getAAMetadata(AAInfo);
2477
2478  if (!Alignment) // Ensure that codegen never sees alignment 0.
2479    Alignment = DL.getABITypeAlign(ValTy);
2480
2481  unsigned Size = DL.getTypeStoreSize(ValTy);
2482
2483  if (IsVolatile)
2484    Flags |= MachineMemOperand::MOVolatile;
2485  if (IsNonTemporal)
2486    Flags |= MachineMemOperand::MONonTemporal;
2487  if (IsDereferenceable)
2488    Flags |= MachineMemOperand::MODereferenceable;
2489  if (IsInvariant)
2490    Flags |= MachineMemOperand::MOInvariant;
2491
2492  return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size,
2493                                           *Alignment, AAInfo, Ranges);
2494}
2495
2496CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const {
2497  // If both operands are the same, then try to optimize or fold the cmp.
2498  CmpInst::Predicate Predicate = CI->getPredicate();
2499  if (CI->getOperand(0) != CI->getOperand(1))
2500    return Predicate;
2501
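  // With identical operands the FP result depends only on whether the operand
  // is NaN, and the integer result is a constant.  For example (illustrative):
  //   fcmp oeq %x, %x  is true iff %x is not NaN  ->  fcmp ord %x, %x
  //   icmp eq  %x, %x  is always true             ->  FCMP_TRUE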
2502  switch (Predicate) {
2503  default: llvm_unreachable("Invalid predicate!");
2504  case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
2505  case CmpInst::FCMP_OEQ:   Predicate = CmpInst::FCMP_ORD;   break;
2506  case CmpInst::FCMP_OGT:   Predicate = CmpInst::FCMP_FALSE; break;
2507  case CmpInst::FCMP_OGE:   Predicate = CmpInst::FCMP_ORD;   break;
2508  case CmpInst::FCMP_OLT:   Predicate = CmpInst::FCMP_FALSE; break;
2509  case CmpInst::FCMP_OLE:   Predicate = CmpInst::FCMP_ORD;   break;
2510  case CmpInst::FCMP_ONE:   Predicate = CmpInst::FCMP_FALSE; break;
2511  case CmpInst::FCMP_ORD:   Predicate = CmpInst::FCMP_ORD;   break;
2512  case CmpInst::FCMP_UNO:   Predicate = CmpInst::FCMP_UNO;   break;
2513  case CmpInst::FCMP_UEQ:   Predicate = CmpInst::FCMP_TRUE;  break;
2514  case CmpInst::FCMP_UGT:   Predicate = CmpInst::FCMP_UNO;   break;
2515  case CmpInst::FCMP_UGE:   Predicate = CmpInst::FCMP_TRUE;  break;
2516  case CmpInst::FCMP_ULT:   Predicate = CmpInst::FCMP_UNO;   break;
2517  case CmpInst::FCMP_ULE:   Predicate = CmpInst::FCMP_TRUE;  break;
2518  case CmpInst::FCMP_UNE:   Predicate = CmpInst::FCMP_UNO;   break;
2519  case CmpInst::FCMP_TRUE:  Predicate = CmpInst::FCMP_TRUE;  break;
2520
2521  case CmpInst::ICMP_EQ:    Predicate = CmpInst::FCMP_TRUE;  break;
2522  case CmpInst::ICMP_NE:    Predicate = CmpInst::FCMP_FALSE; break;
2523  case CmpInst::ICMP_UGT:   Predicate = CmpInst::FCMP_FALSE; break;
2524  case CmpInst::ICMP_UGE:   Predicate = CmpInst::FCMP_TRUE;  break;
2525  case CmpInst::ICMP_ULT:   Predicate = CmpInst::FCMP_FALSE; break;
2526  case CmpInst::ICMP_ULE:   Predicate = CmpInst::FCMP_TRUE;  break;
2527  case CmpInst::ICMP_SGT:   Predicate = CmpInst::FCMP_FALSE; break;
2528  case CmpInst::ICMP_SGE:   Predicate = CmpInst::FCMP_TRUE;  break;
2529  case CmpInst::ICMP_SLT:   Predicate = CmpInst::FCMP_FALSE; break;
2530  case CmpInst::ICMP_SLE:   Predicate = CmpInst::FCMP_TRUE;  break;
2531  }
2532
2533  return Predicate;
2534}
2535