//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Analysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"

using namespace llvm;
/// Compute the linearized index of a member in a nested aggregate/struct/array
/// by recursing and accumulating CurIndex as long as there are indices in the
/// index list.
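///
/// For illustration (a hypothetical type, not taken from any caller): given
///   %T = { i32, { i32, i32 }, i32 }
/// the scalar leaves are numbered 0..3 in depth-first order, so the index
/// list {1, 1} (the second field of the inner struct) linearizes to 2.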
unsigned llvm::ComputeLinearIndex(Type *Ty,
                                  const unsigned *Indices,
                                  const unsigned *IndicesEnd,
                                  unsigned CurIndex) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI) {
      if (Indices && *Indices == unsigned(EI - EB))
        return ComputeLinearIndex(*EI, Indices + 1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(*EI, nullptr, nullptr, CurIndex);
    }
    assert(!Indices && "Unexpected out-of-bounds index");
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    unsigned NumElts = ATy->getNumElements();
    // Compute the linear offset produced by stepping over one array element.
    unsigned EltLinearOffset = ComputeLinearIndex(EltTy, nullptr, nullptr, 0);
    if (Indices) {
      assert(*Indices < NumElts && "Unexpected out-of-bounds index");
      // The index is inside the array: compute the offset of the requested
      // element and recurse into it with the remaining indices.
      CurIndex += EltLinearOffset * *Indices;
      return ComputeLinearIndex(EltTy, Indices + 1, IndicesEnd, CurIndex);
    }
    CurIndex += EltLinearOffset * NumElts;
    return CurIndex;
  }
  // We haven't found the type we're looking for, so keep searching.
  return CurIndex + 1;
}

/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
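/// For example (a sketch; the exact offsets depend on the target's
/// DataLayout): given { i32, [2 x float] } with 4-byte i32 and float,
/// ValueVTs becomes { i32, f32, f32 } and Offsets becomes { 0, 4, 8 }.
///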
void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<EVT> *MemVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = DL.getStructLayout(STy);
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI)
      ComputeValueVTs(TLI, DL, *EI, ValueVTs, MemVTs, Offsets,
                      StartingOffset + SL->getElementOffset(EI - EB));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, DL, EltTy, ValueVTs, MemVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(DL, Ty));
  if (MemVTs)
    MemVTs->push_back(TLI.getMemValueType(DL, Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}

void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  return ComputeValueVTs(TLI, DL, Ty, ValueVTs, /*MemVTs=*/nullptr, Offsets,
                         StartingOffset);
}

void llvm::computeValueLLTs(const DataLayout &DL, Type &Ty,
                            SmallVectorImpl<LLT> &ValueTys,
                            SmallVectorImpl<uint64_t> *Offsets,
                            uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(&Ty)) {
    const StructLayout *SL = DL.getStructLayout(STy);
    for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I)
      computeValueLLTs(DL, *STy->getElementType(I), ValueTys, Offsets,
                       StartingOffset + SL->getElementOffset(I));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      computeValueLLTs(DL, *EltTy, ValueTys, Offsets,
                       StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty.isVoidTy())
    return;
  // Base case: we can get an LLT for this LLVM IR type.
  ValueTys.push_back(getLLTForType(Ty, DL));
  if (Offsets != nullptr)
    Offsets->push_back(StartingOffset * 8);
}

/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalValue *llvm::ExtractTypeInfo(Value *V) {
  V = V->stripPointerCasts();
  GlobalValue *GV = dyn_cast<GlobalValue>(V);
  GlobalVariable *Var = dyn_cast<GlobalVariable>(V);

  if (Var && Var->getName() == "llvm.eh.catch.all.value") {
    assert(Var->hasInitializer() &&
           "The EH catch-all value must have an initializer");
    Value *Init = Var->getInitializer();
    GV = dyn_cast<GlobalValue>(Init);
    if (!GV) V = cast<ConstantPointerNull>(Init);
  }

  assert((GV || isa<ConstantPointerNull>(V)) &&
         "TypeInfo must be a global variable or NULL");
  return GV;
}

/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
/// processed uses a memory 'm' constraint.
bool
llvm::hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos,
                                const TargetLowering &TLI) {
  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
    InlineAsm::ConstraintInfo &CI = CInfos[i];
    for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
      TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
      if (CType == TargetLowering::C_Memory)
        return true;
    }

    // Indirect operands access memory.
    if (CI.isIndirect)
      return true;
  }

  return false;
}

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
///
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: return ISD::SETFALSE;
  case FCmpInst::FCMP_OEQ:   return ISD::SETOEQ;
  case FCmpInst::FCMP_OGT:   return ISD::SETOGT;
  case FCmpInst::FCMP_OGE:   return ISD::SETOGE;
  case FCmpInst::FCMP_OLT:   return ISD::SETOLT;
  case FCmpInst::FCMP_OLE:   return ISD::SETOLE;
  case FCmpInst::FCMP_ONE:   return ISD::SETONE;
  case FCmpInst::FCMP_ORD:   return ISD::SETO;
  case FCmpInst::FCMP_UNO:   return ISD::SETUO;
  case FCmpInst::FCMP_UEQ:   return ISD::SETUEQ;
  case FCmpInst::FCMP_UGT:   return ISD::SETUGT;
  case FCmpInst::FCMP_UGE:   return ISD::SETUGE;
  case FCmpInst::FCMP_ULT:   return ISD::SETULT;
  case FCmpInst::FCMP_ULE:   return ISD::SETULE;
  case FCmpInst::FCMP_UNE:   return ISD::SETUNE;
  case FCmpInst::FCMP_TRUE:  return ISD::SETTRUE;
  default: llvm_unreachable("Invalid FCmp predicate opcode!");
  }
}

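/// getFCmpCodeWithoutNaN - Return the given ISD floating-point condition code
/// with the ordered/unordered distinction removed (e.g. both SETOLT and
/// SETULT map to SETLT); codes without an unordered counterpart pass through
/// unchanged.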
ISD::CondCode llvm::getFCmpCodeWithoutNaN(ISD::CondCode CC) {
  switch (CC) {
    case ISD::SETOEQ: case ISD::SETUEQ: return ISD::SETEQ;
    case ISD::SETONE: case ISD::SETUNE: return ISD::SETNE;
    case ISD::SETOLT: case ISD::SETULT: return ISD::SETLT;
    case ISD::SETOLE: case ISD::SETULE: return ISD::SETLE;
    case ISD::SETOGT: case ISD::SETUGT: return ISD::SETGT;
    case ISD::SETOGE: case ISD::SETUGE: return ISD::SETGE;
    default: return CC;
  }
}

/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    llvm_unreachable("Invalid ICmp predicate opcode!");
  }
}

static bool isNoopBitcast(Type *T1, Type *T2,
                          const TargetLoweringBase &TLI) {
  return T1 == T2 || (T1->isPointerTy() && T2->isPointerTy()) ||
         (isa<VectorType>(T1) && isa<VectorType>(T2) &&
          TLI.isTypeLegal(EVT::getEVT(T1)) && TLI.isTypeLegal(EVT::getEVT(T2)));
}

/// Look through operations that will be free to find the earliest source of
/// this value.
///
/// @param ValLoc If V has aggregate type, we will be interested in a particular
/// scalar component. This records its address; the reverse of this list gives a
/// sequence of indices appropriate for an extractvalue to locate the important
/// value. This value is updated during the function and on exit will indicate
/// similar information for the Value returned.
///
/// @param DataBits If this function looks through truncate instructions, this
/// will record the smallest size attained.
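///
/// A small illustration (hypothetical typed-pointer IR; pointer-to-pointer
/// bitcasts are treated as no-ops here):
///   %c = call i8* @f()
///   %b = bitcast i8* %c to i32*
/// getNoopInput(%b, ...) looks through the bitcast and returns %c.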
static const Value *getNoopInput(const Value *V,
                                 SmallVectorImpl<unsigned> &ValLoc,
                                 unsigned &DataBits,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {
  while (true) {
    // Try to look through V; if V is not an instruction, it can't be looked
    // through.
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I || I->getNumOperands() == 0) return V;
    const Value *NoopInput = nullptr;

    Value *Op = I->getOperand(0);
    if (isa<BitCastInst>(I)) {
      // Look through truly no-op bitcasts.
      if (isNoopBitcast(Op->getType(), I->getType(), TLI))
        NoopInput = Op;
    } else if (isa<GetElementPtrInst>(I)) {
      // Look through getelementptr.
      if (cast<GetElementPtrInst>(I)->hasAllZeroIndices())
        NoopInput = Op;
    } else if (isa<IntToPtrInst>(I)) {
      // Look through inttoptr.
      // Make sure this isn't a truncating or extending cast.  We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(Op->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<PtrToIntInst>(I)) {
      // Look through ptrtoint.
      // Make sure this isn't a truncating or extending cast.  We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(I->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<TruncInst>(I) &&
               TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
      DataBits = std::min((uint64_t)DataBits,
                         I->getType()->getPrimitiveSizeInBits().getFixedSize());
      NoopInput = Op;
    } else if (auto *CB = dyn_cast<CallBase>(I)) {
      const Value *ReturnedOp = CB->getReturnedArgOperand();
      if (ReturnedOp && isNoopBitcast(ReturnedOp->getType(), I->getType(), TLI))
        NoopInput = ReturnedOp;
    } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(V)) {
      // Value may come from either the aggregate or the scalar.
      ArrayRef<unsigned> InsertLoc = IVI->getIndices();
      if (ValLoc.size() >= InsertLoc.size() &&
          std::equal(InsertLoc.begin(), InsertLoc.end(), ValLoc.rbegin())) {
        // The type being inserted is a nested sub-type of the aggregate; we
        // have to remove those initial indices to get the location we're
        // interested in for the operand.
        ValLoc.resize(ValLoc.size() - InsertLoc.size());
        NoopInput = IVI->getInsertedValueOperand();
      } else {
        // The struct we're inserting into has the value we're interested in, no
        // change of address.
        NoopInput = Op;
      }
    } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(V)) {
      // The part we're interested in will inevitably be some sub-section of the
      // previous aggregate. Combine the two paths to obtain the true address of
      // our element.
      ArrayRef<unsigned> ExtractLoc = EVI->getIndices();
      ValLoc.append(ExtractLoc.rbegin(), ExtractLoc.rend());
      NoopInput = Op;
    }
    // Terminate if we couldn't find anything to look through.
    if (!NoopInput)
      return V;

    V = NoopInput;
  }
}

/// Return true if this scalar return value only has bits discarded on its path
/// from the "tail call" to the "ret". This includes the obvious noop
/// instructions handled by getNoopInput above as well as free truncations (or
/// extensions prior to the call).
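///
/// For instance (an illustrative sketch, assuming the target allows
/// truncation for tail calls):
///   %w = tail call i64 @wide()
///   %t = trunc i64 %w to i32
///   ret i32 %t
/// only discards the high bits of %w on the way to the ret, so this slot
/// qualifies.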
static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal,
                                 SmallVectorImpl<unsigned> &RetIndices,
                                 SmallVectorImpl<unsigned> &CallIndices,
                                 bool AllowDifferingSizes,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {

  // Trace the sub-value needed by the return value as far back up the graph as
  // possible, in the hope that it will intersect with the value produced by the
  // call. In the simple case with no "returned" attribute, the hope is actually
  // that we end up back at the tail call instruction itself.
  unsigned BitsRequired = UINT_MAX;
  RetVal = getNoopInput(RetVal, RetIndices, BitsRequired, TLI, DL);

  // If this slot in the value returned is undef, it doesn't matter what the
  // call puts there, it'll be fine.
  if (isa<UndefValue>(RetVal))
    return true;

  // Now do a similar search up through the graph to find where the value
  // actually returned by the "tail call" comes from. In the simple case
  // without a "returned" attribute, the search is blocked immediately and the
  // loop is a no-op.
  unsigned BitsProvided = UINT_MAX;
  CallVal = getNoopInput(CallVal, CallIndices, BitsProvided, TLI, DL);

  // There's no hope if we can't actually trace them to (the same part of!) the
  // same value.
  if (CallVal != RetVal || CallIndices != RetIndices)
    return false;

  // However, intervening truncates may have made the call non-tail. Make sure
  // all the bits that are needed by the "ret" have been provided by the "tail
  // call". FIXME: with sufficiently cunning bit-tracking, we could look through
  // extensions too.
  if (BitsProvided < BitsRequired ||
      (!AllowDifferingSizes && BitsProvided != BitsRequired))
    return false;

  return true;
}

/// For an aggregate type, determine whether a given index is within bounds or
/// not.
static bool indexReallyValid(Type *T, unsigned Idx) {
  if (ArrayType *AT = dyn_cast<ArrayType>(T))
    return Idx < AT->getNumElements();

  return Idx < cast<StructType>(T)->getNumElements();
}

/// Move the given iterators to the next leaf type in depth-first traversal.
///
/// Performs a depth-first traversal of the type as specified by its arguments,
/// stopping at the next leaf node (which may be a legitimate scalar type or an
/// empty struct or array).
///
/// @param SubTypes List of the partial components making up the type from
/// outermost to innermost non-empty aggregate. The element currently
/// represented is SubTypes.back()->getTypeAtIndex(Path.back() - 1).
///
/// @param Path Set of extractvalue indices leading from the outermost type
/// (SubTypes[0]) to the leaf node currently represented.
///
/// @returns true if a new type was found, false otherwise. Calling this
/// function again on a finished iterator will repeatedly return
/// false. SubTypes.back()->getTypeAtIndex(Path.back()) is either an empty
/// aggregate or a non-aggregate type.
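///
/// A worked example (hypothetical iterator state): with SubTypes =
/// [{i32, i32}] and Path = [0] (representing the first i32), one call
/// advances Path to [1], the second i32; the following call runs back past
/// the root and returns false.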
static bool advanceToNextLeafType(SmallVectorImpl<Type *> &SubTypes,
                                  SmallVectorImpl<unsigned> &Path) {
  // First march back up the tree until we can successfully increment one of the
  // coordinates in Path.
  while (!Path.empty() && !indexReallyValid(SubTypes.back(), Path.back() + 1)) {
    Path.pop_back();
    SubTypes.pop_back();
  }

  // If we reached the top, then the iterator is done.
  if (Path.empty())
    return false;

  // We know there's *some* valid leaf now, so march back down the tree picking
  // out the left-most element at each node.
  ++Path.back();
  Type *DeeperType =
      ExtractValueInst::getIndexedType(SubTypes.back(), Path.back());
  while (DeeperType->isAggregateType()) {
    if (!indexReallyValid(DeeperType, 0))
      return true;

    SubTypes.push_back(DeeperType);
    Path.push_back(0);

    DeeperType = ExtractValueInst::getIndexedType(DeeperType, 0);
  }

  return true;
}

/// Find the first non-empty, scalar-like type in Next and set up the iterator
/// components.
///
/// Assuming Next is an aggregate of some kind, this function will traverse the
/// tree from left to right (i.e. depth-first) looking for the first
/// non-aggregate type which will play a role in function return.
///
/// For example, if Next was {[0 x i64], {{}, i32, {}}, i32} then we would set
/// up Path as [1, 1] and SubTypes as [Next, {{}, i32, {}}] to represent the
/// first i32 in that type.
static bool firstRealType(Type *Next, SmallVectorImpl<Type *> &SubTypes,
                          SmallVectorImpl<unsigned> &Path) {
  // First initialise the iterator components to the first "leaf" node
  // (i.e. node with no valid sub-type at any index, so {} does count as a leaf
  // despite nominally being an aggregate).
  while (Type *FirstInner = ExtractValueInst::getIndexedType(Next, 0)) {
    SubTypes.push_back(Next);
    Path.push_back(0);
    Next = FirstInner;
  }

  // If there's no Path now, Next was originally scalar already (or empty
  // leaf). We're done.
  if (Path.empty())
    return true;

  // Otherwise, use normal iteration to keep looking through the tree until we
  // find a non-aggregate type.
  while (ExtractValueInst::getIndexedType(SubTypes.back(), Path.back())
             ->isAggregateType()) {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;
  }

  return true;
}

/// Set the iterator data-structures to the next non-empty, non-aggregate
/// subtype.
static bool nextRealType(SmallVectorImpl<Type *> &SubTypes,
                         SmallVectorImpl<unsigned> &Path) {
  do {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;

    assert(!Path.empty() && "found a leaf but didn't set the path?");
  } while (ExtractValueInst::getIndexedType(SubTypes.back(), Path.back())
               ->isAggregateType());

  return true;
}

/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
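///
/// For example (illustrative IR only):
///   %v = tail call i32 @callee()
///   ret i32 %v
/// is in tail call position, whereas an intervening store between the call
/// and the ret would disqualify it.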
bool llvm::isInTailCallPosition(const CallBase &Call, const TargetMachine &TM) {
  const BasicBlock *ExitBB = Call.getParent();
  const Instruction *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret && ((!TM.Options.GuaranteedTailCallOpt &&
                Call.getCallingConv() != CallingConv::Tail) ||
               !isa<UnreachableInst>(Term)))
    return false;

  // If the call will have a chain, make sure no other instruction that will
  // have a chain interposes between it and the return.
  // Check for all calls including speculatable functions.
  for (BasicBlock::const_iterator BBI = std::prev(ExitBB->end(), 2);; --BBI) {
    if (&*BBI == &Call)
      break;
    // Debug info intrinsics do not get in the way of tail call optimization.
    if (isa<DbgInfoIntrinsic>(BBI))
      continue;
    // A lifetime end or assume intrinsic should not stop tail call
    // optimization.
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(BBI))
      if (II->getIntrinsicID() == Intrinsic::lifetime_end ||
          II->getIntrinsicID() == Intrinsic::assume)
        continue;
    if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
        !isSafeToSpeculativelyExecute(&*BBI))
      return false;
  }

  const Function *F = ExitBB->getParent();
  return returnTypeIsEligibleForTailCall(
      F, &Call, Ret, *TM.getSubtargetImpl(*F)->getTargetLowering());
}
555
556bool llvm::attributesPermitTailCall(const Function *F, const Instruction *I,
557                                    const ReturnInst *Ret,
558                                    const TargetLoweringBase &TLI,
559                                    bool *AllowDifferingSizes) {
560  // ADS may be null, so don't write to it directly.
561  bool DummyADS;
562  bool &ADS = AllowDifferingSizes ? *AllowDifferingSizes : DummyADS;
563  ADS = true;
564
565  AttrBuilder CallerAttrs(F->getAttributes(), AttributeList::ReturnIndex);
566  AttrBuilder CalleeAttrs(cast<CallInst>(I)->getAttributes(),
567                          AttributeList::ReturnIndex);
568
  // The following attributes are completely benign as far as the calling
  // convention goes; they shouldn't affect whether the call is a tail call.
  CallerAttrs.removeAttribute(Attribute::NoAlias);
  CalleeAttrs.removeAttribute(Attribute::NoAlias);
  CallerAttrs.removeAttribute(Attribute::NonNull);
  CalleeAttrs.removeAttribute(Attribute::NonNull);
  CallerAttrs.removeAttribute(Attribute::Dereferenceable);
  CalleeAttrs.removeAttribute(Attribute::Dereferenceable);
  CallerAttrs.removeAttribute(Attribute::DereferenceableOrNull);
  CalleeAttrs.removeAttribute(Attribute::DereferenceableOrNull);

  if (CallerAttrs.contains(Attribute::ZExt)) {
    if (!CalleeAttrs.contains(Attribute::ZExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::ZExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  } else if (CallerAttrs.contains(Attribute::SExt)) {
    if (!CalleeAttrs.contains(Attribute::SExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::SExt);
  }

  // Drop sext and zext return attributes if the result is not used.
  // This enables tail calls for code like:
  //
  // define void @caller() {
  // entry:
  //   %unused_result = tail call zeroext i1 @callee()
  //   br label %retlabel
  // retlabel:
  //   ret void
  // }
  if (I->use_empty()) {
    CalleeAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  }

  // If they're still different, there's some facet we don't understand
  // (currently only "inreg", but in future who knows). It may be OK but the
  // only safe option is to reject the tail call.
  return CallerAttrs == CalleeAttrs;
}

/// Check whether B is a bitcast from one pointer type to another, whose source
/// operand is equal to A.
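///
/// For example (illustrative typed-pointer IR): given
///   %b = bitcast i32* %a to i8*
/// isPointerBitcastEqualTo(%a, %b) returns true.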
static bool isPointerBitcastEqualTo(const Value *A, const Value *B) {
  assert(A && B && "Expected non-null inputs!");

  auto *BitCastIn = dyn_cast<BitCastInst>(B);

  if (!BitCastIn)
    return false;

  if (!A->getType()->isPointerTy() || !B->getType()->isPointerTy())
    return false;

  return A == BitCastIn->getOperand(0);
}

bool llvm::returnTypeIsEligibleForTailCall(const Function *F,
                                           const Instruction *I,
                                           const ReturnInst *Ret,
                                           const TargetLoweringBase &TLI) {
  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Make sure the attributes attached to each return are compatible.
  bool AllowDifferingSizes;
  if (!attributesPermitTailCall(F, I, Ret, TLI, &AllowDifferingSizes))
    return false;

  const Value *RetVal = Ret->getOperand(0), *CallVal = I;
  // Intrinsics like llvm.memcpy have no return value, but the expanded
  // libcall may or may not have one. On most platforms, the intrinsic is
  // expanded as a call to memcpy in libc, which returns the first argument.
  // On other platforms like arm-none-eabi, memcpy may be expanded as a
  // library call without a return value, like __aeabi_memcpy.
  const CallInst *Call = cast<CallInst>(I);
  if (Function *F = Call->getCalledFunction()) {
    Intrinsic::ID IID = F->getIntrinsicID();
    if (((IID == Intrinsic::memcpy &&
          TLI.getLibcallName(RTLIB::MEMCPY) == StringRef("memcpy")) ||
         (IID == Intrinsic::memmove &&
          TLI.getLibcallName(RTLIB::MEMMOVE) == StringRef("memmove")) ||
         (IID == Intrinsic::memset &&
          TLI.getLibcallName(RTLIB::MEMSET) == StringRef("memset"))) &&
        (RetVal == Call->getArgOperand(0) ||
         isPointerBitcastEqualTo(RetVal, Call->getArgOperand(0))))
      return true;
  }

  SmallVector<unsigned, 4> RetPath, CallPath;
  SmallVector<Type *, 4> RetSubTypes, CallSubTypes;

  bool RetEmpty = !firstRealType(RetVal->getType(), RetSubTypes, RetPath);
  bool CallEmpty = !firstRealType(CallVal->getType(), CallSubTypes, CallPath);

  // Nothing's actually returned, so it doesn't matter what the callee put
  // there; it's a valid tail call.
  if (RetEmpty)
    return true;

  // Iterate pairwise through each of the value types making up the tail call
  // and the corresponding return. For each one we want to know whether it's
  // essentially going directly from the tail call to the ret, via operations
  // that end up not generating any code.
  //
  // We allow a certain amount of covariance here. For example it's permitted
  // for the tail call to define more bits than the ret actually cares about
  // (e.g. via a truncate).
  do {
    if (CallEmpty) {
      // We've exhausted the values produced by the tail call instruction, the
      // rest are essentially undef. The type doesn't really matter, but we need
      // *something*.
      Type *SlotType =
          ExtractValueInst::getIndexedType(RetSubTypes.back(), RetPath.back());
      CallVal = UndefValue::get(SlotType);
    }

    // The manipulations performed when we're looking through an insertvalue or
    // an extractvalue would happen at the front of the RetPath list, so since
    // we have to copy it anyway it's more efficient to create a reversed copy.
    SmallVector<unsigned, 4> TmpRetPath(RetPath.rbegin(), RetPath.rend());
    SmallVector<unsigned, 4> TmpCallPath(CallPath.rbegin(), CallPath.rend());

    // Finally, we can check whether the value produced by the tail call at this
    // index is compatible with the value we return.
    if (!slotOnlyDiscardsData(RetVal, CallVal, TmpRetPath, TmpCallPath,
                              AllowDifferingSizes, TLI,
                              F->getParent()->getDataLayout()))
      return false;

    CallEmpty = !nextRealType(CallSubTypes, CallPath);
  } while (nextRealType(RetSubTypes, RetPath));

  return true;
}

static void collectEHScopeMembers(
    DenseMap<const MachineBasicBlock *, int> &EHScopeMembership, int EHScope,
    const MachineBasicBlock *MBB) {
  SmallVector<const MachineBasicBlock *, 16> Worklist = {MBB};
  while (!Worklist.empty()) {
    const MachineBasicBlock *Visiting = Worklist.pop_back_val();
    // Don't follow blocks which start new scopes.
    if (Visiting->isEHPad() && Visiting != MBB)
      continue;

    // Add this MBB to our scope.
    auto P = EHScopeMembership.insert(std::make_pair(Visiting, EHScope));

    // Don't revisit blocks.
    if (!P.second) {
      assert(P.first->second == EHScope && "MBB is part of two scopes!");
      continue;
    }

    // Returns are boundaries where scope transfer can occur; don't follow
    // successors.
    if (Visiting->isEHScopeReturnBlock())
      continue;

    for (const MachineBasicBlock *Succ : Visiting->successors())
      Worklist.push_back(Succ);
  }
}

DenseMap<const MachineBasicBlock *, int>
llvm::getEHScopeMembership(const MachineFunction &MF) {
  DenseMap<const MachineBasicBlock *, int> EHScopeMembership;

  // We don't have anything to do if there aren't any EH pads.
  if (!MF.hasEHScopes())
    return EHScopeMembership;

  int EntryBBNumber = MF.front().getNumber();
  bool IsSEH = isAsynchronousEHPersonality(
      classifyEHPersonality(MF.getFunction().getPersonalityFn()));

  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  SmallVector<const MachineBasicBlock *, 16> EHScopeBlocks;
  SmallVector<const MachineBasicBlock *, 16> UnreachableBlocks;
  SmallVector<const MachineBasicBlock *, 16> SEHCatchPads;
  SmallVector<std::pair<const MachineBasicBlock *, int>, 16> CatchRetSuccessors;
  for (const MachineBasicBlock &MBB : MF) {
    if (MBB.isEHScopeEntry()) {
      EHScopeBlocks.push_back(&MBB);
    } else if (IsSEH && MBB.isEHPad()) {
      SEHCatchPads.push_back(&MBB);
    } else if (MBB.pred_empty()) {
      UnreachableBlocks.push_back(&MBB);
    }

    MachineBasicBlock::const_iterator MBBI = MBB.getFirstTerminator();

    // CatchPads are not scopes for SEH so do not consider CatchRet to
    // transfer control to another scope.
    if (MBBI == MBB.end() || MBBI->getOpcode() != TII->getCatchReturnOpcode())
      continue;

    // FIXME: SEH CatchPads are not necessarily in the parent function:
    // they could be inside a finally block.
    const MachineBasicBlock *Successor = MBBI->getOperand(0).getMBB();
    const MachineBasicBlock *SuccessorColor = MBBI->getOperand(1).getMBB();
    CatchRetSuccessors.push_back(
        {Successor, IsSEH ? EntryBBNumber : SuccessorColor->getNumber()});
  }

  // We don't have anything to do if there aren't any EH scope entry blocks.
  if (EHScopeBlocks.empty())
    return EHScopeMembership;

  // Identify all the basic blocks reachable from the function entry.
  collectEHScopeMembers(EHScopeMembership, EntryBBNumber, &MF.front());
  // All blocks not part of a scope are in the parent function.
  for (const MachineBasicBlock *MBB : UnreachableBlocks)
    collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
  // Next, identify all the blocks inside the scopes.
  for (const MachineBasicBlock *MBB : EHScopeBlocks)
    collectEHScopeMembers(EHScopeMembership, MBB->getNumber(), MBB);
  // SEH CatchPads aren't really scopes, handle them separately.
  for (const MachineBasicBlock *MBB : SEHCatchPads)
    collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
  // Finally, identify all the targets of a catchret.
  for (std::pair<const MachineBasicBlock *, int> CatchRetPair :
       CatchRetSuccessors)
    collectEHScopeMembers(EHScopeMembership, CatchRetPair.second,
                          CatchRetPair.first);
  return EHScopeMembership;
}