ScalarEvolutionExpander.cpp revision 309124
//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace PatternMatch;

/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
/// reusing an existing cast if a suitable one exists, moving an existing
/// cast if a suitable one exists but isn't in the right place, or
/// creating a new one.
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
                                       Instruction::CastOps Op,
                                       BasicBlock::iterator IP) {
  // This function must be called with the builder having a valid insertion
  // point. It doesn't need to be the actual IP where the uses of the returned
  // cast will be added, but it must dominate such IP.
  // We use this precondition to produce a cast that will dominate all its
  // uses. In particular, this is crucial for the case where the builder's
  // insertion point *is* the point where we were asked to put the cast.
  // Since we don't know the builder's insertion point is actually
  // where the uses will be added (only that it dominates it), we are
  // not allowed to move it.
  BasicBlock::iterator BIP = Builder.GetInsertPoint();

  Instruction *Ret = nullptr;

  // Check to see if there is already a cast!
  for (User *U : V->users())
    if (U->getType() == Ty)
      if (CastInst *CI = dyn_cast<CastInst>(U))
        if (CI->getOpcode() == Op) {
          // If the cast isn't where we want it, create a new cast at IP.
          // Likewise, do not reuse a cast at BIP because it must dominate
          // instructions that might be inserted before BIP.
          if (BasicBlock::iterator(CI) != IP || BIP == IP) {
            // Create a new cast, and leave the old cast in place in case
            // it is being used as an insert point. Clear its operand
            // so that it doesn't hold anything live.
            Ret = CastInst::Create(Op, V, Ty, "", &*IP);
            Ret->takeName(CI);
            CI->replaceAllUsesWith(Ret);
            CI->setOperand(0, UndefValue::get(V->getType()));
            break;
          }
          Ret = CI;
          break;
        }

  // Create a new cast.
  if (!Ret)
    Ret = CastInst::Create(Op, V, Ty, V->getName(), &*IP);

  // We assert at the end of the function since IP might point to an
  // instruction with different dominance properties than a cast
  // (an invoke for example) and not dominate BIP (but the cast does).
  assert(SE.DT.dominates(Ret, &*BIP));

  rememberInstruction(Ret);
  return Ret;
}

static BasicBlock::iterator findInsertPointAfter(Instruction *I,
                                                 BasicBlock *MustDominate) {
  BasicBlock::iterator IP = ++I->getIterator();
  if (auto *II = dyn_cast<InvokeInst>(I))
    IP = II->getNormalDest()->begin();

  while (isa<PHINode>(IP))
    ++IP;

  if (isa<FuncletPadInst>(IP) || isa<LandingPadInst>(IP)) {
    ++IP;
  } else if (isa<CatchSwitchInst>(IP)) {
    IP = MustDominate->getFirstInsertionPt();
  } else {
    assert(!IP->isEHPad() && "unexpected eh pad!");
  }

  return IP;
}

/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
  assert((Op == Instruction::BitCast ||
          Op == Instruction::PtrToInt ||
          Op == Instruction::IntToPtr) &&
         "InsertNoopCastOfTo cannot perform non-noop casts!");
  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
         "InsertNoopCastOfTo cannot change sizes!");

  // Short-circuit unnecessary bitcasts.
  if (Op == Instruction::BitCast) {
    if (V->getType() == Ty)
      return V;
    if (CastInst *CI = dyn_cast<CastInst>(V)) {
      if (CI->getOperand(0)->getType() == Ty)
        return CI->getOperand(0);
    }
  }
  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
  if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
    if (CastInst *CI = dyn_cast<CastInst>(V))
      if ((CI->getOpcode() == Instruction::PtrToInt ||
           CI->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CI->getType()) ==
          SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
        return CI->getOperand(0);
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      if ((CE->getOpcode() == Instruction::PtrToInt ||
           CE->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CE->getType()) ==
          SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
        return CE->getOperand(0);
  }

  // Fold a cast of a constant.
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(Op, C, Ty);

  // Cast the argument at the beginning of the entry block, after
  // any bitcasts of other arguments.
  if (Argument *A = dyn_cast<Argument>(V)) {
    BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
    while ((isa<BitCastInst>(IP) &&
            isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
            cast<BitCastInst>(IP)->getOperand(0) != A) ||
           isa<DbgInfoIntrinsic>(IP))
      ++IP;
    return ReuseOrCreateCast(A, Ty, Op, IP);
  }

  // Cast the instruction immediately after the instruction.
  Instruction *I = cast<Instruction>(V);
  BasicBlock::iterator IP = findInsertPointAfter(I, Builder.GetInsertBlock());
  return ReuseOrCreateCast(I, Ty, Op, IP);
}

/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation.
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
                                 Value *LHS, Value *RHS) {
  // Fold a binop with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      return ConstantExpr::get(Opcode, CLHS, CRHS);

  // Do a quick scan to see if we have this binop nearby.  If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      // Don't count dbg.value against the ScanLimit, to avoid perturbing the
      // generated code.
      if (isa<DbgInfoIntrinsic>(IP))
        ScanLimit++;
      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
          IP->getOperand(1) == RHS)
        return &*IP;
      if (IP == BlockBegin) break;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc();
  SCEVInsertPointGuard Guard(Builder, this);

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader->getTerminator());
  }

  // If we haven't found this binop, insert it.
  Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
  BO->setDebugLoc(Loc);
  rememberInstruction(BO);

  return BO;
}

/// FactorOutConstant - Test if S is divisible by Factor, using signed
/// division. If so, update S with Factor divided out and return true.
/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
/// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
/// unnecessary; in its place, just signed-divide Ops[i] by the scale and
/// check to see if the divide was folded.
static bool FactorOutConstant(const SCEV *&S, const SCEV *&Remainder,
                              const SCEV *Factor, ScalarEvolution &SE,
                              const DataLayout &DL) {
  // Everything is divisible by one.
  if (Factor->isOne())
    return true;

  // x/x == 1.
  if (S == Factor) {
    S = SE.getConstant(S->getType(), 1);
    return true;
  }

  // For a Constant, check for a multiple of the given factor.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    // 0/x == 0.
    if (C->isZero())
      return true;
    // Check for divisibility.
    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
      ConstantInt *CI =
          ConstantInt::get(SE.getContext(), C->getAPInt().sdiv(FC->getAPInt()));
      // If the quotient is zero and the remainder is non-zero, reject
      // the value at this scale. It will be considered for subsequent
      // smaller scales.
      if (!CI->isZero()) {
        const SCEV *Div = SE.getConstant(CI);
        S = Div;
        Remainder = SE.getAddExpr(
            Remainder, SE.getConstant(C->getAPInt().srem(FC->getAPInt())));
        return true;
      }
    }
  }

  // In a Mul, check if there is a constant operand which is a multiple
  // of the given factor.
  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    // Size is known, check if there is a constant operand which is a multiple
    // of the given factor. If so, we can factor it.
    const SCEVConstant *FC = cast<SCEVConstant>(Factor);
    if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
      if (!C->getAPInt().srem(FC->getAPInt())) {
        SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
        NewMulOps[0] = SE.getConstant(C->getAPInt().sdiv(FC->getAPInt()));
        S = SE.getMulExpr(NewMulOps);
        return true;
      }
  }

  // In an AddRec, check if both start and step are divisible.
  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    const SCEV *Step = A->getStepRecurrence(SE);
    const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
    if (!FactorOutConstant(Step, StepRem, Factor, SE, DL))
      return false;
    if (!StepRem->isZero())
      return false;
    const SCEV *Start = A->getStart();
    if (!FactorOutConstant(Start, Remainder, Factor, SE, DL))
      return false;
    S = SE.getAddRecExpr(Start, Step, A->getLoop(),
                         A->getNoWrapFlags(SCEV::FlagNW));
    return true;
  }

  return false;
}

/// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
/// is the number of SCEVAddRecExprs present, which are kept at the end of
/// the list.
///
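/// For example (an illustrative sketch): given Ops = [a, 5, {0,+,2}], the
/// non-addrec part is folded by ScalarEvolution into (5 + a), with the
/// constant first, so Ops becomes [5, a, {0,+,2}] and the addrec is kept
/// at the end of the list.
///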
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
                                Type *Ty,
                                ScalarEvolution &SE) {
  unsigned NumAddRecs = 0;
  for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
    ++NumAddRecs;
  // Group Ops into non-addrecs and addrecs.
  SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
  SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
  // Let ScalarEvolution sort and simplify the non-addrecs list.
  const SCEV *Sum = NoAddRecs.empty() ?
                    SE.getConstant(Ty, 0) :
                    SE.getAddExpr(NoAddRecs);
  // If it returned an add, use the operands. Otherwise it simplified
  // the sum into a single value, so just use that.
  Ops.clear();
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
    Ops.append(Add->op_begin(), Add->op_end());
  else if (!Sum->isZero())
    Ops.push_back(Sum);
  // Then append the addrecs.
  Ops.append(AddRecs.begin(), AddRecs.end());
}

/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
/// This helps expose more opportunities for folding parts of the expressions
/// into GEP indices.
///
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
                         Type *Ty,
                         ScalarEvolution &SE) {
  // Find the addrecs.
  SmallVector<const SCEV *, 8> AddRecs;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
      const SCEV *Start = A->getStart();
      if (Start->isZero()) break;
      const SCEV *Zero = SE.getConstant(Ty, 0);
      AddRecs.push_back(SE.getAddRecExpr(Zero,
                                         A->getStepRecurrence(SE),
                                         A->getLoop(),
                                         A->getNoWrapFlags(SCEV::FlagNW)));
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
        Ops[i] = Zero;
        Ops.append(Add->op_begin(), Add->op_end());
        e += Add->getNumOperands();
      } else {
        Ops[i] = Start;
      }
    }
  if (!AddRecs.empty()) {
    // Add the addrecs onto the end of the list.
    Ops.append(AddRecs.begin(), AddRecs.end());
    // Resort the operand list, moving any constants to the front.
    SimplifyAddOperands(Ops, Ty, SE);
  }
}

/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
/// for details.
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
///
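/// As a sketch of the intent (hypothetical IR, assuming a 4-byte i32
/// element type), expanding %p + 4*%i as integer arithmetic would produce
/// something like:
///   %pi  = ptrtoint i32* %p to i64
///   %ofs = mul i64 %i, 4
///   %ai  = add i64 %pi, %ofs
///   %q   = inttoptr i64 %ai to i32*
/// whereas this function aims to produce the alias-analysis-friendly form:
///   %q = getelementptr i32, i32* %p, i64 %i
///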
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
                                    const SCEV *const *op_end,
                                    PointerType *PTy,
                                    Type *Ty,
                                    Value *V) {
  Type *OriginalElTy = PTy->getElementType();
  Type *ElTy = OriginalElTy;
  SmallVector<Value *, 4> GepIndices;
  SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
  bool AnyNonZeroIndices = false;

  // Split AddRecs up into parts as either of the parts may be usable
  // without the other.
  SplitAddRecs(Ops, Ty, SE);

  Type *IntPtrTy = DL.getIntPtrType(PTy);

  // Descend down the pointer's type and attempt to convert the other
  // operands into GEP indices, at each level. The first index in a GEP
  // indexes into the array implied by the pointer operand; the rest of
  // the indices index into the element or field type selected by the
  // preceding index.
  for (;;) {
    // If the scale size is not 0, attempt to factor out a scale for
    // array indexing.
    SmallVector<const SCEV *, 8> ScaledOps;
    if (ElTy->isSized()) {
      const SCEV *ElSize = SE.getSizeOfExpr(IntPtrTy, ElTy);
      if (!ElSize->isZero()) {
        SmallVector<const SCEV *, 8> NewOps;
        for (const SCEV *Op : Ops) {
          const SCEV *Remainder = SE.getConstant(Ty, 0);
          if (FactorOutConstant(Op, Remainder, ElSize, SE, DL)) {
            // Op now has ElSize factored out.
            ScaledOps.push_back(Op);
            if (!Remainder->isZero())
              NewOps.push_back(Remainder);
            AnyNonZeroIndices = true;
          } else {
            // The operand was not divisible, so add it to the list of operands
            // we'll scan next iteration.
            NewOps.push_back(Op);
          }
        }
        // If we made any changes, update Ops.
        if (!ScaledOps.empty()) {
          Ops = NewOps;
          SimplifyAddOperands(Ops, Ty, SE);
        }
      }
    }

    // Record the scaled array index for this level of the type. If
    // we didn't find any operands that could be factored, tentatively
    // assume that element zero was selected (since the zero offset
    // would obviously be folded away).
    Value *Scaled = ScaledOps.empty() ?
                    Constant::getNullValue(Ty) :
                    expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
    GepIndices.push_back(Scaled);

    // Collect struct field index operands.
    while (StructType *STy = dyn_cast<StructType>(ElTy)) {
      bool FoundFieldNo = false;
      // An empty struct has no fields.
      if (STy->getNumElements() == 0) break;
      // Field offsets are known. See if a constant offset falls within any of
      // the struct fields.
      if (Ops.empty())
        break;
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
        if (SE.getTypeSizeInBits(C->getType()) <= 64) {
          const StructLayout &SL = *DL.getStructLayout(STy);
          uint64_t FullOffset = C->getValue()->getZExtValue();
          if (FullOffset < SL.getSizeInBytes()) {
            unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
            GepIndices.push_back(
                ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
            ElTy = STy->getTypeAtIndex(ElIdx);
            Ops[0] =
                SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
            AnyNonZeroIndices = true;
            FoundFieldNo = true;
          }
        }
      // If no struct field offsets were found, tentatively assume that
      // field zero was selected (since the zero offset would obviously
      // be folded away).
      if (!FoundFieldNo) {
        ElTy = STy->getTypeAtIndex(0u);
        GepIndices.push_back(
          Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
      }
    }

    if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
      ElTy = ATy->getElementType();
    else
      break;
  }

  // If none of the operands were convertible to proper GEP indices, cast
  // the base to i8* and do an ugly getelementptr with that. It's still
  // better than ptrtoint+arithmetic+inttoptr at least.
  if (!AnyNonZeroIndices) {
    // Cast the base to i8*.
    V = InsertNoopCastOfTo(V,
       Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));

    assert(!isa<Instruction>(V) ||
           SE.DT.dominates(cast<Instruction>(V), &*Builder.GetInsertPoint()));

    // Expand the operands for a plain byte offset.
    Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);

    // Fold a GEP with constant operands.
    if (Constant *CLHS = dyn_cast<Constant>(V))
      if (Constant *CRHS = dyn_cast<Constant>(Idx))
        return ConstantExpr::getGetElementPtr(Type::getInt8Ty(Ty->getContext()),
                                              CLHS, CRHS);

    // Do a quick scan to see if we have this GEP nearby.  If so, reuse it.
    unsigned ScanLimit = 6;
    BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
    // Scanning starts from the last instruction before the insertion point.
    BasicBlock::iterator IP = Builder.GetInsertPoint();
    if (IP != BlockBegin) {
      --IP;
      for (; ScanLimit; --IP, --ScanLimit) {
        // Don't count dbg.value against the ScanLimit, to avoid perturbing the
        // generated code.
        if (isa<DbgInfoIntrinsic>(IP))
          ScanLimit++;
        if (IP->getOpcode() == Instruction::GetElementPtr &&
            IP->getOperand(0) == V && IP->getOperand(1) == Idx)
          return &*IP;
        if (IP == BlockBegin) break;
      }
    }

    // Save the original insertion point so we can restore it when we're done.
    SCEVInsertPointGuard Guard(Builder, this);

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader->getTerminator());
    }

    // Emit a GEP.
    Value *GEP = Builder.CreateGEP(Builder.getInt8Ty(), V, Idx, "uglygep");
    rememberInstruction(GEP);

    return GEP;
  }

  {
    SCEVInsertPointGuard Guard(Builder, this);

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V)) break;

      bool AnyIndexNotLoopInvariant =
          std::any_of(GepIndices.begin(), GepIndices.end(),
                      [L](Value *Op) { return !L->isLoopInvariant(Op); });

      if (AnyIndexNotLoopInvariant)
        break;

      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader->getTerminator());
    }

    // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
    // because ScalarEvolution may have changed the address arithmetic to
    // compute a value which is beyond the end of the allocated object.
    Value *Casted = V;
    if (V->getType() != PTy)
      Casted = InsertNoopCastOfTo(Casted, PTy);
    Value *GEP = Builder.CreateGEP(OriginalElTy, Casted, GepIndices, "scevgep");
    Ops.push_back(SE.getUnknown(GEP));
    rememberInstruction(GEP);
  }

  return expand(SE.getAddExpr(Ops));
}

/// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
/// SCEV expansion. If they are nested, this is the most nested. If they are
/// neighboring, pick the later.
static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
                                        DominatorTree &DT) {
  if (!A) return B;
  if (!B) return A;
  if (A->contains(B)) return B;
  if (B->contains(A)) return A;
  if (DT.dominates(A->getHeader(), B->getHeader())) return B;
  if (DT.dominates(B->getHeader(), A->getHeader())) return A;
  return A; // Arbitrarily break the tie.
}

/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop.
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
  // Test whether we've already computed the most relevant loop for this SCEV.
  auto Pair = RelevantLoops.insert(std::make_pair(S, nullptr));
  if (!Pair.second)
    return Pair.first->second;

  if (isa<SCEVConstant>(S))
    // A constant has no relevant loops.
    return nullptr;
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
      return Pair.first->second = SE.LI.getLoopFor(I->getParent());
    // A non-instruction has no relevant loops.
    return nullptr;
  }
  if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
    const Loop *L = nullptr;
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      L = AR->getLoop();
    for (const SCEV *Op : N->operands())
      L = PickMostRelevantLoop(L, getRelevantLoop(Op), SE.DT);
    return RelevantLoops[N] = L;
  }
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
    const Loop *Result = getRelevantLoop(C->getOperand());
    return RelevantLoops[C] = Result;
  }
  if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
    const Loop *Result = PickMostRelevantLoop(
        getRelevantLoop(D->getLHS()), getRelevantLoop(D->getRHS()), SE.DT);
    return RelevantLoops[D] = Result;
  }
  llvm_unreachable("Unexpected SCEV type!");
}

namespace {

/// LoopCompare - Compare loops by PickMostRelevantLoop.
class LoopCompare {
  DominatorTree &DT;
public:
  explicit LoopCompare(DominatorTree &dt) : DT(dt) {}

  bool operator()(std::pair<const Loop *, const SCEV *> LHS,
                  std::pair<const Loop *, const SCEV *> RHS) const {
    // Sort pointer operands to the front, where visitAddExpr expects them.
    if (LHS.second->getType()->isPointerTy() !=
        RHS.second->getType()->isPointerTy())
      return LHS.second->getType()->isPointerTy();

    // Compare loops with PickMostRelevantLoop.
    if (LHS.first != RHS.first)
      return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;

    // If one operand is a non-constant negative and the other is not,
    // put the non-constant negative on the right so that a sub can
    // be used instead of a negate and add.
    if (LHS.second->isNonConstantNegative()) {
      if (!RHS.second->isNonConstantNegative())
        return false;
    } else if (RHS.second->isNonConstantNegative())
      return true;

    // Otherwise they are equivalent according to this comparison.
    return false;
  }
};

}

Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the add operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal, and
  // so that pointer operands are inserted first, which the code below relies on
  // to form more involved GEPs.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants and
  // pointer operands precede non-pointer operands.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(SE.DT));

  // Emit instructions to add all the operands. Hoist as much as possible
  // out of loops, and form meaningful getelementptrs where possible.
  Value *Sum = nullptr;
  for (auto I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E;) {
    const Loop *CurLoop = I->first;
    const SCEV *Op = I->second;
    if (!Sum) {
      // This is the first operand. Just expand it.
      Sum = expand(Op);
      ++I;
    } else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
      // The running sum expression is a pointer. Try to form a getelementptr
      // at this level with that as the base.
      SmallVector<const SCEV *, 4> NewOps;
      for (; I != E && I->first == CurLoop; ++I) {
        // If the operand is a SCEVUnknown of a non-instruction value, peek
        // through it, to enable more of it to be folded into the GEP.
        const SCEV *X = I->second;
        if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
          if (!isa<Instruction>(U->getValue()))
            X = SE.getSCEV(U->getValue());
        NewOps.push_back(X);
      }
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
    } else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
      // The running sum is an integer, and there's a pointer at this level.
      // Try to form a getelementptr. If the running sum is instructions,
      // use a SCEVUnknown to avoid re-analyzing them.
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) :
                                               SE.getSCEV(Sum));
      for (++I; I != E && I->first == CurLoop; ++I)
        NewOps.push_back(I->second);
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
    } else if (Op->isNonConstantNegative()) {
      // Instead of doing a negate and add, just do a subtract.
      Value *W = expandCodeFor(SE.getNegativeSCEV(Op), Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      Sum = InsertBinop(Instruction::Sub, Sum, W);
      ++I;
    } else {
      // A simple add.
      Value *W = expandCodeFor(Op, Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Sum)) std::swap(Sum, W);
      Sum = InsertBinop(Instruction::Add, Sum, W);
      ++I;
    }
  }

  return Sum;
}

Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the mul operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(SE.DT));

  // Emit instructions to mul all the operands. Hoist as much as possible
  // out of loops.
  Value *Prod = nullptr;
  for (const auto &I : OpsAndLoops) {
    const SCEV *Op = I.second;
    if (!Prod) {
      // This is the first operand. Just expand it.
      Prod = expand(Op);
    } else if (Op->isAllOnesValue()) {
      // Instead of doing a multiply by negative one, just do a negate.
      Prod = InsertNoopCastOfTo(Prod, Ty);
      Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod);
    } else {
      // A simple mul.
      Value *W = expandCodeFor(Op, Ty);
      Prod = InsertNoopCastOfTo(Prod, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Prod)) std::swap(Prod, W);
      const APInt *RHS;
      if (match(W, m_Power2(RHS))) {
        // Canonicalize Prod*(1<<C) to Prod<<C.
        assert(!Ty->isVectorTy() && "vector types are not SCEVable");
        Prod = InsertBinop(Instruction::Shl, Prod,
                           ConstantInt::get(Ty, RHS->logBase2()));
      } else {
        Prod = InsertBinop(Instruction::Mul, Prod, W);
      }
    }
  }

  return Prod;
}

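/// Expand a udiv. A constant power-of-two divisor is emitted as a logical
/// shift right instead of a udiv instruction; e.g. a udiv by 8 becomes
/// 'lshr %lhs, 3'.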
Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  Value *LHS = expandCodeFor(S->getLHS(), Ty);
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
    const APInt &RHS = SC->getAPInt();
    if (RHS.isPowerOf2())
      return InsertBinop(Instruction::LShr, LHS,
                         ConstantInt::get(Ty, RHS.logBase2()));
  }

  Value *RHS = expandCodeFor(S->getRHS(), Ty);
  return InsertBinop(Instruction::UDiv, LHS, RHS);
}

/// Move parts of Base into Rest to leave Base with the minimal
/// expression that provides a pointer operand suitable for a
/// GEP expansion.
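///
/// For example (an illustrative sketch, assuming Rest starts out as zero):
/// given Base = {a + b,+,s}, the addrec is first peeled off, leaving
/// Base = a + b and Rest = {0,+,s}; the add is then split, keeping only
/// its last operand (the likely pointer base) in Base and folding the
/// remaining operands into Rest, i.e. Rest = a + {0,+,s} if b is last.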
static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
                              ScalarEvolution &SE) {
  while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
    Base = A->getStart();
    Rest = SE.getAddExpr(Rest,
                         SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
                                          A->getStepRecurrence(SE),
                                          A->getLoop(),
                                          A->getNoWrapFlags(SCEV::FlagNW)));
  }
  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
    Base = A->getOperand(A->getNumOperands()-1);
    SmallVector<const SCEV *, 8> NewAddOps(A->op_begin(), A->op_end());
    NewAddOps.back() = Rest;
    Rest = SE.getAddExpr(NewAddOps);
    ExposePointerBase(Base, Rest, SE);
  }
}

820/// the PHI. If so, it may be reused by expanded expressions.
821bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
822                                         const Loop *L) {
823  if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
824      (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
825    return false;
826  // If any of the operands don't dominate the insert position, bail.
827  // Addrec operands are always loop-invariant, so this can only happen
828  // if there are instructions which haven't been hoisted.
829  if (L == IVIncInsertLoop) {
830    for (User::op_iterator OI = IncV->op_begin()+1,
831           OE = IncV->op_end(); OI != OE; ++OI)
832      if (Instruction *OInst = dyn_cast<Instruction>(OI))
833        if (!SE.DT.dominates(OInst, IVIncInsertPos))
834          return false;
835  }
836  // Advance to the next instruction.
837  IncV = dyn_cast<Instruction>(IncV->getOperand(0));
838  if (!IncV)
839    return false;
840
841  if (IncV->mayHaveSideEffects())
842    return false;
843
844  if (IncV != PN)
845    return true;
846
847  return isNormalAddRecExprPHI(PN, IncV, L);
848}
849
/// getIVIncOperand returns an induction variable increment's induction
/// variable operand.
///
/// If allowScale is set, any type of GEP is allowed as long as the nonIV
/// operands dominate InsertPos.
///
/// If allowScale is not set, ensure that a GEP increment conforms to one of the
/// simple patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP. If the pattern isn't recognized, return NULL.
Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
                                           Instruction *InsertPos,
                                           bool allowScale) {
  if (IncV == InsertPos)
    return nullptr;

  switch (IncV->getOpcode()) {
  default:
    return nullptr;
  // Check for a simple Add/Sub or GEP of a loop invariant step.
  case Instruction::Add:
  case Instruction::Sub: {
    Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
    if (!OInst || SE.DT.dominates(OInst, InsertPos))
      return dyn_cast<Instruction>(IncV->getOperand(0));
    return nullptr;
  }
  case Instruction::BitCast:
    return dyn_cast<Instruction>(IncV->getOperand(0));
  case Instruction::GetElementPtr:
    for (auto I = IncV->op_begin() + 1, E = IncV->op_end(); I != E; ++I) {
      if (isa<Constant>(*I))
        continue;
      if (Instruction *OInst = dyn_cast<Instruction>(*I)) {
        if (!SE.DT.dominates(OInst, InsertPos))
          return nullptr;
      }
      if (allowScale) {
        // allow any kind of GEP as long as it can be hoisted.
        continue;
      }
      // This must be a pointer addition of constants (pretty), which is already
      // handled, or some number of address-size elements (ugly). Ugly geps
      // have 2 operands. i1* is used by the expander to represent an
      // address-size element.
      if (IncV->getNumOperands() != 2)
        return nullptr;
      unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
      if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
          && IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
        return nullptr;
      break;
    }
    return dyn_cast<Instruction>(IncV->getOperand(0));
  }
}

/// If the insert point of the current builder or any of the builders on the
/// stack of saved builders has 'I' as its insert point, update it to point to
/// the instruction after 'I'.  This is intended to be used when the instruction
/// 'I' is being moved.  If this fixup is not done and 'I' is moved to a
/// different block, the inconsistent insert point (with a mismatched
/// Instruction and Block) can lead to an instruction being inserted in a block
/// other than its parent.
void SCEVExpander::fixupInsertPoints(Instruction *I) {
  BasicBlock::iterator It(*I);
  BasicBlock::iterator NewInsertPt = std::next(It);
  if (Builder.GetInsertPoint() == It)
    Builder.SetInsertPoint(&*NewInsertPt);
  for (auto *InsertPtGuard : InsertPointGuards)
    if (InsertPtGuard->GetInsertPoint() == It)
      InsertPtGuard->SetInsertPoint(NewInsertPt);
}

/// hoistIVInc - Attempt to hoist a simple IV increment above InsertPos to make
/// it available to other uses in this loop. Recursively hoist any operands,
/// until we reach a value that dominates InsertPos.
bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
  if (SE.DT.dominates(IncV, InsertPos))
    return true;

  // InsertPos must itself dominate IncV so that IncV's new position satisfies
  // its existing users.
  if (isa<PHINode>(InsertPos) ||
      !SE.DT.dominates(InsertPos->getParent(), IncV->getParent()))
    return false;

  if (!SE.LI.movementPreservesLCSSAForm(IncV, InsertPos))
    return false;

  // Check that the chain of IV operands leading back to Phi can be hoisted.
  SmallVector<Instruction*, 4> IVIncs;
  for(;;) {
    Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
    if (!Oper)
      return false;
    // IncV is safe to hoist.
    IVIncs.push_back(IncV);
    IncV = Oper;
    if (SE.DT.dominates(IncV, InsertPos))
      break;
  }
  for (auto I = IVIncs.rbegin(), E = IVIncs.rend(); I != E; ++I) {
    fixupInsertPoints(*I);
    (*I)->moveBefore(InsertPos);
  }
  return true;
}

/// Determine if this cyclic phi is in a form that would have been generated by
/// LSR. We don't care if the phi was actually expanded in this pass, as long
/// as it is in a low-cost form, for example, no implied multiplication. This
/// should match any patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP.
bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                           const Loop *L) {
  for(Instruction *IVOper = IncV;
      (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
                                /*allowScale=*/false));) {
    if (IVOper == PN)
      return true;
  }
  return false;
}

/// expandIVInc - Expand an IV increment at Builder's current InsertPos.
/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
/// need to materialize IV increments elsewhere to handle difficult situations.
Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
                                 Type *ExpandTy, Type *IntTy,
                                 bool useSubtract) {
  Value *IncV;
  // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
  if (ExpandTy->isPointerTy()) {
    PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
    // If the step isn't constant, don't use an implicitly scaled GEP, because
    // that would require a multiply inside the loop.
    if (!isa<ConstantInt>(StepV))
      GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
                                  GEPPtrTy->getAddressSpace());
    const SCEV *const StepArray[1] = { SE.getSCEV(StepV) };
    IncV = expandAddToGEP(StepArray, StepArray+1, GEPPtrTy, IntTy, PN);
    if (IncV->getType() != PN->getType()) {
      IncV = Builder.CreateBitCast(IncV, PN->getType());
      rememberInstruction(IncV);
    }
  } else {
    IncV = useSubtract ?
      Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
      Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
    rememberInstruction(IncV);
  }
  return IncV;
}

/// \brief Hoist the addrec instruction chain rooted in the loop phi above the
/// position. This routine assumes that this is possible (has been checked).
void SCEVExpander::hoistBeforePos(DominatorTree *DT, Instruction *InstToHoist,
                                  Instruction *Pos, PHINode *LoopPhi) {
  do {
    if (DT->dominates(InstToHoist, Pos))
      break;
    // Make sure the increment is where we want it. But don't move it
    // down past a potential existing post-inc user.
    fixupInsertPoints(InstToHoist);
    InstToHoist->moveBefore(Pos);
    Pos = InstToHoist;
    InstToHoist = cast<Instruction>(InstToHoist->getOperand(0));
  } while (InstToHoist != LoopPhi);
}

/// \brief Check whether we can cheaply express the requested SCEV in terms of
/// the available PHI SCEV by truncation and/or inversion of the step.
static bool canBeCheaplyTransformed(ScalarEvolution &SE,
                                    const SCEVAddRecExpr *Phi,
                                    const SCEVAddRecExpr *Requested,
                                    bool &InvertStep) {
  Type *PhiTy = SE.getEffectiveSCEVType(Phi->getType());
  Type *RequestedTy = SE.getEffectiveSCEVType(Requested->getType());

  if (RequestedTy->getIntegerBitWidth() > PhiTy->getIntegerBitWidth())
    return false;

  // Try truncate it if necessary.
  Phi = dyn_cast<SCEVAddRecExpr>(SE.getTruncateOrNoop(Phi, RequestedTy));
  if (!Phi)
    return false;

  // Check whether truncation will help.
  if (Phi == Requested) {
    InvertStep = false;
    return true;
  }

  // Check whether inverting will help: {R,+,-1} == R - {0,+,1}.
  if (SE.getAddExpr(Requested->getStart(),
                    SE.getNegativeSCEV(Requested)) == Phi) {
    InvertStep = true;
    return true;
  }

  return false;
}

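/// A sketch of the idea behind the two checks below: incrementing an addrec
/// AR by its step S cannot wrap iff performing the add in a type twice as
/// wide gives the same result as widening afterwards, i.e.
/// ext(AR + S) == ext(AR) + ext(S), with sext for no-signed-wrap and zext
/// for no-unsigned-wrap. ScalarEvolution folds both sides to the same
/// expression when it can prove the increment doesn't wrap, so structural
/// equality of the two widened expressions certifies the flag.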
static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
  if (!isa<IntegerType>(AR->getType()))
    return false;

  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getSignExtendExpr(Step, WideTy),
                                            SE.getSignExtendExpr(AR, WideTy));
  const SCEV *ExtendAfterOp =
    SE.getSignExtendExpr(SE.getAddExpr(AR, Step), WideTy);
  return ExtendAfterOp == OpAfterExtend;
}

static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
  if (!isa<IntegerType>(AR->getType()))
    return false;

  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getZeroExtendExpr(Step, WideTy),
                                            SE.getZeroExtendExpr(AR, WideTy));
  const SCEV *ExtendAfterOp =
    SE.getZeroExtendExpr(SE.getAddExpr(AR, Step), WideTy);
  return ExtendAfterOp == OpAfterExtend;
}

/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
                                        const Loop *L,
                                        Type *ExpandTy,
                                        Type *IntTy,
                                        Type *&TruncTy,
                                        bool &InvertStep) {
  assert((!IVIncInsertLoop || IVIncInsertPos) &&
         "Uninitialized insert position");

  // Reuse a previously-inserted PHI, if present.
  BasicBlock *LatchBlock = L->getLoopLatch();
  if (LatchBlock) {
    PHINode *AddRecPhiMatch = nullptr;
    Instruction *IncV = nullptr;
    TruncTy = nullptr;
    InvertStep = false;

    // Only try partially matching scevs that need truncation and/or
    // step-inversion if we know this loop is outside the current loop.
    bool TryNonMatchingSCEV =
        IVIncInsertLoop &&
        SE.DT.properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());

    for (auto &I : *L->getHeader()) {
      auto *PN = dyn_cast<PHINode>(&I);
      if (!PN || !SE.isSCEVable(PN->getType()))
        continue;

      const SCEVAddRecExpr *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(PN));
      if (!PhiSCEV)
        continue;

      bool IsMatchingSCEV = PhiSCEV == Normalized;
      // We only handle truncation and inversion of phi recurrences for the
      // expanded expression if the expanded expression's loop dominates the
      // loop we insert to. Check now, so we can bail out early.
      if (!IsMatchingSCEV && !TryNonMatchingSCEV)
        continue;

      Instruction *TempIncV =
          cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock));

      // Check whether we can reuse this PHI node.
      if (LSRMode) {
        if (!isExpandedAddRecExprPHI(PN, TempIncV, L))
          continue;
        if (L == IVIncInsertLoop && !hoistIVInc(TempIncV, IVIncInsertPos))
          continue;
      } else {
        if (!isNormalAddRecExprPHI(PN, TempIncV, L))
          continue;
      }

      // Stop if we have found an exact match SCEV.
      if (IsMatchingSCEV) {
        IncV = TempIncV;
        TruncTy = nullptr;
        InvertStep = false;
        AddRecPhiMatch = PN;
        break;
      }

      // Try whether the phi can be translated into the requested form
      // (truncated and/or offset by a constant).
      if ((!TruncTy || InvertStep) &&
          canBeCheaplyTransformed(SE, PhiSCEV, Normalized, InvertStep)) {
        // Record the phi node. But don't stop; we might find an exact match
        // later.
        AddRecPhiMatch = PN;
        IncV = TempIncV;
        TruncTy = SE.getEffectiveSCEVType(Normalized->getType());
      }
    }

    if (AddRecPhiMatch) {
      // Potentially, move the increment. We have made sure in
      // isExpandedAddRecExprPHI or hoistIVInc that this is possible.
      if (L == IVIncInsertLoop)
        hoistBeforePos(&SE.DT, IncV, IVIncInsertPos, AddRecPhiMatch);

      // Ok, the add recurrence looks usable.
      // Remember this PHI, even in post-inc mode.
      InsertedValues.insert(AddRecPhiMatch);
      // Remember the increment.
      rememberInstruction(IncV);
      return AddRecPhiMatch;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  SCEVInsertPointGuard Guard(Builder, this);

  // Another AddRec may need to be recursively expanded below. For example, if
  // this AddRec is quadratic, the StepV may itself be an AddRec in this
  // loop. Remove this loop from the PostIncLoops set before expanding such
  // AddRecs. Otherwise, we cannot find a valid position for the step
  // (i.e. StepV can never dominate its loop header).  Ideally, we could do
  // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
  // so it's not worth implementing SmallPtrSet::swap.
  PostIncLoopSet SavedPostIncLoops = PostIncLoops;
  PostIncLoops.clear();

  // Expand code for the start value.
  Value *StartV =
      expandCodeFor(Normalized->getStart(), ExpandTy, &L->getHeader()->front());

  // StartV must be hoisted into L's preheader to dominate the new phi.
  assert(!isa<Instruction>(StartV) ||
         SE.DT.properlyDominates(cast<Instruction>(StartV)->getParent(),
                                 L->getHeader()));

  // Expand code for the step value. Do this before creating the PHI so that PHI
  // reuse code doesn't see an incomplete PHI.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  // If the stride is negative, insert a sub instead of an add for the increment
  // (unless it's a constant, because subtracts of constants are canonicalized
  // to adds).
  bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
  if (useSubtract)
    Step = SE.getNegativeSCEV(Step);
  // Expand the step somewhere that dominates the loop header.
  Value *StepV = expandCodeFor(Step, IntTy, &L->getHeader()->front());

  // The no-wrap behavior proved by IsIncrement(NUW|NSW) is only applicable if
  // we actually do emit an addition.  It does not apply if we emit a
  // subtraction.
  bool IncrementIsNUW = !useSubtract && IsIncrementNUW(SE, Normalized);
  bool IncrementIsNSW = !useSubtract && IsIncrementNSW(SE, Normalized);

  // Create the PHI.
  BasicBlock *Header = L->getHeader();
  Builder.SetInsertPoint(Header, Header->begin());
  pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
  PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
                                  Twine(IVName) + ".iv");
  rememberInstruction(PN);

  // Create the step instructions and populate the PHI.
  for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
    BasicBlock *Pred = *HPI;

    // Add a start value.
    if (!L->contains(Pred)) {
      PN->addIncoming(StartV, Pred);
      continue;
    }

    // Create a step value and add it to the PHI.
    // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the
    // instructions at IVIncInsertPos.
    Instruction *InsertPos = L == IVIncInsertLoop ?
      IVIncInsertPos : Pred->getTerminator();
    Builder.SetInsertPoint(InsertPos);
    Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);

    if (isa<OverflowingBinaryOperator>(IncV)) {
      if (IncrementIsNUW)
        cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap();
      if (IncrementIsNSW)
        cast<BinaryOperator>(IncV)->setHasNoSignedWrap();
    }
    PN->addIncoming(IncV, Pred);
  }

  // After expanding subexpressions, restore the PostIncLoops set so the caller
  // can ensure that IVIncrement dominates the current uses.
  PostIncLoops = SavedPostIncLoops;

  // Remember this PHI, even in post-inc mode.
  InsertedValues.insert(PN);

  return PN;
}

1258Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
1259  Type *STy = S->getType();
1260  Type *IntTy = SE.getEffectiveSCEVType(STy);
1261  const Loop *L = S->getLoop();
1262
1263  // Determine a normalized form of this expression, which is the expression
1264  // before any post-inc adjustment is made.
1265  const SCEVAddRecExpr *Normalized = S;
1266  if (PostIncLoops.count(L)) {
1267    PostIncLoopSet Loops;
1268    Loops.insert(L);
1269    Normalized = cast<SCEVAddRecExpr>(TransformForPostIncUse(
1270        Normalize, S, nullptr, nullptr, Loops, SE, SE.DT));
1271  }
1272
1273  // Strip off any non-loop-dominating component from the addrec start.
1274  const SCEV *Start = Normalized->getStart();
1275  const SCEV *PostLoopOffset = nullptr;
1276  if (!SE.properlyDominates(Start, L->getHeader())) {
1277    PostLoopOffset = Start;
1278    Start = SE.getConstant(Normalized->getType(), 0);
1279    Normalized = cast<SCEVAddRecExpr>(
1280      SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
1281                       Normalized->getLoop(),
1282                       Normalized->getNoWrapFlags(SCEV::FlagNW)));
1283  }
1284
1285  // Strip off any non-loop-dominating component from the addrec step.
1286  const SCEV *Step = Normalized->getStepRecurrence(SE);
1287  const SCEV *PostLoopScale = nullptr;
1288  if (!SE.dominates(Step, L->getHeader())) {
1289    PostLoopScale = Step;
1290    Step = SE.getConstant(Normalized->getType(), 1);
1291    if (!Start->isZero()) {
1292        // The normalization below assumes that Start is constant zero, so if
1293        // it isn't re-associate Start to PostLoopOffset.
1294        assert(!PostLoopOffset && "Start not-null but PostLoopOffset set?");
1295        PostLoopOffset = Start;
1296        Start = SE.getConstant(Normalized->getType(), 0);
1297    }
1298    Normalized =
1299      cast<SCEVAddRecExpr>(SE.getAddRecExpr(
1300                             Start, Step, Normalized->getLoop(),
1301                             Normalized->getNoWrapFlags(SCEV::FlagNW)));
1302  }
1303
1304  // Expand the core addrec. If we need post-loop scaling, force it to
1305  // expand to an integer type to avoid the need for additional casting.
1306  Type *ExpandTy = PostLoopScale ? IntTy : STy;
1307  // In some cases, we decide to reuse an existing phi node but need to truncate
1308  // it and/or invert the step.
1309  Type *TruncTy = nullptr;
1310  bool InvertStep = false;
1311  PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy,
1312                                          TruncTy, InvertStep);
1313
1314  // Accommodate post-inc mode, if necessary.
1315  Value *Result;
1316  if (!PostIncLoops.count(L))
1317    Result = PN;
1318  else {
1319    // In PostInc mode, use the post-incremented value.
1320    BasicBlock *LatchBlock = L->getLoopLatch();
1321    assert(LatchBlock && "PostInc mode requires a unique loop latch!");
1322    Result = PN->getIncomingValueForBlock(LatchBlock);
1323
1324    // For an expansion to use the postinc form, the client must call
1325    // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
1326    // or dominated by IVIncInsertPos.
1327    if (isa<Instruction>(Result) &&
1328        !SE.DT.dominates(cast<Instruction>(Result),
1329                         &*Builder.GetInsertPoint())) {
1330      // The induction variable's postinc expansion does not dominate this use.
1331      // IVUsers tries to prevent this case, so it is rare. However, it can
1332      // happen when an IVUser outside the loop is not dominated by the latch
1333      // block. Adjusting IVIncInsertPos before expansion begins cannot handle
1334      // all cases. Consider a phi outside the loop whose operand is replaced
1335      // expansion with the value of the postinc user. Without fundamentally
1336      // changing the way postinc users are tracked, the only remedy is
1337      // inserting an extra IV increment. StepV might fold into PostLoopOffset,
1338      // but hopefully expandCodeFor handles that.
1339      bool useSubtract =
1340        !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
1341      if (useSubtract)
1342        Step = SE.getNegativeSCEV(Step);
1343      Value *StepV;
1344      {
1345        // Expand the step somewhere that dominates the loop header.
1346        SCEVInsertPointGuard Guard(Builder, this);
1347        StepV = expandCodeFor(Step, IntTy, &L->getHeader()->front());
1348      }
1349      Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
1350    }
1351  }
1352
1353  // We have decided to reuse an induction variable of a dominating loop. Apply
1354  // truncation and/or inversion of the step.
1355  if (TruncTy) {
1356    Type *ResTy = Result->getType();
1357    // Normalize the result type.
1358    if (ResTy != SE.getEffectiveSCEVType(ResTy))
1359      Result = InsertNoopCastOfTo(Result, SE.getEffectiveSCEVType(ResTy));
1360    // Truncate the result.
1361    if (TruncTy != Result->getType()) {
1362      Result = Builder.CreateTrunc(Result, TruncTy);
1363      rememberInstruction(Result);
1364    }
1365    // Invert the result.
1366    if (InvertStep) {
1367      Result = Builder.CreateSub(expandCodeFor(Normalized->getStart(), TruncTy),
1368                                 Result);
1369      rememberInstruction(Result);
1370    }
1371  }
1372
1373  // Re-apply any non-loop-dominating scale.
1374  if (PostLoopScale) {
1375    assert(S->isAffine() && "Can't linearly scale non-affine recurrences.");
1376    Result = InsertNoopCastOfTo(Result, IntTy);
1377    Result = Builder.CreateMul(Result,
1378                               expandCodeFor(PostLoopScale, IntTy));
1379    rememberInstruction(Result);
1380  }
1381
1382  // Re-apply any non-loop-dominating offset.
1383  if (PostLoopOffset) {
1384    if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
1385      const SCEV *const OffsetArray[1] = { PostLoopOffset };
1386      Result = expandAddToGEP(OffsetArray, OffsetArray+1, PTy, IntTy, Result);
1387    } else {
1388      Result = InsertNoopCastOfTo(Result, IntTy);
1389      Result = Builder.CreateAdd(Result,
1390                                 expandCodeFor(PostLoopOffset, IntTy));
1391      rememberInstruction(Result);
1392    }
1393  }
1394
1395  return Result;
1396}
1397
1398Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
1399  if (!CanonicalMode) return expandAddRecExprLiterally(S);
1400
1401  Type *Ty = SE.getEffectiveSCEVType(S->getType());
1402  const Loop *L = S->getLoop();
1403
1404  // First check for an existing canonical IV in a suitable type.
1405  PHINode *CanonicalIV = nullptr;
1406  if (PHINode *PN = L->getCanonicalInductionVariable())
1407    if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
1408      CanonicalIV = PN;
1409
1410  // Rewrite an AddRec in terms of the canonical induction variable, if
1411  // its type is narrower.
1412  if (CanonicalIV &&
1413      SE.getTypeSizeInBits(CanonicalIV->getType()) >
1414      SE.getTypeSizeInBits(Ty)) {
1415    SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
1416    for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
1417      NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
1418    Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
1419                                       S->getNoWrapFlags(SCEV::FlagNW)));
1420    BasicBlock::iterator NewInsertPt =
1421        findInsertPointAfter(cast<Instruction>(V), Builder.GetInsertBlock());
1422    V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), nullptr,
1423                      &*NewInsertPt);
1424    return V;
1425  }
1426
1427  // {X,+,F} --> X + {0,+,F}
1428  if (!S->getStart()->isZero()) {
1429    SmallVector<const SCEV *, 4> NewOps(S->op_begin(), S->op_end());
1430    NewOps[0] = SE.getConstant(Ty, 0);
1431    const SCEV *Rest = SE.getAddRecExpr(NewOps, L,
1432                                        S->getNoWrapFlags(SCEV::FlagNW));
1433
1434    // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
1435    // comments on expandAddToGEP for details.
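    // Illustrative example (hypothetical IR names): for S = {%p,+,4}<%L> with
    // an i8* start %p, Base becomes %p and Rest becomes {0,+,4}<%L>, so the
    // recurrence is emitted as a GEP off %p rather than as
    // ptrtoint/add/inttoptr arithmetic.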
1436    const SCEV *Base = S->getStart();
1437    const SCEV *RestArray[1] = { Rest };
1438    // Dig into the expression to find the pointer base for a GEP.
1439    ExposePointerBase(Base, RestArray[0], SE);
1440    // If we found a pointer, expand the AddRec with a GEP.
1441    if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
1442      // Make sure the Base isn't something exotic, such as a multiplied
1443      // or divided pointer value. In those cases, the result type isn't
1444      // actually a pointer type.
1445      if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
1446        Value *StartV = expand(Base);
1447        assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
1448        return expandAddToGEP(RestArray, RestArray+1, PTy, Ty, StartV);
1449      }
1450    }
1451
1452    // Just do a normal add. Pre-expand the operands to suppress folding.
1453    //
1454    // The LHS and RHS values are factored out of the expand call to make the
1455    // output independent of the argument evaluation order.
1456    const SCEV *AddExprLHS = SE.getUnknown(expand(S->getStart()));
1457    const SCEV *AddExprRHS = SE.getUnknown(expand(Rest));
1458    return expand(SE.getAddExpr(AddExprLHS, AddExprRHS));
1459  }
1460
1461  // If we don't yet have a canonical IV, create one.
1462  if (!CanonicalIV) {
1463    // Create and insert the PHI node for the induction variable in the
1464    // specified loop.
1465    BasicBlock *Header = L->getHeader();
1466    pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
1467    CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
1468                                  &Header->front());
1469    rememberInstruction(CanonicalIV);
1470
1471    SmallSet<BasicBlock *, 4> PredSeen;
1472    Constant *One = ConstantInt::get(Ty, 1);
1473    for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
1474      BasicBlock *HP = *HPI;
1475      if (!PredSeen.insert(HP).second) {
1476        // There must be an incoming value for each predecessor, even the
1477        // duplicates!
1478        CanonicalIV->addIncoming(CanonicalIV->getIncomingValueForBlock(HP), HP);
1479        continue;
1480      }
1481
1482      if (L->contains(HP)) {
1483        // Insert a unit add instruction right before the terminator
1484        // corresponding to the back-edge.
1485        Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
1486                                                     "indvar.next",
1487                                                     HP->getTerminator());
1488        Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
1489        rememberInstruction(Add);
1490        CanonicalIV->addIncoming(Add, HP);
1491      } else {
1492        CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
1493      }
1494    }
1495  }
1496
1497  // {0,+,1} --> Insert a canonical induction variable into the loop!
1498  if (S->isAffine() && S->getOperand(1)->isOne()) {
1499    assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
1500           "IVs with types different from the canonical IV should "
1501           "already have been handled!");
1502    return CanonicalIV;
1503  }
1504
1505  // {0,+,F} --> {0,+,1} * F
1506
1507  // If this is a simple linear addrec, emit it now as a special case.
1508  if (S->isAffine())    // {0,+,F} --> i*F
1509    return
1510      expand(SE.getTruncateOrNoop(
1511        SE.getMulExpr(SE.getUnknown(CanonicalIV),
1512                      SE.getNoopOrAnyExtend(S->getOperand(1),
1513                                            CanonicalIV->getType())),
1514        Ty));
1515
1516  // If this is a chain of recurrences, turn it into a closed form, using the
1517  // folders, then expandCodeFor the closed form.  This allows the folders to
1518  // simplify the expression without having to build a bunch of special code
1519  // into this folder.
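  // Illustrative example: a quadratic recurrence such as {0,+,1,+,1},
  // evaluated at the canonical IV i, folds via binomial expansion to the
  // closed form i + i*(i-1)/2 == i*(i+1)/2, which is then expanded as
  // ordinary arithmetic.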
1520  const SCEV *IH = SE.getUnknown(CanonicalIV);   // Get I as a "symbolic" SCEV.
1521
1522  // Promote S up to the canonical IV type, if the cast is foldable.
1523  const SCEV *NewS = S;
1524  const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
1525  if (isa<SCEVAddRecExpr>(Ext))
1526    NewS = Ext;
1527
1528  const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
1529  //cerr << "Evaluated: " << *this << "\n     to: " << *V << "\n";
1530
1531  // Truncate the result down to the original type, if needed.
1532  const SCEV *T = SE.getTruncateOrNoop(V, Ty);
1533  return expand(T);
1534}
1535
1536Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
1537  Type *Ty = SE.getEffectiveSCEVType(S->getType());
1538  Value *V = expandCodeFor(S->getOperand(),
1539                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
1540  Value *I = Builder.CreateTrunc(V, Ty);
1541  rememberInstruction(I);
1542  return I;
1543}
1544
1545Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
1546  Type *Ty = SE.getEffectiveSCEVType(S->getType());
1547  Value *V = expandCodeFor(S->getOperand(),
1548                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
1549  Value *I = Builder.CreateZExt(V, Ty);
1550  rememberInstruction(I);
1551  return I;
1552}
1553
1554Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
1555  Type *Ty = SE.getEffectiveSCEVType(S->getType());
1556  Value *V = expandCodeFor(S->getOperand(),
1557                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
1558  Value *I = Builder.CreateSExt(V, Ty);
1559  rememberInstruction(I);
1560  return I;
1561}
1562
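// An n-ary smax is lowered to a chain of icmp/select pairs; for two operands
// %a and %b this emits, illustratively:
//   %cmp  = icmp sgt i32 %a, %b
//   %smax = select i1 %cmp, i32 %a, i32 %b
// visitUMaxExpr below is identical except that it compares with icmp ugt.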
1563Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
1564  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
1565  Type *Ty = LHS->getType();
1566  for (int i = S->getNumOperands()-2; i >= 0; --i) {
1567    // In the case of mixed integer and pointer types, do the
1568    // rest of the comparisons as integer.
1569    if (S->getOperand(i)->getType() != Ty) {
1570      Ty = SE.getEffectiveSCEVType(Ty);
1571      LHS = InsertNoopCastOfTo(LHS, Ty);
1572    }
1573    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
1574    Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
1575    rememberInstruction(ICmp);
1576    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
1577    rememberInstruction(Sel);
1578    LHS = Sel;
1579  }
1580  // In the case of mixed integer and pointer types, cast the
1581  // final result back to the pointer type.
1582  if (LHS->getType() != S->getType())
1583    LHS = InsertNoopCastOfTo(LHS, S->getType());
1584  return LHS;
1585}
1586
1587Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
1588  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
1589  Type *Ty = LHS->getType();
1590  for (int i = S->getNumOperands()-2; i >= 0; --i) {
1591    // In the case of mixed integer and pointer types, do the
1592    // rest of the comparisons as integer.
1593    if (S->getOperand(i)->getType() != Ty) {
1594      Ty = SE.getEffectiveSCEVType(Ty);
1595      LHS = InsertNoopCastOfTo(LHS, Ty);
1596    }
1597    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
1598    Value *ICmp = Builder.CreateICmpUGT(LHS, RHS);
1599    rememberInstruction(ICmp);
1600    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
1601    rememberInstruction(Sel);
1602    LHS = Sel;
1603  }
1604  // In the case of mixed integer and pointer types, cast the
1605  // final result back to the pointer type.
1606  if (LHS->getType() != S->getType())
1607    LHS = InsertNoopCastOfTo(LHS, S->getType());
1608  return LHS;
1609}
1610
1611Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
1612                                   Instruction *IP) {
1613  setInsertPoint(IP);
1614  return expandCodeFor(SH, Ty);
1615}
1616
1617Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
1618  // Expand the code for this SCEV.
1619  Value *V = expand(SH);
1620  if (Ty) {
1621    assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
1622           "non-trivial casts should be done with the SCEVs directly!");
1623    V = InsertNoopCastOfTo(V, Ty);
1624  }
1625  return V;
1626}
1627
1628Value *SCEVExpander::FindValueInExprValueMap(const SCEV *S,
1629                                             const Instruction *InsertPt) {
1630  SetVector<Value *> *Set = SE.getSCEVValues(S);
1631  // If the expansion is not in CanonicalMode and the SCEV contains any
1632  // scAddRecExpr subexpression, the SCEV must be expanded literally.
1633  if (CanonicalMode || !SE.containsAddRecurrence(S)) {
1634    // If S is scConstant, it may be worse to reuse an existing Value.
1635    if (S->getSCEVType() != scConstant && Set) {
1636      // Choose a Value from the set which dominates the insertPt.
1637      // insertPt should be inside the Value's parent loop so as not to break
1638      // the LCSSA form.
1639      for (auto const &Ent : *Set) {
1640        // Skip entries that are not instructions of the same type as S.
1641        auto *EntInst = dyn_cast_or_null<Instruction>(Ent);
1642        if (EntInst &&
1643            S->getType() == EntInst->getType() &&
1644            EntInst->getFunction() == InsertPt->getFunction() &&
1645            SE.DT.dominates(EntInst, InsertPt) &&
1646            (SE.LI.getLoopFor(EntInst->getParent()) == nullptr ||
1647             SE.LI.getLoopFor(EntInst->getParent())->contains(InsertPt))) {
1648          return Ent;
1649        }
1650      }
1651    }
1652  }
1653  return nullptr;
1654}
1655
1656// The expansion of SCEV will either reuse a previous Value in ExprValueMap,
1657// or expand the SCEV literally. Specifically, if the expansion is in LSRMode,
1658// and the SCEV contains any sub scAddRecExpr type SCEV, it will be expanded
1659// literally, to prevent LSR's transformed SCEV from being reverted. Otherwise,
1660// the expansion will try to reuse Value from ExprValueMap, and only when it
1661// fails, expand the SCEV literally.
1662Value *SCEVExpander::expand(const SCEV *S) {
1663  // Compute an insertion point for this SCEV object. Hoist the instructions
1664  // as far out in the loop nest as possible.
1665  Instruction *InsertPt = &*Builder.GetInsertPoint();
1666  for (Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock());;
1667       L = L->getParentLoop())
1668    if (SE.isLoopInvariant(S, L)) {
1669      if (!L) break;
1670      if (BasicBlock *Preheader = L->getLoopPreheader())
1671        InsertPt = Preheader->getTerminator();
1672      else {
1673        // LSR sets the insertion point for AddRec start/step values to the
1674        // block start to simplify value reuse, even though it's an invalid
1675        // position. SCEVExpander must correct for this in all cases.
1676        InsertPt = &*L->getHeader()->getFirstInsertionPt();
1677      }
1678    } else {
1679      // If the SCEV is computable at this level, insert it into the header
1680      // after the PHIs (and after any other instructions that we've inserted
1681      // there) so that it is guaranteed to dominate any user inside the loop.
1682      if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
1683        InsertPt = &*L->getHeader()->getFirstInsertionPt();
1684      while (InsertPt->getIterator() != Builder.GetInsertPoint() &&
1685             (isInsertedInstruction(InsertPt) ||
1686              isa<DbgInfoIntrinsic>(InsertPt))) {
1687        InsertPt = &*std::next(InsertPt->getIterator());
1688      }
1689      break;
1690    }
1691
1692  // Check to see if we already expanded this here.
1693  auto I = InsertedExpressions.find(std::make_pair(S, InsertPt));
1694  if (I != InsertedExpressions.end())
1695    return I->second;
1696
1697  SCEVInsertPointGuard Guard(Builder, this);
1698  Builder.SetInsertPoint(InsertPt);
1699
1700  // Expand the expression into instructions.
1701  Value *V = FindValueInExprValueMap(S, InsertPt);
1702
1703  if (!V)
1704    V = visit(S);
1705
1706  // Remember the expanded value for this SCEV at this location.
1707  //
1708  // This is independent of PostIncLoops. The mapped value simply materializes
1709  // the expression at this insertion point. If the mapped value happened to be
1710  // a postinc expansion, it could be reused by a non-postinc user, but only if
1711  // its insertion point was already at the head of the loop.
1712  InsertedExpressions[std::make_pair(S, InsertPt)] = V;
1713  return V;
1714}
1715
1716void SCEVExpander::rememberInstruction(Value *I) {
1717  if (!PostIncLoops.empty())
1718    InsertedPostIncValues.insert(I);
1719  else
1720    InsertedValues.insert(I);
1721}
1722
1723/// getOrInsertCanonicalInductionVariable - This method returns the
1724/// canonical induction variable of the specified type for the specified
1725/// loop (inserting one if there is none).  A canonical induction variable
1726/// starts at zero and steps by one on each iteration.
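///
/// For a loop %L this typically materializes IR like the following
/// (illustrative names):
///   %indvar      = phi i32 [ 0, %preheader ], [ %indvar.next, %backedge ]
///   %indvar.next = add i32 %indvar, 1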
1727PHINode *
1728SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
1729                                                    Type *Ty) {
1730  assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");
1731
1732  // Build a SCEV for {0,+,1}<L>.
1733  // Conservatively use FlagAnyWrap for now.
1734  const SCEV *H = SE.getAddRecExpr(SE.getConstant(Ty, 0),
1735                                   SE.getConstant(Ty, 1), L, SCEV::FlagAnyWrap);
1736
1737  // Emit code for it.
1738  SCEVInsertPointGuard Guard(Builder, this);
1739  PHINode *V =
1740      cast<PHINode>(expandCodeFor(H, nullptr, &L->getHeader()->front()));
1741
1742  return V;
1743}
1744
1745/// replaceCongruentIVs - Check for congruent phis in this loop header and
1746/// replace them with their most canonical representative. Return the number of
1747/// phis eliminated.
1748///
1749/// This does not depend on any SCEVExpander state but should be used in
1750/// the same context that SCEVExpander is used.
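///
/// For example (illustrative), two header phis that both compute {0,+,1}<%L>:
///   %iv1 = phi i32 [ 0, %ph ], [ %iv1.next, %latch ]
///   %iv2 = phi i32 [ 0, %ph ], [ %iv2.next, %latch ]
/// are congruent; uses of %iv2 are rewritten to %iv1, and %iv2 is queued on
/// DeadInsts for cleanup.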
1751unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
1752                                           SmallVectorImpl<WeakVH> &DeadInsts,
1753                                           const TargetTransformInfo *TTI) {
1754  // Find integer phis in order of increasing width.
1755  SmallVector<PHINode*, 8> Phis;
1756  for (auto &I : *L->getHeader()) {
1757    if (auto *PN = dyn_cast<PHINode>(&I))
1758      Phis.push_back(PN);
1759    else
1760      break;
1761  }
1762
1763  if (TTI)
1764    std::sort(Phis.begin(), Phis.end(), [](Value *LHS, Value *RHS) {
1765      // Put pointers at the back and make sure pointer < pointer = false.
1766      if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
1767        return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();
1768      return RHS->getType()->getPrimitiveSizeInBits() <
1769             LHS->getType()->getPrimitiveSizeInBits();
1770    });
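  // E.g. (illustrative): integer phis of types {i32, i64, i16} are visited in
  // the order i64, i32, i16, so the widest phi becomes the canonical
  // representative and Phis.back() has the narrowest type.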
1771
1772  unsigned NumElim = 0;
1773  DenseMap<const SCEV *, PHINode *> ExprToIVMap;
1774  // Process phis from wide to narrow. Map wide phis to their truncation
1775  // so narrow phis can reuse them.
1776  for (PHINode *Phi : Phis) {
1777    auto SimplifyPHINode = [&](PHINode *PN) -> Value * {
1778      if (Value *V = SimplifyInstruction(PN, DL, &SE.TLI, &SE.DT, &SE.AC))
1779        return V;
1780      if (!SE.isSCEVable(PN->getType()))
1781        return nullptr;
1782      auto *Const = dyn_cast<SCEVConstant>(SE.getSCEV(PN));
1783      if (!Const)
1784        return nullptr;
1785      return Const->getValue();
1786    };
1787
1788    // Fold constant phis. They may be congruent to other constant phis and
1789    // would confuse the logic below that expects proper IVs.
1790    if (Value *V = SimplifyPHINode(Phi)) {
1791      if (V->getType() != Phi->getType())
1792        continue;
1793      Phi->replaceAllUsesWith(V);
1794      DeadInsts.emplace_back(Phi);
1795      ++NumElim;
1796      DEBUG_WITH_TYPE(DebugType, dbgs()
1797                      << "INDVARS: Eliminated constant iv: " << *Phi << '\n');
1798      continue;
1799    }
1800
1801    if (!SE.isSCEVable(Phi->getType()))
1802      continue;
1803
1804    PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
1805    if (!OrigPhiRef) {
1806      OrigPhiRef = Phi;
1807      if (Phi->getType()->isIntegerTy() && TTI &&
1808          TTI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
1809        // This phi can be freely truncated to the narrowest phi type. Map the
1810        // truncated expression to it so it will be reused for narrow types.
1811        const SCEV *TruncExpr =
1812          SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType());
1813        ExprToIVMap[TruncExpr] = Phi;
1814      }
1815      continue;
1816    }
1817
1818    // Replacing a pointer phi with an integer phi or vice-versa doesn't make
1819    // sense.
1820    if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
1821      continue;
1822
1823    if (BasicBlock *LatchBlock = L->getLoopLatch()) {
1824      Instruction *OrigInc = dyn_cast<Instruction>(
1825          OrigPhiRef->getIncomingValueForBlock(LatchBlock));
1826      Instruction *IsomorphicInc =
1827          dyn_cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));
1828
1829      if (OrigInc && IsomorphicInc) {
1830        // If this phi has the same width but is more canonical, replace the
1831        // original with it. As part of the "more canonical" determination,
1832        // respect a prior decision to use an IV chain.
1833        if (OrigPhiRef->getType() == Phi->getType() &&
1834            !(ChainedPhis.count(Phi) ||
1835              isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L)) &&
1836            (ChainedPhis.count(Phi) ||
1837             isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
1838          std::swap(OrigPhiRef, Phi);
1839          std::swap(OrigInc, IsomorphicInc);
1840        }
1841        // Replacing the congruent phi is sufficient because acyclic
1842        // redundancy elimination, CSE/GVN, should handle the
1843        // rest. However, once SCEV proves that a phi is congruent,
1844        // it's often the head of an IV user cycle that is isomorphic
1845        // with the original phi. It's worth eagerly cleaning up the
1846        // common case of a single IV increment so that DeleteDeadPHIs
1847        // can remove cycles that had postinc uses.
1848        const SCEV *TruncExpr =
1849            SE.getTruncateOrNoop(SE.getSCEV(OrigInc), IsomorphicInc->getType());
1850        if (OrigInc != IsomorphicInc &&
1851            TruncExpr == SE.getSCEV(IsomorphicInc) &&
1852            SE.LI.replacementPreservesLCSSAForm(IsomorphicInc, OrigInc) &&
1853            hoistIVInc(OrigInc, IsomorphicInc)) {
1854          DEBUG_WITH_TYPE(DebugType,
1855                          dbgs() << "INDVARS: Eliminated congruent iv.inc: "
1856                                 << *IsomorphicInc << '\n');
1857          Value *NewInc = OrigInc;
1858          if (OrigInc->getType() != IsomorphicInc->getType()) {
1859            Instruction *IP = nullptr;
1860            if (PHINode *PN = dyn_cast<PHINode>(OrigInc))
1861              IP = &*PN->getParent()->getFirstInsertionPt();
1862            else
1863              IP = OrigInc->getNextNode();
1864
1865            IRBuilder<> Builder(IP);
1866            Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
1867            NewInc = Builder.CreateTruncOrBitCast(
1868                OrigInc, IsomorphicInc->getType(), IVName);
1869          }
1870          IsomorphicInc->replaceAllUsesWith(NewInc);
1871          DeadInsts.emplace_back(IsomorphicInc);
1872        }
1873      }
1874    }
1875    DEBUG_WITH_TYPE(DebugType, dbgs() << "INDVARS: Eliminated congruent iv: "
1876                                      << *Phi << '\n');
1877    ++NumElim;
1878    Value *NewIV = OrigPhiRef;
1879    if (OrigPhiRef->getType() != Phi->getType()) {
1880      IRBuilder<> Builder(&*L->getHeader()->getFirstInsertionPt());
1881      Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
1882      NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
1883    }
1884    Phi->replaceAllUsesWith(NewIV);
1885    DeadInsts.emplace_back(Phi);
1886  }
1887  return NumElim;
1888}
1889
1890Value *SCEVExpander::findExistingExpansion(const SCEV *S,
1891                                           const Instruction *At, Loop *L) {
1892  using namespace llvm::PatternMatch;
1893
1894  SmallVector<BasicBlock *, 4> ExitingBlocks;
1895  L->getExitingBlocks(ExitingBlocks);
1896
1897  // Look for a suitable value in simple conditions at the loop exits.
1898  for (BasicBlock *BB : ExitingBlocks) {
1899    ICmpInst::Predicate Pred;
1900    Instruction *LHS, *RHS;
1901    BasicBlock *TrueBB, *FalseBB;
1902
1903    if (!match(BB->getTerminator(),
1904               m_Br(m_ICmp(Pred, m_Instruction(LHS), m_Instruction(RHS)),
1905                    TrueBB, FalseBB)))
1906      continue;
1907
1908    if (SE.getSCEV(LHS) == S && SE.DT.dominates(LHS, At))
1909      return LHS;
1910
1911    if (SE.getSCEV(RHS) == S && SE.DT.dominates(RHS, At))
1912      return RHS;
1913  }
1914
1915  // Use expand's logic for reusing a previously expanded Value from the
1916  // ExprValueMap.
1917  if (Value *Val = FindValueInExprValueMap(S, At))
1918    return Val;
1919
1920  // There is potential to make this significantly smarter, but this simple
1921  // heuristic already gets some interesting cases.
1922
1923  // Cannot find a suitable value.
1924  return nullptr;
1925}
1926
1927bool SCEVExpander::isHighCostExpansionHelper(
1928    const SCEV *S, Loop *L, const Instruction *At,
1929    SmallPtrSetImpl<const SCEV *> &Processed) {
1930
1931  // If we can find an existing value for this SCEV available at the point
1932  // "At", then consider the expression cheap.
1933  if (At && findExistingExpansion(S, At, L) != nullptr)
1934    return false;
1935
1936  // Zero/One operand expressions
1937  switch (S->getSCEVType()) {
1938  case scUnknown:
1939  case scConstant:
1940    return false;
1941  case scTruncate:
1942    return isHighCostExpansionHelper(cast<SCEVTruncateExpr>(S)->getOperand(),
1943                                     L, At, Processed);
1944  case scZeroExtend:
1945    return isHighCostExpansionHelper(cast<SCEVZeroExtendExpr>(S)->getOperand(),
1946                                     L, At, Processed);
1947  case scSignExtend:
1948    return isHighCostExpansionHelper(cast<SCEVSignExtendExpr>(S)->getOperand(),
1949                                     L, At, Processed);
1950  }
1951
1952  if (!Processed.insert(S).second)
1953    return false;
1954
1955  if (auto *UDivExpr = dyn_cast<SCEVUDivExpr>(S)) {
1956    // If the divisor is a power of two and the SCEV type fits in a native
1957    // integer, consider the division cheap irrespective of whether it occurs in
1958    // the user code since it can be lowered into a right shift.
1959    if (auto *SC = dyn_cast<SCEVConstant>(UDivExpr->getRHS()))
1960      if (SC->getAPInt().isPowerOf2()) {
1961        const DataLayout &DL =
1962            L->getHeader()->getParent()->getParent()->getDataLayout();
1963        unsigned Width = cast<IntegerType>(UDivExpr->getType())->getBitWidth();
1964        return DL.isIllegalInteger(Width);
1965      }
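    // E.g. (illustrative): a udiv by 8 of a legal i64 value lowers to
    // 'lshr i64 %x, 3' and is cheap; only divisions at illegal integer widths
    // are reported as expensive here.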
1966
1967    // UDivExpr is very likely a UDiv that ScalarEvolution's HowFarToZero or
1968    // HowManyLessThans produced to compute a precise expression, rather than a
1969    // UDiv from the user's code. If we can't find a UDiv in the code with some
1970    // simple searching, assume the former and consider UDivExpr expensive to
1971    // compute.
1972    BasicBlock *ExitingBB = L->getExitingBlock();
1973    if (!ExitingBB)
1974      return true;
1975
1976    // At the beginning of this function we already tried to find an existing
1977    // value for plain 'S'. Now try to look up 'S + 1', since it is a common
1978    // pattern involving division. This is just a simple search heuristic.
1979    if (!At)
1980      At = &ExitingBB->back();
1981    if (!findExistingExpansion(
1982            SE.getAddExpr(S, SE.getConstant(S->getType(), 1)), At, L))
1983      return true;
1984  }
1985
1986  // HowManyLessThans uses a Max expression whenever the loop is not guarded by
1987  // the exit condition.
1988  if (isa<SCEVSMaxExpr>(S) || isa<SCEVUMaxExpr>(S))
1989    return true;
1990
1991  // Recurse past nary expressions, which commonly occur in the
1992  // BackedgeTakenCount. They may already exist in program code, and if not,
1993  // they are not too expensive to rematerialize.
1994  if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(S)) {
1995    for (auto *Op : NAry->operands())
1996      if (isHighCostExpansionHelper(Op, L, At, Processed))
1997        return true;
1998  }
1999
2000  // If we haven't recognized an expensive SCEV pattern, assume it's an
2001  // expression produced by program code.
2002  return false;
2003}
2004
2005Value *SCEVExpander::expandCodeForPredicate(const SCEVPredicate *Pred,
2006                                            Instruction *IP) {
2007  assert(IP);
2008  switch (Pred->getKind()) {
2009  case SCEVPredicate::P_Union:
2010    return expandUnionPredicate(cast<SCEVUnionPredicate>(Pred), IP);
2011  case SCEVPredicate::P_Equal:
2012    return expandEqualPredicate(cast<SCEVEqualPredicate>(Pred), IP);
2013  case SCEVPredicate::P_Wrap: {
2014    auto *AddRecPred = cast<SCEVWrapPredicate>(Pred);
2015    return expandWrapPredicate(AddRecPred, IP);
2016  }
2017  }
2018  llvm_unreachable("Unknown SCEV predicate type");
2019}
2020
2021Value *SCEVExpander::expandEqualPredicate(const SCEVEqualPredicate *Pred,
2022                                          Instruction *IP) {
2023  Value *Expr0 = expandCodeFor(Pred->getLHS(), Pred->getLHS()->getType(), IP);
2024  Value *Expr1 = expandCodeFor(Pred->getRHS(), Pred->getRHS()->getType(), IP);
2025
2026  Builder.SetInsertPoint(IP);
2027  auto *I = Builder.CreateICmpNE(Expr0, Expr1, "ident.check");
2028  return I;
2029}
2030
2031Value *SCEVExpander::generateOverflowCheck(const SCEVAddRecExpr *AR,
2032                                           Instruction *Loc, bool Signed) {
2033  assert(AR->isAffine() && "Cannot generate RT check for "
2034                           "non-affine expression");
2035
2036  SCEVUnionPredicate Pred;
2037  const SCEV *ExitCount =
2038      SE.getPredicatedBackedgeTakenCount(AR->getLoop(), Pred);
2039
2040  assert(ExitCount != SE.getCouldNotCompute() && "Invalid loop count");
2041
2042  const SCEV *Step = AR->getStepRecurrence(SE);
2043  const SCEV *Start = AR->getStart();
2044
2045  unsigned SrcBits = SE.getTypeSizeInBits(ExitCount->getType());
2046  unsigned DstBits = SE.getTypeSizeInBits(AR->getType());
2047
2048  // The expression {Start,+,Step} has nusw/nssw if:
2049  //   Step < 0:  Start - |Step| * Backedge <= Start
2050  //   Step >= 0: Start + |Step| * Backedge >= Start
2051  // and |Step| * Backedge doesn't unsigned overflow.
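  //
  // Illustrative example: for an i8 AddRec {250,+,10} with backedge-taken
  // count 3, Start + |Step| * Backedge = 250 + 30 wraps to 24 < 250, so the
  // (unsigned) check below evaluates to true, i.e. overflow.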
2052
2053  IntegerType *CountTy = IntegerType::get(Loc->getContext(), SrcBits);
2054  Builder.SetInsertPoint(Loc);
2055  Value *TripCountVal = expandCodeFor(ExitCount, CountTy, Loc);
2056
2057  IntegerType *Ty =
2058      IntegerType::get(Loc->getContext(), SE.getTypeSizeInBits(AR->getType()));
2059
2060  Value *StepValue = expandCodeFor(Step, Ty, Loc);
2061  Value *NegStepValue = expandCodeFor(SE.getNegativeSCEV(Step), Ty, Loc);
2062  Value *StartValue = expandCodeFor(Start, Ty, Loc);
2063
2064  ConstantInt *Zero =
2065      ConstantInt::get(Loc->getContext(), APInt::getNullValue(DstBits));
2066
2067  Builder.SetInsertPoint(Loc);
2068  // Compute |Step|
2069  Value *StepCompare = Builder.CreateICmp(ICmpInst::ICMP_SLT, StepValue, Zero);
2070  Value *AbsStep = Builder.CreateSelect(StepCompare, NegStepValue, StepValue);
2071
2072  // Get the backedge taken count and truncate or extend it to the AR type.
2073  Value *TruncTripCount = Builder.CreateZExtOrTrunc(TripCountVal, Ty);
2074  auto *MulF = Intrinsic::getDeclaration(Loc->getModule(),
2075                                         Intrinsic::umul_with_overflow, Ty);
2076
2077  // Compute |Step| * Backedge
2078  CallInst *Mul = Builder.CreateCall(MulF, {AbsStep, TruncTripCount}, "mul");
2079  Value *MulV = Builder.CreateExtractValue(Mul, 0, "mul.result");
2080  Value *OfMul = Builder.CreateExtractValue(Mul, 1, "mul.overflow");
2081
2082  // Compute:
2083  //   Start + |Step| * Backedge < Start
2084  //   Start - |Step| * Backedge > Start
2085  Value *Add = Builder.CreateAdd(StartValue, MulV);
2086  Value *Sub = Builder.CreateSub(StartValue, MulV);
2087
2088  Value *EndCompareGT = Builder.CreateICmp(
2089      Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT, Sub, StartValue);
2090
2091  Value *EndCompareLT = Builder.CreateICmp(
2092      Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT, Add, StartValue);
2093
2094  // Select the answer based on the sign of Step.
2095  Value *EndCheck =
2096      Builder.CreateSelect(StepCompare, EndCompareGT, EndCompareLT);
2097
2098  // If the backedge taken count type is larger than the AR type,
2099  // check that we don't drop any bits by truncating it. If we are
2100  // dropping bits, then we have overflow (unless the step is zero).
2101  if (SE.getTypeSizeInBits(CountTy) > SE.getTypeSizeInBits(Ty)) {
2102    auto MaxVal = APInt::getMaxValue(DstBits).zext(SrcBits);
2103    auto *BackedgeCheck =
2104        Builder.CreateICmp(ICmpInst::ICMP_UGT, TripCountVal,
2105                           ConstantInt::get(Loc->getContext(), MaxVal));
2106    BackedgeCheck = Builder.CreateAnd(
2107        BackedgeCheck, Builder.CreateICmp(ICmpInst::ICMP_NE, StepValue, Zero));
2108
2109    EndCheck = Builder.CreateOr(EndCheck, BackedgeCheck);
2110  }
2111
2112  EndCheck = Builder.CreateOr(EndCheck, OfMul);
2113  return EndCheck;
2114}
2115
2116Value *SCEVExpander::expandWrapPredicate(const SCEVWrapPredicate *Pred,
2117                                         Instruction *IP) {
2118  const auto *A = cast<SCEVAddRecExpr>(Pred->getExpr());
2119  Value *NSSWCheck = nullptr, *NUSWCheck = nullptr;
2120
2121  // Add a check for NUSW
2122  if (Pred->getFlags() & SCEVWrapPredicate::IncrementNUSW)
2123    NUSWCheck = generateOverflowCheck(A, IP, false);
2124
2125  // Add a check for NSSW
2126  if (Pred->getFlags() & SCEVWrapPredicate::IncrementNSSW)
2127    NSSWCheck = generateOverflowCheck(A, IP, true);
2128
2129  if (NUSWCheck && NSSWCheck)
2130    return Builder.CreateOr(NUSWCheck, NSSWCheck);
2131
2132  if (NUSWCheck)
2133    return NUSWCheck;
2134
2135  if (NSSWCheck)
2136    return NSSWCheck;
2137
2138  return ConstantInt::getFalse(IP->getContext());
2139}
2140
2141Value *SCEVExpander::expandUnionPredicate(const SCEVUnionPredicate *Union,
2142                                          Instruction *IP) {
2143  auto *BoolType = IntegerType::get(IP->getContext(), 1);
2144  Value *Check = ConstantInt::getNullValue(BoolType);
2145
2146  // Loop over all checks in this set.
2147  for (auto Pred : Union->getPredicates()) {
2148    auto *NextCheck = expandCodeForPredicate(Pred, IP);
2149    Builder.SetInsertPoint(IP);
2150    Check = Builder.CreateOr(Check, NextCheck);
2151  }
2152
2153  return Check;
2154}
2155
2156namespace {
2157// Search for a SCEV subexpression that is not safe to expand.  Any expression
2158// that may expand to a !isSafeToSpeculativelyExecute value is unsafe, namely
2159// UDiv expressions. We don't know if the UDiv is derived from an IR divide
2160// instruction, but the important thing is that we prove the denominator is
2161// nonzero before expansion.
2162//
2163// IVUsers already checks that IV-derived expressions are safe. So this check is
2164// only needed when the expression includes some subexpression that is not IV
2165// derived.
2166//
2167// Currently, we only allow division by a nonzero constant here. If this is
2168// inadequate, we could easily allow division by SCEVUnknown by using
2169// ValueTracking to check isKnownNonZero().
2170//
2171// We cannot generally expand recurrences unless the step dominates the loop
2172// header. The expander handles the special case of affine recurrences by
2173// scaling the recurrence outside the loop, but this technique isn't generally
2174// applicable. Expanding a nested recurrence outside a loop requires computing
2175// binomial coefficients. This could be done, but the recurrence has to be in a
2176// perfectly reduced form, which can't be guaranteed.
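//
// For example (illustrative), (%a /u %b) with a non-constant divisor %b is
// reported unsafe, because %b is not known to be nonzero at the point where
// the expression would be expanded.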
2177struct SCEVFindUnsafe {
2178  ScalarEvolution &SE;
2179  bool IsUnsafe;
2180
2181  SCEVFindUnsafe(ScalarEvolution &se): SE(se), IsUnsafe(false) {}
2182
2183  bool follow(const SCEV *S) {
2184    if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
2185      const SCEVConstant *SC = dyn_cast<SCEVConstant>(D->getRHS());
2186      if (!SC || SC->getValue()->isZero()) {
2187        IsUnsafe = true;
2188        return false;
2189      }
2190    }
2191    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
2192      const SCEV *Step = AR->getStepRecurrence(SE);
2193      if (!AR->isAffine() && !SE.dominates(Step, AR->getLoop()->getHeader())) {
2194        IsUnsafe = true;
2195        return false;
2196      }
2197    }
2198    return true;
2199  }
2200  bool isDone() const { return IsUnsafe; }
2201};
2202}
2203
2204namespace llvm {
2205bool isSafeToExpand(const SCEV *S, ScalarEvolution &SE) {
2206  SCEVFindUnsafe Search(SE);
2207  visitAll(S, Search);
2208  return !Search.IsUnsafe;
2209}
2210}
2211