1193323Sed//===- ScalarEvolution.cpp - Scalar Evolution Analysis ----------*- C++ -*-===//
2193323Sed//
3193323Sed//                     The LLVM Compiler Infrastructure
4193323Sed//
5193323Sed// This file is distributed under the University of Illinois Open Source
6193323Sed// License. See LICENSE.TXT for details.
7193323Sed//
8193323Sed//===----------------------------------------------------------------------===//
9193323Sed//
10193323Sed// This file contains the implementation of the scalar evolution analysis
11193323Sed// engine, which is used primarily to analyze expressions involving induction
12193323Sed// variables in loops.
13193323Sed//
14193323Sed// There are several aspects to this library.  First is the representation of
15193323Sed// scalar expressions, which are represented as subclasses of the SCEV class.
16193323Sed// These classes are used to represent certain types of subexpressions that we
17198090Srdivacky// can handle. We only create one SCEV of a particular shape, so
18198090Srdivacky// pointer-comparisons for equality are legal.
19193323Sed//
20193323Sed// One important aspect of the SCEV objects is that they are never cyclic, even
21193323Sed// if there is a cycle in the dataflow for an expression (ie, a PHI node).  If
22193323Sed// the PHI node is one of the idioms that we can represent (e.g., a polynomial
23193323Sed// recurrence) then we represent it directly as a recurrence node, otherwise we
24193323Sed// represent it as a SCEVUnknown node.
25193323Sed//
26193323Sed// In addition to being able to represent expressions of various types, we also
27193323Sed// have folders that are used to build the *canonical* representation for a
28193323Sed// particular expression.  These folders are capable of using a variety of
29193323Sed// rewrite rules to simplify the expressions.
30193323Sed//
31193323Sed// Once the folders are defined, we can implement the more interesting
32193323Sed// higher-level code, such as the code that recognizes PHI nodes of various
33193323Sed// types, computes the execution count of a loop, etc.
34193323Sed//
35193323Sed// TODO: We should use these routines and value representations to implement
36193323Sed// dependence analysis!
37193323Sed//
38193323Sed//===----------------------------------------------------------------------===//
39193323Sed//
40193323Sed// There are several good references for the techniques used in this analysis.
41193323Sed//
42193323Sed//  Chains of recurrences -- a method to expedite the evaluation
43193323Sed//  of closed-form functions
44193323Sed//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
45193323Sed//
46193323Sed//  On computational properties of chains of recurrences
47193323Sed//  Eugene V. Zima
48193323Sed//
49193323Sed//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
50193323Sed//  Robert A. van Engelen
51193323Sed//
52193323Sed//  Efficient Symbolic Analysis for Optimizing Compilers
53193323Sed//  Robert A. van Engelen
54193323Sed//
55193323Sed//  Using the chains of recurrences algebra for data dependence testing and
56193323Sed//  induction variable substitution
57193323Sed//  MS Thesis, Johnie Birch
58193323Sed//
59193323Sed//===----------------------------------------------------------------------===//
60193323Sed
61193323Sed#define DEBUG_TYPE "scalar-evolution"
62249423Sdim#include "llvm/Analysis/ScalarEvolution.h"
63249423Sdim#include "llvm/ADT/STLExtras.h"
64249423Sdim#include "llvm/ADT/SmallPtrSet.h"
65249423Sdim#include "llvm/ADT/Statistic.h"
66193323Sed#include "llvm/Analysis/ConstantFolding.h"
67193323Sed#include "llvm/Analysis/Dominators.h"
68218893Sdim#include "llvm/Analysis/InstructionSimplify.h"
69193323Sed#include "llvm/Analysis/LoopInfo.h"
70249423Sdim#include "llvm/Analysis/ScalarEvolutionExpressions.h"
71194612Sed#include "llvm/Analysis/ValueTracking.h"
72193323Sed#include "llvm/Assembly/Writer.h"
73249423Sdim#include "llvm/IR/Constants.h"
74249423Sdim#include "llvm/IR/DataLayout.h"
75249423Sdim#include "llvm/IR/DerivedTypes.h"
76249423Sdim#include "llvm/IR/GlobalAlias.h"
77249423Sdim#include "llvm/IR/GlobalVariable.h"
78249423Sdim#include "llvm/IR/Instructions.h"
79249423Sdim#include "llvm/IR/LLVMContext.h"
80249423Sdim#include "llvm/IR/Operator.h"
81193323Sed#include "llvm/Support/CommandLine.h"
82193323Sed#include "llvm/Support/ConstantRange.h"
83201360Srdivacky#include "llvm/Support/Debug.h"
84198090Srdivacky#include "llvm/Support/ErrorHandling.h"
85193323Sed#include "llvm/Support/GetElementPtrTypeIterator.h"
86193323Sed#include "llvm/Support/InstIterator.h"
87193323Sed#include "llvm/Support/MathExtras.h"
88193323Sed#include "llvm/Support/raw_ostream.h"
89249423Sdim#include "llvm/Target/TargetLibraryInfo.h"
90193323Sed#include <algorithm>
91193323Sedusing namespace llvm;
92193323Sed
// Statistics on how many loop trip counts this analysis managed (or failed)
// to compute; reported with -stats.
STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

// Cap on how many iterations SCEV will symbolically execute when brute-forcing
// a trip count; tunable via -scalar-evolution-max-iterations.
static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

// FIXME: Enable this with XDEBUG when the test suite is clean.
static cl::opt<bool>
VerifySCEV("verify-scev",
           cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));

// Register the pass and the analyses it depends on with the pass registry.
INITIALIZE_PASS_BEGIN(ScalarEvolution, "scalar-evolution",
                "Scalar Evolution Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_PASS_END(ScalarEvolution, "scalar-evolution",
                "Scalar Evolution Analysis", false, true)
char ScalarEvolution::ID = 0;
122193323Sed
123193323Sed//===----------------------------------------------------------------------===//
124193323Sed//                           SCEV class definitions
125193323Sed//===----------------------------------------------------------------------===//
126193323Sed
127193323Sed//===----------------------------------------------------------------------===//
128193323Sed// Implementation of the SCEV class.
129193323Sed//
130195340Sed
131243830Sdim#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
132193323Sedvoid SCEV::dump() const {
133201360Srdivacky  print(dbgs());
134201360Srdivacky  dbgs() << '\n';
135193323Sed}
136243830Sdim#endif
137193323Sed
/// print - Print a human-readable textual representation of this SCEV to OS.
/// This is the syntax that appears in ScalarEvolution's debug output, e.g.
/// "(zext i8 %x to i32)" or "{%start,+,1}<nuw><%loop>".
void SCEV::print(raw_ostream &OS) const {
  switch (getSCEVType()) {
  case scConstant:
    // Constants print as the underlying ConstantInt without its type.
    WriteAsOperand(OS, cast<SCEVConstant>(this)->getValue(), false);
    return;
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    // Add recurrences print as {Start,+,Step[,+,Step2...]}<flags><header>,
    // where header names the loop the recurrence runs in.
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->getNoWrapFlags(FlagNUW))
      OS << "nuw><";
    if (AR->getNoWrapFlags(FlagNSW))
      OS << "nsw><";
    // "nw" is only printed when it is not already implied by nuw or nsw.
    if (AR->getNoWrapFlags(FlagNW) &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    WriteAsOperand(OS, AR->getLoop()->getHeader(), /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    // N-ary expressions print as a parenthesized list joined by the
    // expression's operator.
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = 0;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    }
    OS << "(";
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      OS << **I;
      if (llvm::next(I) != E)
        OS << OpStr;
    }
    OS << ")";
    // Wrap flags are only shown for adds and muls; max expressions fall
    // through this switch without printing anything.
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->getNoWrapFlags(FlagNUW))
        OS << "<nuw>";
      if (NAry->getNoWrapFlags(FlagNSW))
        OS << "<nsw>";
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    // Recognize the special constant-expression encodings of sizeof,
    // alignof, and offsetof (see SCEVUnknown::isSizeOf and friends) and
    // print them symbolically.
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      WriteAsOperand(OS, FieldNo, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    WriteAsOperand(OS, U->getValue(), false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  default: break;
  }
  llvm_unreachable("Unknown SCEV kind!");
}
248218893Sdim
/// getType - Return the LLVM IR type of this SCEV expression, dispatching
/// on the expression kind.
Type *SCEV::getType() const {
  switch (getSCEVType()) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    // All three casts share SCEVCastExpr's stored target type.
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    // NOTE(review): adds are deliberately routed through SCEVAddExpr's own
    // getType rather than SCEVNAryExpr's — presumably adds have a special
    // type rule (e.g. for pointer operands); confirm against the header.
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  default:
    llvm_unreachable("Unknown SCEV kind!");
  }
}
274218893Sdim
275193323Sedbool SCEV::isZero() const {
276193323Sed  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
277193323Sed    return SC->getValue()->isZero();
278193323Sed  return false;
279193323Sed}
280193323Sed
281193323Sedbool SCEV::isOne() const {
282193323Sed  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
283193323Sed    return SC->getValue()->isOne();
284193323Sed  return false;
285193323Sed}
286193323Sed
287195098Sedbool SCEV::isAllOnesValue() const {
288195098Sed  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
289195098Sed    return SC->getValue()->isAllOnesValue();
290195098Sed  return false;
291195098Sed}
292195098Sed
293234353Sdim/// isNonConstantNegative - Return true if the specified scev is negated, but
294234353Sdim/// not a constant.
295234353Sdimbool SCEV::isNonConstantNegative() const {
296234353Sdim  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
297234353Sdim  if (!Mul) return false;
298234353Sdim
299234353Sdim  // If there is a constant factor, it will be first.
300234353Sdim  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
301234353Sdim  if (!SC) return false;
302234353Sdim
303234353Sdim  // Return true if the value is negative, this matches things like (-42 * V).
304234353Sdim  return SC->getValue()->getValue().isNegative();
305234353Sdim}
306234353Sdim
// SCEVCouldNotCompute is a sentinel node; it is not interned in the
// FoldingSet, so it is constructed with an empty FoldingSetNodeIDRef.
SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}

// classof - Support isa<>/dyn_cast<>: a SCEV is a SCEVCouldNotCompute
// exactly when its kind tag is scCouldNotCompute.
bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}
313193323Sed
/// getConstant - Return a SCEVConstant wrapping V, interned in UniqueSCEVs
/// so that structurally equal SCEVs are pointer-equal.
const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  // Build the uniquing key: the expression kind plus the ConstantInt.
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = 0;
  // If an identical node already exists, reuse it.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  // Otherwise allocate a new node from the SCEV allocator and register it
  // at the insertion position computed above.
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}
324193323Sed
325198090Srdivackyconst SCEV *ScalarEvolution::getConstant(const APInt& Val) {
326198090Srdivacky  return getConstant(ConstantInt::get(getContext(), Val));
327193323Sed}
328193323Sed
329198090Srdivackyconst SCEV *
330226633SdimScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
331226633Sdim  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
332207618Srdivacky  return getConstant(ConstantInt::get(ITy, V, isSigned));
333194612Sed}
334194612Sed
// Common base constructor for the cast expressions (trunc/zext/sext):
// records the operand being cast and the target type.
SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
                           unsigned SCEVTy, const SCEV *op, Type *ty)
  : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}
338193323Sed
// Construct a truncation expression.  Note that pointer types are accepted
// alongside integers, both as source and destination.
SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                                   const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate non-integer value!");
}
346193323Sed
// Construct a zero-extension expression.  Pointer types are accepted
// alongside integers, both as source and destination.
SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot zero extend non-integer value!");
}
354193323Sed
// Construct a sign-extension expression.  Pointer types are accepted
// alongside integers, both as source and destination.
SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot sign extend non-integer value!");
}
362193323Sed
// deleted - Value-handle callback: the underlying Value is being deleted.
// Purge every reference ScalarEvolution holds to this SCEVUnknown before
// the Value goes away.
void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(0);
}
373212904Sdim
// allUsesReplacedWith - Value-handle callback: the underlying Value was
// RAUW'd to New.  Drop cached results and retarget this node, rather than
// deleting it, because other SCEVs may still reference it.
void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}
386212904Sdim
/// isSizeOf - Return true if this SCEVUnknown wraps the canonical constant
/// expression encoding of sizeof(T):
///   ptrtoint (gep T* null, 1)
/// i.e. the byte offset of element 1 of a null T pointer.  On success,
/// AllocTy is set to T.
bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        // Require exactly "gep null, CI" with a single index.
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              // The pointee type of the null pointer is the sized type.
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                                 ->getElementType();
              return true;
            }

  return false;
}
403203954Srdivacky
/// isAlignOf - Return true if this SCEVUnknown wraps the canonical constant
/// expression encoding of alignof(T):
///   ptrtoint (gep {i1, T}* null, 0, 1)
/// i.e. the offset of the T field in an unpacked {i1, T} struct, which
/// equals T's alignment.  On success, AllocTy is set to T.
bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // The pointee must be an unpacked two-element struct whose first
          // field is i1, indexed as (0, 1).
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}
428203954Srdivacky
/// isOffsetOf - Return true if this SCEVUnknown wraps the canonical constant
/// expression encoding of offsetof(CTy, FieldNo):
///   ptrtoint (gep CTy* null, 0, FieldNo)
/// On success, CTy and FieldNo are set to the aggregate type and field
/// index constant.
bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        // Require exactly "gep null, 0, FieldNo".
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}
450203954Srdivacky
451193323Sed//===----------------------------------------------------------------------===//
452193323Sed//                               SCEV Utilities
453193323Sed//===----------------------------------------------------------------------===//
454193323Sed
455193323Sednamespace {
456193323Sed  /// SCEVComplexityCompare - Return true if the complexity of the LHS is less
457193323Sed  /// than the complexity of the RHS.  This comparator is used to canonicalize
458193323Sed  /// expressions.
459198892Srdivacky  class SCEVComplexityCompare {
460212904Sdim    const LoopInfo *const LI;
461193323Sed  public:
462212904Sdim    explicit SCEVComplexityCompare(const LoopInfo *li) : LI(li) {}
463193323Sed
464212904Sdim    // Return true or false if LHS is less than, or at least RHS, respectively.
465193323Sed    bool operator()(const SCEV *LHS, const SCEV *RHS) const {
466212904Sdim      return compare(LHS, RHS) < 0;
467212904Sdim    }
468212904Sdim
469212904Sdim    // Return negative, zero, or positive, if LHS is less than, equal to, or
470212904Sdim    // greater than RHS, respectively. A three-way result allows recursive
471212904Sdim    // comparisons to be more efficient.
472212904Sdim    int compare(const SCEV *LHS, const SCEV *RHS) const {
473198090Srdivacky      // Fast-path: SCEVs are uniqued so we can do a quick equality check.
474198090Srdivacky      if (LHS == RHS)
475212904Sdim        return 0;
476198090Srdivacky
477193323Sed      // Primarily, sort the SCEVs by their getSCEVType().
478212904Sdim      unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
479212904Sdim      if (LType != RType)
480212904Sdim        return (int)LType - (int)RType;
481193323Sed
482193323Sed      // Aside from the getSCEVType() ordering, the particular ordering
483193323Sed      // isn't very important except that it's beneficial to be consistent,
484193323Sed      // so that (a + b) and (b + a) don't end up as different expressions.
485212904Sdim      switch (LType) {
486212904Sdim      case scUnknown: {
487212904Sdim        const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
488193323Sed        const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);
489193323Sed
490212904Sdim        // Sort SCEVUnknown values with some loose heuristics. TODO: This is
491212904Sdim        // not as complete as it could be.
492212904Sdim        const Value *LV = LU->getValue(), *RV = RU->getValue();
493212904Sdim
494193323Sed        // Order pointer values after integer values. This helps SCEVExpander
495193323Sed        // form GEPs.
496212904Sdim        bool LIsPointer = LV->getType()->isPointerTy(),
497212904Sdim             RIsPointer = RV->getType()->isPointerTy();
498212904Sdim        if (LIsPointer != RIsPointer)
499212904Sdim          return (int)LIsPointer - (int)RIsPointer;
500193323Sed
501193323Sed        // Compare getValueID values.
502212904Sdim        unsigned LID = LV->getValueID(),
503212904Sdim                 RID = RV->getValueID();
504212904Sdim        if (LID != RID)
505212904Sdim          return (int)LID - (int)RID;
506193323Sed
507193323Sed        // Sort arguments by their position.
508212904Sdim        if (const Argument *LA = dyn_cast<Argument>(LV)) {
509212904Sdim          const Argument *RA = cast<Argument>(RV);
510212904Sdim          unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
511212904Sdim          return (int)LArgNo - (int)RArgNo;
512193323Sed        }
513193323Sed
514212904Sdim        // For instructions, compare their loop depth, and their operand
515212904Sdim        // count.  This is pretty loose.
516212904Sdim        if (const Instruction *LInst = dyn_cast<Instruction>(LV)) {
517212904Sdim          const Instruction *RInst = cast<Instruction>(RV);
518193323Sed
519193323Sed          // Compare loop depths.
520212904Sdim          const BasicBlock *LParent = LInst->getParent(),
521212904Sdim                           *RParent = RInst->getParent();
522212904Sdim          if (LParent != RParent) {
523212904Sdim            unsigned LDepth = LI->getLoopDepth(LParent),
524212904Sdim                     RDepth = LI->getLoopDepth(RParent);
525212904Sdim            if (LDepth != RDepth)
526212904Sdim              return (int)LDepth - (int)RDepth;
527212904Sdim          }
528193323Sed
529193323Sed          // Compare the number of operands.
530212904Sdim          unsigned LNumOps = LInst->getNumOperands(),
531212904Sdim                   RNumOps = RInst->getNumOperands();
532212904Sdim          return (int)LNumOps - (int)RNumOps;
533193323Sed        }
534193323Sed
535212904Sdim        return 0;
536193323Sed      }
537193323Sed
538212904Sdim      case scConstant: {
539212904Sdim        const SCEVConstant *LC = cast<SCEVConstant>(LHS);
540194612Sed        const SCEVConstant *RC = cast<SCEVConstant>(RHS);
541212904Sdim
542212904Sdim        // Compare constant values.
543212904Sdim        const APInt &LA = LC->getValue()->getValue();
544212904Sdim        const APInt &RA = RC->getValue()->getValue();
545212904Sdim        unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
546212904Sdim        if (LBitWidth != RBitWidth)
547212904Sdim          return (int)LBitWidth - (int)RBitWidth;
548212904Sdim        return LA.ult(RA) ? -1 : 1;
549194612Sed      }
550193323Sed
551212904Sdim      case scAddRecExpr: {
552212904Sdim        const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
553194612Sed        const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);
554212904Sdim
555212904Sdim        // Compare addrec loop depths.
556212904Sdim        const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
557212904Sdim        if (LLoop != RLoop) {
558212904Sdim          unsigned LDepth = LLoop->getLoopDepth(),
559212904Sdim                   RDepth = RLoop->getLoopDepth();
560212904Sdim          if (LDepth != RDepth)
561212904Sdim            return (int)LDepth - (int)RDepth;
562212904Sdim        }
563212904Sdim
564212904Sdim        // Addrec complexity grows with operand count.
565212904Sdim        unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
566212904Sdim        if (LNumOps != RNumOps)
567212904Sdim          return (int)LNumOps - (int)RNumOps;
568212904Sdim
569212904Sdim        // Lexicographically compare.
570212904Sdim        for (unsigned i = 0; i != LNumOps; ++i) {
571212904Sdim          long X = compare(LA->getOperand(i), RA->getOperand(i));
572212904Sdim          if (X != 0)
573212904Sdim            return X;
574212904Sdim        }
575212904Sdim
576212904Sdim        return 0;
577194612Sed      }
578194612Sed
579212904Sdim      case scAddExpr:
580212904Sdim      case scMulExpr:
581212904Sdim      case scSMaxExpr:
582212904Sdim      case scUMaxExpr: {
583212904Sdim        const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
584193323Sed        const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);
585212904Sdim
586212904Sdim        // Lexicographically compare n-ary expressions.
587212904Sdim        unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
588263508Sdim        if (LNumOps != RNumOps)
589263508Sdim          return (int)LNumOps - (int)RNumOps;
590263508Sdim
591212904Sdim        for (unsigned i = 0; i != LNumOps; ++i) {
592212904Sdim          if (i >= RNumOps)
593212904Sdim            return 1;
594212904Sdim          long X = compare(LC->getOperand(i), RC->getOperand(i));
595212904Sdim          if (X != 0)
596212904Sdim            return X;
597193323Sed        }
598212904Sdim        return (int)LNumOps - (int)RNumOps;
599193323Sed      }
600193323Sed
601212904Sdim      case scUDivExpr: {
602212904Sdim        const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
603193323Sed        const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);
604212904Sdim
605212904Sdim        // Lexicographically compare udiv expressions.
606212904Sdim        long X = compare(LC->getLHS(), RC->getLHS());
607212904Sdim        if (X != 0)
608212904Sdim          return X;
609212904Sdim        return compare(LC->getRHS(), RC->getRHS());
610193323Sed      }
611193323Sed
612212904Sdim      case scTruncate:
613212904Sdim      case scZeroExtend:
614212904Sdim      case scSignExtend: {
615212904Sdim        const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
616193323Sed        const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);
617212904Sdim
618212904Sdim        // Compare cast expressions by operand.
619212904Sdim        return compare(LC->getOperand(), RC->getOperand());
620193323Sed      }
621193323Sed
622212904Sdim      default:
623234353Sdim        llvm_unreachable("Unknown SCEV kind!");
624212904Sdim      }
625193323Sed    }
626193323Sed  };
627193323Sed}
628193323Sed
/// GroupByComplexity - Given a list of SCEV objects, order them by their
/// complexity, and group objects of the same complexity together by value.
/// When this routine is finished, we know that any duplicates in the vector are
/// consecutive and that complexity is monotonically increasing.
///
/// Note that we go take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
///
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI) {
  if (Ops.size() < 2) return;  // Noop
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (SCEVComplexityCompare(LI)(RHS, LHS))
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  // stable_sort keeps equal-complexity elements in their original order,
  // preserving determinism.
  std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));

  // Now that we are sorted by complexity, group elements of the same
  // complexity.  Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice.  Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  // (e-2 is safe here: the early returns above guarantee Ops.size() >= 3.)
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}
674193323Sed
675193323Sed
676193323Sed
677193323Sed//===----------------------------------------------------------------------===//
678193323Sed//                      Simple SCEV method implementations
679193323Sed//===----------------------------------------------------------------------===//
680193323Sed
681193323Sed/// BinomialCoefficient - Compute BC(It, K).  The result has width W.
682193323Sed/// Assume, K > 0.
683198090Srdivackystatic const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
684198090Srdivacky                                       ScalarEvolution &SE,
685226633Sdim                                       Type *ResultTy) {
686193323Sed  // Handle the simplest case efficiently.
687193323Sed  if (K == 1)
688193323Sed    return SE.getTruncateOrZeroExtend(It, ResultTy);
689193323Sed
690193323Sed  // We are using the following formula for BC(It, K):
691193323Sed  //
692193323Sed  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
693193323Sed  //
694193323Sed  // Suppose, W is the bitwidth of the return value.  We must be prepared for
695193323Sed  // overflow.  Hence, we must assure that the result of our computation is
696193323Sed  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
697193323Sed  // safe in modular arithmetic.
698193323Sed  //
699193323Sed  // However, this code doesn't use exactly that formula; the formula it uses
700195098Sed  // is something like the following, where T is the number of factors of 2 in
701193323Sed  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
702193323Sed  // exponentiation:
703193323Sed  //
704193323Sed  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
705193323Sed  //
706193323Sed  // This formula is trivially equivalent to the previous formula.  However,
707193323Sed  // this formula can be implemented much more efficiently.  The trick is that
708193323Sed  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
709193323Sed  // arithmetic.  To do exact division in modular arithmetic, all we have
710193323Sed  // to do is multiply by the inverse.  Therefore, this step can be done at
711193323Sed  // width W.
712195098Sed  //
713193323Sed  // The next issue is how to safely do the division by 2^T.  The way this
714193323Sed  // is done is by doing the multiplication step at a width of at least W + T
715193323Sed  // bits.  This way, the bottom W+T bits of the product are accurate. Then,
716193323Sed  // when we perform the division by 2^T (which is equivalent to a right shift
717193323Sed  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
718193323Sed  // truncated out after the division by 2^T.
719193323Sed  //
720193323Sed  // In comparison to just directly using the first formula, this technique
721193323Sed  // is much more efficient; using the first formula requires W * K bits,
722193323Sed  // but this formula less than W + K bits. Also, the first formula requires
723193323Sed  // a division step, whereas this formula only requires multiplies and shifts.
724193323Sed  //
725193323Sed  // It doesn't matter whether the subtraction step is done in the calculation
726193323Sed  // width or the input iteration count's width; if the subtraction overflows,
727193323Sed  // the result must be zero anyway.  We prefer here to do it in the width of
728193323Sed  // the induction variable because it helps a lot for certain cases; CodeGen
729193323Sed  // isn't smart enough to ignore the overflow, which leads to much less
730193323Sed  // efficient code if the width of the subtraction is wider than the native
731193323Sed  // register width.
732193323Sed  //
733193323Sed  // (It's possible to not widen at all by pulling out factors of 2 before
734193323Sed  // the multiplication; for example, K=2 can be calculated as
735193323Sed  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
736193323Sed  // extra arithmetic, so it's not an obvious win, and it gets
737193323Sed  // much more complicated for K > 3.)
738193323Sed
739193323Sed  // Protection from insane SCEVs; this bound is conservative,
740193323Sed  // but it probably doesn't matter.
741193323Sed  if (K > 1000)
742193323Sed    return SE.getCouldNotCompute();
743193323Sed
744193323Sed  unsigned W = SE.getTypeSizeInBits(ResultTy);
745193323Sed
746193323Sed  // Calculate K! / 2^T and T; we divide out the factors of two before
747193323Sed  // multiplying for calculating K! / 2^T to avoid overflow.
748193323Sed  // Other overflow doesn't matter because we only care about the bottom
749193323Sed  // W bits of the result.
750193323Sed  APInt OddFactorial(W, 1);
751193323Sed  unsigned T = 1;
752193323Sed  for (unsigned i = 3; i <= K; ++i) {
753193323Sed    APInt Mult(W, i);
754193323Sed    unsigned TwoFactors = Mult.countTrailingZeros();
755193323Sed    T += TwoFactors;
756193323Sed    Mult = Mult.lshr(TwoFactors);
757193323Sed    OddFactorial *= Mult;
758193323Sed  }
759193323Sed
760193323Sed  // We need at least W + T bits for the multiplication step
761193323Sed  unsigned CalculationBits = W + T;
762193323Sed
763204642Srdivacky  // Calculate 2^T, at width T+W.
764263508Sdim  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);
765193323Sed
766193323Sed  // Calculate the multiplicative inverse of K! / 2^T;
767193323Sed  // this multiplication factor will perform the exact division by
768193323Sed  // K! / 2^T.
769193323Sed  APInt Mod = APInt::getSignedMinValue(W+1);
770193323Sed  APInt MultiplyFactor = OddFactorial.zext(W+1);
771193323Sed  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
772193323Sed  MultiplyFactor = MultiplyFactor.trunc(W);
773193323Sed
774193323Sed  // Calculate the product, at width T+W
775226633Sdim  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
776198090Srdivacky                                                      CalculationBits);
777198090Srdivacky  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
778193323Sed  for (unsigned i = 1; i != K; ++i) {
779207618Srdivacky    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
780193323Sed    Dividend = SE.getMulExpr(Dividend,
781193323Sed                             SE.getTruncateOrZeroExtend(S, CalculationTy));
782193323Sed  }
783193323Sed
784193323Sed  // Divide by 2^T
785198090Srdivacky  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));
786193323Sed
787193323Sed  // Truncate the result, and divide by K! / 2^T.
788193323Sed
789193323Sed  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
790193323Sed                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
791193323Sed}
792193323Sed
793193323Sed/// evaluateAtIteration - Return the value of this chain of recurrences at
794193323Sed/// the specified iteration number.  We can evaluate this recurrence by
795193323Sed/// multiplying each element in the chain by the binomial coefficient
796193323Sed/// corresponding to it.  In other words, we can evaluate {A,+,B,+,C,+,D} as:
797193323Sed///
798193323Sed///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
799193323Sed///
800193323Sed/// where BC(It, k) stands for binomial coefficient.
801193323Sed///
802198090Srdivackyconst SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
803198090Srdivacky                                                ScalarEvolution &SE) const {
804198090Srdivacky  const SCEV *Result = getStart();
805193323Sed  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
806193323Sed    // The computation is correct in the face of overflow provided that the
807193323Sed    // multiplication is performed _after_ the evaluation of the binomial
808193323Sed    // coefficient.
809198090Srdivacky    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
810193323Sed    if (isa<SCEVCouldNotCompute>(Coeff))
811193323Sed      return Coeff;
812193323Sed
813193323Sed    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
814193323Sed  }
815193323Sed  return Result;
816193323Sed}
817193323Sed
818193323Sed//===----------------------------------------------------------------------===//
819193323Sed//                    SCEV Expression folder implementations
820193323Sed//===----------------------------------------------------------------------===//
821193323Sed
/// getTruncateExpr - Return a SCEV that truncates Op to the (strictly
/// narrower) type Ty, folding the truncate through casts, adds, muls, and
/// add-recurrences where possible.  SCEVs are uniqued, so the same
/// (Op, Ty) pair always yields the same pointer.
const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
                                             Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Check the uniquing table first; IP remembers the insertion point for a
  // possible new node.
  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // trunc(x1+x2+...+xN) --> trunc(x1)+trunc(x2)+...+trunc(xN) if we can
  // eliminate all the truncates.
  if (const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    bool hasTrunc = false;
    for (unsigned i = 0, e = SA->getNumOperands(); i != e && !hasTrunc; ++i) {
      const SCEV *S = getTruncateExpr(SA->getOperand(i), Ty);
      hasTrunc = isa<SCEVTruncateExpr>(S);
      Operands.push_back(S);
    }
    if (!hasTrunc)
      return getAddExpr(Operands);
    // The recursive getTruncateExpr calls above may have added nodes to the
    // uniquing table, invalidating the saved insert position; refresh IP.
    UniqueSCEVs.FindNodeOrInsertPos(ID, IP);  // Mutates IP, returns NULL.
  }

  // trunc(x1*x2*...*xN) --> trunc(x1)*trunc(x2)*...*trunc(xN) if we can
  // eliminate all the truncates.
  if (const SCEVMulExpr *SM = dyn_cast<SCEVMulExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    bool hasTrunc = false;
    for (unsigned i = 0, e = SM->getNumOperands(); i != e && !hasTrunc; ++i) {
      const SCEV *S = getTruncateExpr(SM->getOperand(i), Ty);
      hasTrunc = isa<SCEVTruncateExpr>(S);
      Operands.push_back(S);
    }
    if (!hasTrunc)
      return getMulExpr(Operands);
    // As above: refresh the insert position after recursive folds.
    UniqueSCEVs.FindNodeOrInsertPos(ID, IP);  // Mutates IP, returns NULL.
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  // Wrap flags are conservatively dropped (FlagAnyWrap): truncation does not
  // preserve them in general.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
      Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}
900193323Sed
/// getZeroExtendExpr - Return a SCEV that zero-extends Op to the (strictly
/// wider) type Ty.  Extensive effort is made to push the zext into the
/// operands of an affine add-recurrence when the recurrence provably does
/// not unsigned-overflow, since {zext(Start),+,zext(Step)} is far more
/// analyzable than zext({Start,+,Step}).
const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
                                               Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.  (Unlike getTruncateExpr, these cheap
  // folds run before the uniquing-table lookup.)
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // zext(trunc(x)) --> zext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all zero bits. If
    // so, we should be able to simplify this further.
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getUnsignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
            CR.zextOrTrunc(NewBits)))
      return getTruncateOrZeroExtend(X, Ty);
  }

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->getNoWrapFlags(SCEV::FlagNUW))
        return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                             getZeroExtendExpr(Step, Ty),
                             L, AR->getNoWrapFlags());

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the later case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly casted to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow, by
          // comparing zext(Start + Step*MaxBECount) against the same sum
          // computed entirely in the double-width type.  If they agree, the
          // narrow computation never wrapped.
          const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step);
          const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul), WideTy);
          const SCEV *WideStart = getZeroExtendExpr(Start, WideTy);
          const SCEV *WideMaxBECount =
            getZeroExtendExpr(CastedMaxBECount, WideTy);
          const SCEV *OperandExtendedAdd =
            getAddExpr(WideStart,
                       getMulExpr(WideMaxBECount,
                                  getZeroExtendExpr(Step, WideTy)));
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NUW, which is propagated to this AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L, AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          OperandExtendedAdd =
            getAddExpr(WideStart,
                       getMulExpr(WideMaxBECount,
                                  getSignExtendExpr(Step, WideTy)));
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NW, which is propagated to this AddRec.
            // Negative step causes unsigned wrap, but it still can't self-wrap.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L, AR->getNoWrapFlags());
          }
        }

        // If the backedge is guarded by a comparison with the pre-inc value
        // the addrec is safe. Also, if the entry is guarded by a comparison
        // with the start value and the backedge is guarded by a comparison
        // with the post-inc value, the addrec is safe.
        if (isKnownPositive(Step)) {
          // N is the largest value the recurrence can hold without the next
          // increment wrapping: UNSIGNED_MIN - max(Step), computed modulo 2^BW.
          const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
                                      getUnsignedRange(Step).getUnsignedMax());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
              (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT,
                                           AR->getPostIncExpr(*this), N))) {
            // Cache knowledge of AR NUW, which is propagated to this AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L, AR->getNoWrapFlags());
          }
        } else if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
                                      getSignedRange(Step).getSignedMin());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
              (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT,
                                           AR->getPostIncExpr(*this), N))) {
            // Cache knowledge of AR NW, which is propagated to this AddRec.
            // Negative step causes unsigned wrap, but it still can't self-wrap.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L, AR->getNoWrapFlags());
          }
        }
      }
    }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}
1059193323Sed
1060223017Sdim// Get the limit of a recurrence such that incrementing by Step cannot cause
1061223017Sdim// signed overflow as long as the value of the recurrence within the loop does
1062223017Sdim// not exceed this limit before incrementing.
1063223017Sdimstatic const SCEV *getOverflowLimitForStep(const SCEV *Step,
1064223017Sdim                                           ICmpInst::Predicate *Pred,
1065223017Sdim                                           ScalarEvolution *SE) {
1066223017Sdim  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
1067223017Sdim  if (SE->isKnownPositive(Step)) {
1068223017Sdim    *Pred = ICmpInst::ICMP_SLT;
1069223017Sdim    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
1070223017Sdim                           SE->getSignedRange(Step).getSignedMax());
1071223017Sdim  }
1072223017Sdim  if (SE->isKnownNegative(Step)) {
1073223017Sdim    *Pred = ICmpInst::ICMP_SGT;
1074223017Sdim    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
1075223017Sdim                       SE->getSignedRange(Step).getSignedMin());
1076223017Sdim  }
1077223017Sdim  return 0;
1078223017Sdim}
1079223017Sdim
// The recurrence AR has been shown to have no signed wrap. Typically, if we can
// prove NSW for AR, then we can just as easily prove NSW for its preincrement
// or postincrement sibling. This allows normalizing a sign extended AddRec as
// such: {sext(Step + Start),+,Step} => {Step + sext(Start),+,Step}. As a
// result, the expression "Step + sext(PreIncAR)" is congruent with
// "sext(PostIncAR)".  Returns the PreStart on success, or null if AR's start
// cannot be decomposed as PreStart + Step with provable NSW.
static const SCEV *getPreStartForSignExtend(const SCEVAddRecExpr *AR,
                                            Type *Ty,
                                            ScalarEvolution *SE) {
  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return 0;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
  SmallVector<const SCEV *, 4> DiffOps;
  for (SCEVAddExpr::op_iterator I = SA->op_begin(), E = SA->op_end();
       I != E; ++I) {
    if (*I != Step)
      DiffOps.push_back(*I);
  }
  // If nothing was removed, Step was not an operand of Start and Start is
  // not of the form PreStart + Step.
  if (DiffOps.size() == SA->getNumOperands())
    return 0;

  // This is a postinc AR. Check for overflow on the preinc recurrence using the
  // same three conditions that getSignExtendedExpr checks.

  // 1. NSW flags on the step increment.
  const SCEV *PreStart = SE->getAddExpr(DiffOps, SA->getNoWrapFlags());
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
    SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  if (PreAR && PreAR->getNoWrapFlags(SCEV::FlagNSW))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression: compare
  // sext(Start) against sext(PreStart) + sext(Step) at double width; if they
  // agree, PreStart + Step did not sign-wrap.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
    SE->getAddExpr(SE->getSignExtendExpr(PreStart, WideTy),
                   SE->getSignExtendExpr(Step, WideTy));
  if (SE->getSignExtendExpr(Start, WideTy) == OperandExtendedStart) {
    // Cache knowledge of PreAR NSW.  (Mutates the uniqued node, like the
    // flag-caching in getZeroExtendExpr.)
    if (PreAR)
      const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(SCEV::FlagNSW);
    // FIXME: this optimization needs a unit test
    DEBUG(dbgs() << "SCEV: untested prestart overflow check\n");
    return PreStart;
  }

  // 3. Loop precondition: the loop is only entered when PreStart is within
  // the no-overflow limit for Step.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit = getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit)) {
    return PreStart;
  }
  return 0;
}
1146223017Sdim
1147223017Sdim// Get the normalized sign-extended expression for this AddRec's Start.
1148223017Sdimstatic const SCEV *getSignExtendAddRecStart(const SCEVAddRecExpr *AR,
1149226633Sdim                                            Type *Ty,
1150223017Sdim                                            ScalarEvolution *SE) {
1151223017Sdim  const SCEV *PreStart = getPreStartForSignExtend(AR, Ty, SE);
1152223017Sdim  if (!PreStart)
1153223017Sdim    return SE->getSignExtendExpr(AR->getStart(), Ty);
1154223017Sdim
1155223017Sdim  return SE->getAddExpr(SE->getSignExtendExpr(AR->getStepRecurrence(*SE), Ty),
1156223017Sdim                        SE->getSignExtendExpr(PreStart, Ty));
1157223017Sdim}
1158223017Sdim
1159198090Srdivackyconst SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
1160226633Sdim                                               Type *Ty) {
1161193323Sed  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1162193323Sed         "This is not an extending conversion!");
1163193323Sed  assert(isSCEVable(Ty) &&
1164193323Sed         "This is not a conversion to a SCEVable type!");
1165193323Sed  Ty = getEffectiveSCEVType(Ty);
1166193323Sed
1167195340Sed  // Fold if the operand is constant.
1168210299Sed  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1169210299Sed    return getConstant(
1170239462Sdim      cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty)));
1171193323Sed
1172193323Sed  // sext(sext(x)) --> sext(x)
1173193323Sed  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
1174193323Sed    return getSignExtendExpr(SS->getOperand(), Ty);
1175193323Sed
1176218893Sdim  // sext(zext(x)) --> zext(x)
1177218893Sdim  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
1178218893Sdim    return getZeroExtendExpr(SZ->getOperand(), Ty);
1179218893Sdim
1180198090Srdivacky  // Before doing any expensive analysis, check to see if we've already
1181198090Srdivacky  // computed a SCEV for this Op and Ty.
1182198090Srdivacky  FoldingSetNodeID ID;
1183198090Srdivacky  ID.AddInteger(scSignExtend);
1184198090Srdivacky  ID.AddPointer(Op);
1185198090Srdivacky  ID.AddPointer(Ty);
1186198090Srdivacky  void *IP = 0;
1187198090Srdivacky  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1188198090Srdivacky
1189218893Sdim  // If the input value is provably positive, build a zext instead.
1190218893Sdim  if (isKnownNonNegative(Op))
1191218893Sdim    return getZeroExtendExpr(Op, Ty);
1192218893Sdim
1193218893Sdim  // sext(trunc(x)) --> sext(x) or x or trunc(x)
1194218893Sdim  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
1195218893Sdim    // It's possible the bits taken off by the truncate were all sign bits. If
1196218893Sdim    // so, we should be able to simplify this further.
1197218893Sdim    const SCEV *X = ST->getOperand();
1198218893Sdim    ConstantRange CR = getSignedRange(X);
1199218893Sdim    unsigned TruncBits = getTypeSizeInBits(ST->getType());
1200218893Sdim    unsigned NewBits = getTypeSizeInBits(Ty);
1201218893Sdim    if (CR.truncate(TruncBits).signExtend(NewBits).contains(
1202218893Sdim            CR.sextOrTrunc(NewBits)))
1203218893Sdim      return getTruncateOrSignExtend(X, Ty);
1204218893Sdim  }
1205218893Sdim
1206193323Sed  // If the input value is a chrec scev, and we can prove that the value
1207193323Sed  // did not overflow the old, smaller, value, we can sign extend all of the
1208193323Sed  // operands (often constants).  This allows analysis of something like
1209193323Sed  // this:  for (signed char X = 0; X < 100; ++X) { int Y = X; }
1210193323Sed  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
1211193323Sed    if (AR->isAffine()) {
1212198090Srdivacky      const SCEV *Start = AR->getStart();
1213198090Srdivacky      const SCEV *Step = AR->getStepRecurrence(*this);
1214198090Srdivacky      unsigned BitWidth = getTypeSizeInBits(AR->getType());
1215198090Srdivacky      const Loop *L = AR->getLoop();
1216198090Srdivacky
1217198090Srdivacky      // If we have special knowledge that this addrec won't overflow,
1218198090Srdivacky      // we don't need to do any further analysis.
1219221345Sdim      if (AR->getNoWrapFlags(SCEV::FlagNSW))
1220223017Sdim        return getAddRecExpr(getSignExtendAddRecStart(AR, Ty, this),
1221198090Srdivacky                             getSignExtendExpr(Step, Ty),
1222221345Sdim                             L, SCEV::FlagNSW);
1223198090Srdivacky
1224193323Sed      // Check whether the backedge-taken count is SCEVCouldNotCompute.
1225193323Sed      // Note that this serves two purposes: It filters out loops that are
1226193323Sed      // simply not analyzable, and it covers the case where this code is
1227193323Sed      // being called from within backedge-taken count analysis, such that
1228193323Sed      // attempting to ask for the backedge-taken count would likely result
1229193323Sed      // in infinite recursion. In the later case, the analysis code will
1230193323Sed      // cope with a conservative value, and it will take care to purge
1231193323Sed      // that value once it has finished.
1232198090Srdivacky      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
1233193323Sed      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1234193323Sed        // Manually compute the final value for AR, checking for
1235193323Sed        // overflow.
1236193323Sed
1237193323Sed        // Check whether the backedge-taken count can be losslessly casted to
1238193323Sed        // the addrec's type. The count is always unsigned.
1239198090Srdivacky        const SCEV *CastedMaxBECount =
1240193323Sed          getTruncateOrZeroExtend(MaxBECount, Start->getType());
1241198090Srdivacky        const SCEV *RecastedMaxBECount =
1242193323Sed          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
1243193323Sed        if (MaxBECount == RecastedMaxBECount) {
1244226633Sdim          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
1245193323Sed          // Check whether Start+Step*MaxBECount has no signed overflow.
1246204642Srdivacky          const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
1247239462Sdim          const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul), WideTy);
1248239462Sdim          const SCEV *WideStart = getSignExtendExpr(Start, WideTy);
1249239462Sdim          const SCEV *WideMaxBECount =
1250239462Sdim            getZeroExtendExpr(CastedMaxBECount, WideTy);
1251198090Srdivacky          const SCEV *OperandExtendedAdd =
1252239462Sdim            getAddExpr(WideStart,
1253239462Sdim                       getMulExpr(WideMaxBECount,
1254193323Sed                                  getSignExtendExpr(Step, WideTy)));
1255239462Sdim          if (SAdd == OperandExtendedAdd) {
1256221345Sdim            // Cache knowledge of AR NSW, which is propagated to this AddRec.
1257221345Sdim            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
1258193323Sed            // Return the expression with the addrec on the outside.
1259223017Sdim            return getAddRecExpr(getSignExtendAddRecStart(AR, Ty, this),
1260193323Sed                                 getSignExtendExpr(Step, Ty),
1261221345Sdim                                 L, AR->getNoWrapFlags());
1262221345Sdim          }
1263198090Srdivacky          // Similar to above, only this time treat the step value as unsigned.
1264198090Srdivacky          // This covers loops that count up with an unsigned step.
1265198090Srdivacky          OperandExtendedAdd =
1266239462Sdim            getAddExpr(WideStart,
1267239462Sdim                       getMulExpr(WideMaxBECount,
1268198090Srdivacky                                  getZeroExtendExpr(Step, WideTy)));
1269239462Sdim          if (SAdd == OperandExtendedAdd) {
1270221345Sdim            // Cache knowledge of AR NSW, which is propagated to this AddRec.
1271221345Sdim            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
1272198090Srdivacky            // Return the expression with the addrec on the outside.
1273223017Sdim            return getAddRecExpr(getSignExtendAddRecStart(AR, Ty, this),
1274198090Srdivacky                                 getZeroExtendExpr(Step, Ty),
1275221345Sdim                                 L, AR->getNoWrapFlags());
1276221345Sdim          }
1277193323Sed        }
1278198090Srdivacky
1279198090Srdivacky        // If the backedge is guarded by a comparison with the pre-inc value
1280198090Srdivacky        // the addrec is safe. Also, if the entry is guarded by a comparison
1281198090Srdivacky        // with the start value and the backedge is guarded by a comparison
1282198090Srdivacky        // with the post-inc value, the addrec is safe.
1283223017Sdim        ICmpInst::Predicate Pred;
1284223017Sdim        const SCEV *OverflowLimit = getOverflowLimitForStep(Step, &Pred, this);
1285223017Sdim        if (OverflowLimit &&
1286223017Sdim            (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
1287223017Sdim             (isLoopEntryGuardedByCond(L, Pred, Start, OverflowLimit) &&
1288223017Sdim              isLoopBackedgeGuardedByCond(L, Pred, AR->getPostIncExpr(*this),
1289223017Sdim                                          OverflowLimit)))) {
1290223017Sdim          // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec.
1291223017Sdim          const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
1292223017Sdim          return getAddRecExpr(getSignExtendAddRecStart(AR, Ty, this),
1293223017Sdim                               getSignExtendExpr(Step, Ty),
1294223017Sdim                               L, AR->getNoWrapFlags());
1295198090Srdivacky        }
1296193323Sed      }
1297193323Sed    }
1298193323Sed
1299198090Srdivacky  // The cast wasn't folded; create an explicit cast node.
1300198090Srdivacky  // Recompute the insert position, as it may have been invalidated.
1301195340Sed  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1302205407Srdivacky  SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
1303205407Srdivacky                                                   Op, Ty);
1304195340Sed  UniqueSCEVs.InsertNode(S, IP);
1305195340Sed  return S;
1306193323Sed}
1307193323Sed
1308194178Sed/// getAnyExtendExpr - Return a SCEV for the given operand extended with
1309194178Sed/// unspecified bits out to the given type.
1310194178Sed///
1311198090Srdivackyconst SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
1312226633Sdim                                              Type *Ty) {
1313194178Sed  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1314194178Sed         "This is not an extending conversion!");
1315194178Sed  assert(isSCEVable(Ty) &&
1316194178Sed         "This is not a conversion to a SCEVable type!");
1317194178Sed  Ty = getEffectiveSCEVType(Ty);
1318194178Sed
1319194178Sed  // Sign-extend negative constants.
1320194178Sed  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1321194178Sed    if (SC->getValue()->getValue().isNegative())
1322194178Sed      return getSignExtendExpr(Op, Ty);
1323194178Sed
1324194178Sed  // Peel off a truncate cast.
1325194178Sed  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
1326198090Srdivacky    const SCEV *NewOp = T->getOperand();
1327194178Sed    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
1328194178Sed      return getAnyExtendExpr(NewOp, Ty);
1329194178Sed    return getTruncateOrNoop(NewOp, Ty);
1330194178Sed  }
1331194178Sed
1332194178Sed  // Next try a zext cast. If the cast is folded, use it.
1333198090Srdivacky  const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
1334194178Sed  if (!isa<SCEVZeroExtendExpr>(ZExt))
1335194178Sed    return ZExt;
1336194178Sed
1337194178Sed  // Next try a sext cast. If the cast is folded, use it.
1338198090Srdivacky  const SCEV *SExt = getSignExtendExpr(Op, Ty);
1339194178Sed  if (!isa<SCEVSignExtendExpr>(SExt))
1340194178Sed    return SExt;
1341194178Sed
1342202878Srdivacky  // Force the cast to be folded into the operands of an addrec.
1343202878Srdivacky  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
1344202878Srdivacky    SmallVector<const SCEV *, 4> Ops;
1345202878Srdivacky    for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
1346202878Srdivacky         I != E; ++I)
1347202878Srdivacky      Ops.push_back(getAnyExtendExpr(*I, Ty));
1348221345Sdim    return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
1349202878Srdivacky  }
1350202878Srdivacky
1351194178Sed  // If the expression is obviously signed, use the sext cast value.
1352194178Sed  if (isa<SCEVSMaxExpr>(Op))
1353194178Sed    return SExt;
1354194178Sed
1355194178Sed  // Absent any other information, use the zext cast value.
1356194178Sed  return ZExt;
1357194178Sed}
1358194178Sed
1359194612Sed/// CollectAddOperandsWithScales - Process the given Ops list, which is
1360194612Sed/// a list of operands to be added under the given scale, update the given
1361194612Sed/// map. This is a helper function for getAddRecExpr. As an example of
1362194612Sed/// what it does, given a sequence of operands that would form an add
1363194612Sed/// expression like this:
1364194612Sed///
1365194612Sed///    m + n + 13 + (A * (o + p + (B * q + m + 29))) + r + (-1 * r)
1366194612Sed///
1367194612Sed/// where A and B are constants, update the map with these values:
1368194612Sed///
1369194612Sed///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
1370194612Sed///
1371194612Sed/// and add 13 + A*B*29 to AccumulatedConstant.
1372194612Sed/// This will allow getAddRecExpr to produce this:
1373194612Sed///
1374194612Sed///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
1375194612Sed///
1376194612Sed/// This form often exposes folding opportunities that are hidden in
1377194612Sed/// the original operand list.
1378194612Sed///
1379194612Sed/// Return true iff it appears that any interesting folding opportunities
1380194612Sed/// may be exposed. This helps getAddRecExpr short-circuit extra work in
1381194612Sed/// the common case where no interesting opportunities are present, and
1382194612Sed/// is also used as a check to avoid infinite recursion.
1383194612Sed///
1384194612Sedstatic bool
1385198090SrdivackyCollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
1386263508Sdim                             SmallVectorImpl<const SCEV *> &NewOps,
1387194612Sed                             APInt &AccumulatedConstant,
1388205407Srdivacky                             const SCEV *const *Ops, size_t NumOperands,
1389194612Sed                             const APInt &Scale,
1390194612Sed                             ScalarEvolution &SE) {
1391194612Sed  bool Interesting = false;
1392194612Sed
1393210299Sed  // Iterate over the add operands. They are sorted, with constants first.
1394210299Sed  unsigned i = 0;
1395210299Sed  while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
1396210299Sed    ++i;
1397210299Sed    // Pull a buried constant out to the outside.
1398210299Sed    if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
1399210299Sed      Interesting = true;
1400210299Sed    AccumulatedConstant += Scale * C->getValue()->getValue();
1401210299Sed  }
1402210299Sed
1403210299Sed  // Next comes everything else. We're especially interested in multiplies
1404210299Sed  // here, but they're in the middle, so just visit the rest with one loop.
1405210299Sed  for (; i != NumOperands; ++i) {
1406194612Sed    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
1407194612Sed    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
1408194612Sed      APInt NewScale =
1409194612Sed        Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
1410194612Sed      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
1411194612Sed        // A multiplication of a constant with another add; recurse.
1412205407Srdivacky        const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
1413194612Sed        Interesting |=
1414194612Sed          CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
1415205407Srdivacky                                       Add->op_begin(), Add->getNumOperands(),
1416194612Sed                                       NewScale, SE);
1417194612Sed      } else {
1418194612Sed        // A multiplication of a constant with some other value. Update
1419194612Sed        // the map.
1420198090Srdivacky        SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
1421198090Srdivacky        const SCEV *Key = SE.getMulExpr(MulOps);
1422198090Srdivacky        std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
1423195340Sed          M.insert(std::make_pair(Key, NewScale));
1424194612Sed        if (Pair.second) {
1425194612Sed          NewOps.push_back(Pair.first->first);
1426194612Sed        } else {
1427194612Sed          Pair.first->second += NewScale;
1428194612Sed          // The map already had an entry for this value, which may indicate
1429194612Sed          // a folding opportunity.
1430194612Sed          Interesting = true;
1431194612Sed        }
1432194612Sed      }
1433194612Sed    } else {
1434194612Sed      // An ordinary operand. Update the map.
1435198090Srdivacky      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
1436195340Sed        M.insert(std::make_pair(Ops[i], Scale));
1437194612Sed      if (Pair.second) {
1438194612Sed        NewOps.push_back(Pair.first->first);
1439194612Sed      } else {
1440194612Sed        Pair.first->second += Scale;
1441194612Sed        // The map already had an entry for this value, which may indicate
1442194612Sed        // a folding opportunity.
1443194612Sed        Interesting = true;
1444194612Sed      }
1445194612Sed    }
1446194612Sed  }
1447194612Sed
1448194612Sed  return Interesting;
1449194612Sed}
1450194612Sed
1451194612Sednamespace {
1452194612Sed  struct APIntCompare {
1453194612Sed    bool operator()(const APInt &LHS, const APInt &RHS) const {
1454194612Sed      return LHS.ult(RHS);
1455194612Sed    }
1456194612Sed  };
1457194612Sed}
1458194612Sed
1459193323Sed/// getAddExpr - Get a canonical add expression, or something simpler if
1460193323Sed/// possible.
1461198090Srdivackyconst SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
1462221345Sdim                                        SCEV::NoWrapFlags Flags) {
1463221345Sdim  assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
1464221345Sdim         "only nuw or nsw allowed");
1465193323Sed  assert(!Ops.empty() && "Cannot get empty add!");
1466193323Sed  if (Ops.size() == 1) return Ops[0];
1467193323Sed#ifndef NDEBUG
1468226633Sdim  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
1469193323Sed  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1470210299Sed    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
1471193323Sed           "SCEVAddExpr operand types don't match!");
1472193323Sed#endif
1473193323Sed
1474221345Sdim  // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
1475221345Sdim  // And vice-versa.
1476221345Sdim  int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
1477221345Sdim  SCEV::NoWrapFlags SignOrUnsignWrap = maskFlags(Flags, SignOrUnsignMask);
1478221345Sdim  if (SignOrUnsignWrap && (SignOrUnsignWrap != SignOrUnsignMask)) {
1479202878Srdivacky    bool All = true;
1480212904Sdim    for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(),
1481212904Sdim         E = Ops.end(); I != E; ++I)
1482212904Sdim      if (!isKnownNonNegative(*I)) {
1483202878Srdivacky        All = false;
1484202878Srdivacky        break;
1485202878Srdivacky      }
1486221345Sdim    if (All) Flags = setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
1487202878Srdivacky  }
1488202878Srdivacky
1489193323Sed  // Sort by complexity, this groups all similar expression types together.
1490193323Sed  GroupByComplexity(Ops, LI);
1491193323Sed
1492193323Sed  // If there are any constants, fold them together.
1493193323Sed  unsigned Idx = 0;
1494193323Sed  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1495193323Sed    ++Idx;
1496193323Sed    assert(Idx < Ops.size());
1497193323Sed    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1498193323Sed      // We found two constants, fold them together!
1499194612Sed      Ops[0] = getConstant(LHSC->getValue()->getValue() +
1500194612Sed                           RHSC->getValue()->getValue());
1501194612Sed      if (Ops.size() == 2) return Ops[0];
1502193323Sed      Ops.erase(Ops.begin()+1);  // Erase the folded element
1503193323Sed      LHSC = cast<SCEVConstant>(Ops[0]);
1504193323Sed    }
1505193323Sed
1506193323Sed    // If we are left with a constant zero being added, strip it off.
1507207618Srdivacky    if (LHSC->getValue()->isZero()) {
1508193323Sed      Ops.erase(Ops.begin());
1509193323Sed      --Idx;
1510193323Sed    }
1511207618Srdivacky
1512207618Srdivacky    if (Ops.size() == 1) return Ops[0];
1513193323Sed  }
1514193323Sed
1515212904Sdim  // Okay, check to see if the same value occurs in the operand list more than
1516212904Sdim  // once.  If so, merge them together into an multiply expression.  Since we
1517212904Sdim  // sorted the list, these values are required to be adjacent.
1518226633Sdim  Type *Ty = Ops[0]->getType();
1519212904Sdim  bool FoundMatch = false;
1520212904Sdim  for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
1521193323Sed    if (Ops[i] == Ops[i+1]) {      //  X + Y + Y  -->  X + Y*2
1522212904Sdim      // Scan ahead to count how many equal operands there are.
1523212904Sdim      unsigned Count = 2;
1524212904Sdim      while (i+Count != e && Ops[i+Count] == Ops[i])
1525212904Sdim        ++Count;
1526212904Sdim      // Merge the values into a multiply.
1527212904Sdim      const SCEV *Scale = getConstant(Ty, Count);
1528212904Sdim      const SCEV *Mul = getMulExpr(Scale, Ops[i]);
1529212904Sdim      if (Ops.size() == Count)
1530193323Sed        return Mul;
1531212904Sdim      Ops[i] = Mul;
1532212904Sdim      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
1533212904Sdim      --i; e -= Count - 1;
1534212904Sdim      FoundMatch = true;
1535193323Sed    }
1536212904Sdim  if (FoundMatch)
1537221345Sdim    return getAddExpr(Ops, Flags);
1538193323Sed
1539193323Sed  // Check for truncates. If all the operands are truncated from the same
1540193323Sed  // type, see if factoring out the truncate would permit the result to be
1541193323Sed  // folded. eg., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
1542193323Sed  // if the contents of the resulting outer trunc fold to something simple.
1543193323Sed  for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
1544193323Sed    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
1545226633Sdim    Type *DstType = Trunc->getType();
1546226633Sdim    Type *SrcType = Trunc->getOperand()->getType();
1547198090Srdivacky    SmallVector<const SCEV *, 8> LargeOps;
1548193323Sed    bool Ok = true;
1549193323Sed    // Check all the operands to see if they can be represented in the
1550193323Sed    // source type of the truncate.
1551193323Sed    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1552193323Sed      if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
1553193323Sed        if (T->getOperand()->getType() != SrcType) {
1554193323Sed          Ok = false;
1555193323Sed          break;
1556193323Sed        }
1557193323Sed        LargeOps.push_back(T->getOperand());
1558193323Sed      } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
1559207618Srdivacky        LargeOps.push_back(getAnyExtendExpr(C, SrcType));
1560193323Sed      } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
1561198090Srdivacky        SmallVector<const SCEV *, 8> LargeMulOps;
1562193323Sed        for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
1563193323Sed          if (const SCEVTruncateExpr *T =
1564193323Sed                dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
1565193323Sed            if (T->getOperand()->getType() != SrcType) {
1566193323Sed              Ok = false;
1567193323Sed              break;
1568193323Sed            }
1569193323Sed            LargeMulOps.push_back(T->getOperand());
1570193323Sed          } else if (const SCEVConstant *C =
1571193323Sed                       dyn_cast<SCEVConstant>(M->getOperand(j))) {
1572207618Srdivacky            LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
1573193323Sed          } else {
1574193323Sed            Ok = false;
1575193323Sed            break;
1576193323Sed          }
1577193323Sed        }
1578193323Sed        if (Ok)
1579193323Sed          LargeOps.push_back(getMulExpr(LargeMulOps));
1580193323Sed      } else {
1581193323Sed        Ok = false;
1582193323Sed        break;
1583193323Sed      }
1584193323Sed    }
1585193323Sed    if (Ok) {
1586193323Sed      // Evaluate the expression in the larger type.
1587221345Sdim      const SCEV *Fold = getAddExpr(LargeOps, Flags);
1588193323Sed      // If it folds to something simple, use it. Otherwise, don't.
1589193323Sed      if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
1590193323Sed        return getTruncateExpr(Fold, DstType);
1591193323Sed    }
1592193323Sed  }
1593193323Sed
1594193323Sed  // Skip past any other cast SCEVs.
1595193323Sed  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
1596193323Sed    ++Idx;
1597193323Sed
1598193323Sed  // If there are add operands they would be next.
1599193323Sed  if (Idx < Ops.size()) {
1600193323Sed    bool DeletedAdd = false;
1601193323Sed    while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
1602193323Sed      // If we have an add, expand the add operands onto the end of the operands
1603193323Sed      // list.
1604193323Sed      Ops.erase(Ops.begin()+Idx);
1605210299Sed      Ops.append(Add->op_begin(), Add->op_end());
1606193323Sed      DeletedAdd = true;
1607193323Sed    }
1608193323Sed
1609193323Sed    // If we deleted at least one add, we added operands to the end of the list,
1610193323Sed    // and they are not necessarily sorted.  Recurse to resort and resimplify
1611204642Srdivacky    // any operands we just acquired.
1612193323Sed    if (DeletedAdd)
1613193323Sed      return getAddExpr(Ops);
1614193323Sed  }
1615193323Sed
1616193323Sed  // Skip over the add expression until we get to a multiply.
1617193323Sed  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
1618193323Sed    ++Idx;
1619193323Sed
1620194612Sed  // Check to see if there are any folding opportunities present with
1621194612Sed  // operands multiplied by constant values.
1622194612Sed  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
1623194612Sed    uint64_t BitWidth = getTypeSizeInBits(Ty);
1624198090Srdivacky    DenseMap<const SCEV *, APInt> M;
1625198090Srdivacky    SmallVector<const SCEV *, 8> NewOps;
1626194612Sed    APInt AccumulatedConstant(BitWidth, 0);
1627194612Sed    if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
1628205407Srdivacky                                     Ops.data(), Ops.size(),
1629205407Srdivacky                                     APInt(BitWidth, 1), *this)) {
1630194612Sed      // Some interesting folding opportunity is present, so its worthwhile to
1631194612Sed      // re-generate the operands list. Group the operands by constant scale,
1632194612Sed      // to avoid multiplying by the same constant scale multiple times.
1633198090Srdivacky      std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
1634263508Sdim      for (SmallVectorImpl<const SCEV *>::const_iterator I = NewOps.begin(),
1635194612Sed           E = NewOps.end(); I != E; ++I)
1636194612Sed        MulOpLists[M.find(*I)->second].push_back(*I);
1637194612Sed      // Re-generate the operands list.
1638194612Sed      Ops.clear();
1639194612Sed      if (AccumulatedConstant != 0)
1640194612Sed        Ops.push_back(getConstant(AccumulatedConstant));
1641195098Sed      for (std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare>::iterator
1642195098Sed           I = MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I)
1643194612Sed        if (I->first != 0)
1644195098Sed          Ops.push_back(getMulExpr(getConstant(I->first),
1645195098Sed                                   getAddExpr(I->second)));
1646194612Sed      if (Ops.empty())
1647207618Srdivacky        return getConstant(Ty, 0);
1648194612Sed      if (Ops.size() == 1)
1649194612Sed        return Ops[0];
1650194612Sed      return getAddExpr(Ops);
1651194612Sed    }
1652194612Sed  }
1653194612Sed
1654193323Sed  // If we are adding something to a multiply expression, make sure the
1655193323Sed  // something is not already an operand of the multiply.  If so, merge it into
1656193323Sed  // the multiply.
1657193323Sed  for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
1658193323Sed    const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
1659193323Sed    for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
1660193323Sed      const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
1661212904Sdim      if (isa<SCEVConstant>(MulOpSCEV))
1662212904Sdim        continue;
1663193323Sed      for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
1664212904Sdim        if (MulOpSCEV == Ops[AddOp]) {
1665193323Sed          // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
1666198090Srdivacky          const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
1667193323Sed          if (Mul->getNumOperands() != 2) {
1668193323Sed            // If the multiply has more than two operands, we must get the
1669193323Sed            // Y*Z term.
1670212904Sdim            SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
1671212904Sdim                                                Mul->op_begin()+MulOp);
1672212904Sdim            MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
1673193323Sed            InnerMul = getMulExpr(MulOps);
1674193323Sed          }
1675207618Srdivacky          const SCEV *One = getConstant(Ty, 1);
1676212904Sdim          const SCEV *AddOne = getAddExpr(One, InnerMul);
1677212904Sdim          const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV);
1678193323Sed          if (Ops.size() == 2) return OuterMul;
1679193323Sed          if (AddOp < Idx) {
1680193323Sed            Ops.erase(Ops.begin()+AddOp);
1681193323Sed            Ops.erase(Ops.begin()+Idx-1);
1682193323Sed          } else {
1683193323Sed            Ops.erase(Ops.begin()+Idx);
1684193323Sed            Ops.erase(Ops.begin()+AddOp-1);
1685193323Sed          }
1686193323Sed          Ops.push_back(OuterMul);
1687193323Sed          return getAddExpr(Ops);
1688193323Sed        }
1689193323Sed
1690193323Sed      // Check this multiply against other multiplies being added together.
1691193323Sed      for (unsigned OtherMulIdx = Idx+1;
1692193323Sed           OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
1693193323Sed           ++OtherMulIdx) {
1694193323Sed        const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
1695193323Sed        // If MulOp occurs in OtherMul, we can fold the two multiplies
1696193323Sed        // together.
1697193323Sed        for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
1698193323Sed             OMulOp != e; ++OMulOp)
1699193323Sed          if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
1700193323Sed            // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
1701198090Srdivacky            const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
1702193323Sed            if (Mul->getNumOperands() != 2) {
1703195098Sed              SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
1704212904Sdim                                                  Mul->op_begin()+MulOp);
1705212904Sdim              MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
1706193323Sed              InnerMul1 = getMulExpr(MulOps);
1707193323Sed            }
1708198090Srdivacky            const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
1709193323Sed            if (OtherMul->getNumOperands() != 2) {
1710195098Sed              SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
1711212904Sdim                                                  OtherMul->op_begin()+OMulOp);
1712212904Sdim              MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
1713193323Sed              InnerMul2 = getMulExpr(MulOps);
1714193323Sed            }
1715198090Srdivacky            const SCEV *InnerMulSum = getAddExpr(InnerMul1,InnerMul2);
1716198090Srdivacky            const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
1717193323Sed            if (Ops.size() == 2) return OuterMul;
1718193323Sed            Ops.erase(Ops.begin()+Idx);
1719193323Sed            Ops.erase(Ops.begin()+OtherMulIdx-1);
1720193323Sed            Ops.push_back(OuterMul);
1721193323Sed            return getAddExpr(Ops);
1722193323Sed          }
1723193323Sed      }
1724193323Sed    }
1725193323Sed  }
1726193323Sed
1727193323Sed  // If there are any add recurrences in the operands list, see if any other
1728193323Sed  // added values are loop invariant.  If so, we can fold them into the
1729193323Sed  // recurrence.
1730193323Sed  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
1731193323Sed    ++Idx;
1732193323Sed
1733193323Sed  // Scan over all recurrences, trying to fold loop invariants into them.
1734193323Sed  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
1735193323Sed    // Scan all of the other operands to this add and add them to the vector if
1736193323Sed    // they are loop invariant w.r.t. the recurrence.
1737198090Srdivacky    SmallVector<const SCEV *, 8> LIOps;
1738193323Sed    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
1739207618Srdivacky    const Loop *AddRecLoop = AddRec->getLoop();
1740193323Sed    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1741218893Sdim      if (isLoopInvariant(Ops[i], AddRecLoop)) {
1742193323Sed        LIOps.push_back(Ops[i]);
1743193323Sed        Ops.erase(Ops.begin()+i);
1744193323Sed        --i; --e;
1745193323Sed      }
1746193323Sed
1747193323Sed    // If we found some loop invariants, fold them into the recurrence.
1748193323Sed    if (!LIOps.empty()) {
1749193323Sed      //  NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
1750193323Sed      LIOps.push_back(AddRec->getStart());
1751193323Sed
1752198090Srdivacky      SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
1753201360Srdivacky                                             AddRec->op_end());
1754193323Sed      AddRecOps[0] = getAddExpr(LIOps);
1755193323Sed
1756210299Sed      // Build the new addrec. Propagate the NUW and NSW flags if both the
1757210299Sed      // outer add and the inner addrec are guaranteed to have no overflow.
1758221345Sdim      // Always propagate NW.
1759221345Sdim      Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
1760221345Sdim      const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);
1761201360Srdivacky
1762193323Sed      // If all of the other operands were loop invariant, we are done.
1763193323Sed      if (Ops.size() == 1) return NewRec;
1764193323Sed
      // Otherwise, add the folded AddRec to the non-invariant parts.
1766193323Sed      for (unsigned i = 0;; ++i)
1767193323Sed        if (Ops[i] == AddRec) {
1768193323Sed          Ops[i] = NewRec;
1769193323Sed          break;
1770193323Sed        }
1771193323Sed      return getAddExpr(Ops);
1772193323Sed    }
1773193323Sed
1774193323Sed    // Okay, if there weren't any loop invariants to be folded, check to see if
1775193323Sed    // there are multiple AddRec's with the same loop induction variable being
1776193323Sed    // added together.  If so, we can fold them.
1777193323Sed    for (unsigned OtherIdx = Idx+1;
1778212904Sdim         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
1779212904Sdim         ++OtherIdx)
1780212904Sdim      if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
1781212904Sdim        // Other + {A,+,B}<L> + {C,+,D}<L>  -->  Other + {A+C,+,B+D}<L>
1782212904Sdim        SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
1783212904Sdim                                               AddRec->op_end());
1784212904Sdim        for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
1785212904Sdim             ++OtherIdx)
1786212904Sdim          if (const SCEVAddRecExpr *OtherAddRec =
1787212904Sdim                dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]))
1788212904Sdim            if (OtherAddRec->getLoop() == AddRecLoop) {
1789212904Sdim              for (unsigned i = 0, e = OtherAddRec->getNumOperands();
1790212904Sdim                   i != e; ++i) {
1791212904Sdim                if (i >= AddRecOps.size()) {
1792212904Sdim                  AddRecOps.append(OtherAddRec->op_begin()+i,
1793212904Sdim                                   OtherAddRec->op_end());
1794212904Sdim                  break;
1795212904Sdim                }
1796212904Sdim                AddRecOps[i] = getAddExpr(AddRecOps[i],
1797212904Sdim                                          OtherAddRec->getOperand(i));
1798212904Sdim              }
1799212904Sdim              Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
1800193323Sed            }
1801221345Sdim        // Step size has changed, so we cannot guarantee no self-wraparound.
1802221345Sdim        Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
1803212904Sdim        return getAddExpr(Ops);
1804193323Sed      }
1805193323Sed
1806193323Sed    // Otherwise couldn't fold anything into this recurrence.  Move onto the
1807193323Sed    // next one.
1808193323Sed  }
1809193323Sed
1810193323Sed  // Okay, it looks like we really DO need an add expr.  Check to see if we
1811193323Sed  // already have one, otherwise create a new one.
1812195340Sed  FoldingSetNodeID ID;
1813195340Sed  ID.AddInteger(scAddExpr);
1814195340Sed  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1815195340Sed    ID.AddPointer(Ops[i]);
1816195340Sed  void *IP = 0;
1817202878Srdivacky  SCEVAddExpr *S =
1818202878Srdivacky    static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
1819202878Srdivacky  if (!S) {
1820205407Srdivacky    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
1821205407Srdivacky    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
1822205407Srdivacky    S = new (SCEVAllocator) SCEVAddExpr(ID.Intern(SCEVAllocator),
1823205407Srdivacky                                        O, Ops.size());
1824202878Srdivacky    UniqueSCEVs.InsertNode(S, IP);
1825202878Srdivacky  }
1826221345Sdim  S->setNoWrapFlags(Flags);
1827195340Sed  return S;
1828193323Sed}
1829193323Sed
/// Multiply two unsigned 64-bit values, setting \p Overflow if the product
/// wrapped.  Overflow is never cleared, so it can accumulate across a chain
/// of calls.
static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
  uint64_t Product = i * j;
  // The division round-trips exactly iff the multiplication did not wrap.
  // j <= 1 can never overflow (and j == 0 would divide by zero), so only
  // check when j > 1.
  if (j > 1)
    if (Product / j != i)
      Overflow = true;
  return Product;
}
1835226633Sdim
1836226633Sdim/// Compute the result of "n choose k", the binomial coefficient.  If an
1837226633Sdim/// intermediate computation overflows, Overflow will be set and the return will
1838239462Sdim/// be garbage. Overflow is not cleared on absence of overflow.
1839226633Sdimstatic uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
1840226633Sdim  // We use the multiplicative formula:
1841226633Sdim  //     n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
1842226633Sdim  // At each iteration, we take the n-th term of the numeral and divide by the
1843226633Sdim  // (k-n)th term of the denominator.  This division will always produce an
1844226633Sdim  // integral result, and helps reduce the chance of overflow in the
1845226633Sdim  // intermediate computations. However, we can still overflow even when the
1846226633Sdim  // final result would fit.
1847226633Sdim
1848226633Sdim  if (n == 0 || n == k) return 1;
1849226633Sdim  if (k > n) return 0;
1850226633Sdim
1851226633Sdim  if (k > n/2)
1852226633Sdim    k = n-k;
1853226633Sdim
1854226633Sdim  uint64_t r = 1;
1855226633Sdim  for (uint64_t i = 1; i <= k; ++i) {
1856226633Sdim    r = umul_ov(r, n-(i-1), Overflow);
1857226633Sdim    r /= i;
1858226633Sdim  }
1859226633Sdim  return r;
1860226633Sdim}
1861226633Sdim
/// getMulExpr - Get a canonical multiply expression, or something simpler if
/// possible.
///
/// Ops is consumed: it is sorted and folded in place.  The result is one of
/// the incoming operands, a simplified expression, or a uniqued SCEVMulExpr
/// allocated from SCEVAllocator.
const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
                                        SCEV::NoWrapFlags Flags) {
  assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) &&
         "only nuw or nsw allowed");
  assert(!Ops.empty() && "Cannot get empty mul!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  // All operands must share one effective type.
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVMulExpr operand types don't match!");
#endif

  // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
  // And vice-versa.
  int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
  SCEV::NoWrapFlags SignOrUnsignWrap = maskFlags(Flags, SignOrUnsignMask);
  if (SignOrUnsignWrap && (SignOrUnsignWrap != SignOrUnsignMask)) {
    bool All = true;
    for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(),
         E = Ops.end(); I != E; ++I)
      if (!isKnownNonNegative(*I)) {
        All = false;
        break;
      }
    if (All) Flags = setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
  }

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  // After sorting, any constants sit at the front of Ops.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {

    // C1*(C2+V) -> C1*C2 + C1*V
    if (Ops.size() == 2)
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
        if (Add->getNumOperands() == 2 &&
            isa<SCEVConstant>(Add->getOperand(0)))
          return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
                            getMulExpr(LHSC, Add->getOperand(1)));

    ++Idx;
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(getContext(),
                                           LHSC->getValue()->getValue() *
                                           RHSC->getValue()->getValue());
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant one being multiplied, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
      // If we have a multiply of zero, it will always be zero.
      return Ops[0];
    } else if (Ops[0]->isAllOnesValue()) {
      // If we have a mul by -1 of an add, try distributing the -1 among the
      // add operands.
      if (Ops.size() == 2) {
        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
          SmallVector<const SCEV *, 4> NewOps;
          bool AnyFolded = false;
          // NOTE(review): the iterator is spelled SCEVAddRecExpr::op_iterator
          // but walks the Add's operands; presumably the operand iterator
          // type is shared by all n-ary SCEVs -- confirm.
          for (SCEVAddRecExpr::op_iterator I = Add->op_begin(),
                 E = Add->op_end(); I != E; ++I) {
            const SCEV *Mul = getMulExpr(Ops[0], *I);
            // Only distribute if at least one -1*Op actually simplified;
            // otherwise we would just rewrite the same product larger.
            if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
            NewOps.push_back(Mul);
          }
          if (AnyFolded)
            return getAddExpr(NewOps);
        }
        else if (const SCEVAddRecExpr *
                 AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
          // Negation preserves a recurrence's no self-wrap property.
          SmallVector<const SCEV *, 4> Operands;
          for (SCEVAddRecExpr::op_iterator I = AddRec->op_begin(),
                 E = AddRec->op_end(); I != E; ++I) {
            Operands.push_back(getMulExpr(Ops[0], *I));
          }
          return getAddRecExpr(Operands, AddRec->getLoop(),
                               AddRec->getNoWrapFlags(SCEV::FlagNW));
        }
      }
    }

    // Constant folding may have reduced us to a single operand.
    if (Ops.size() == 1)
      return Ops[0];
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // If there are mul operands inline them all into this expression.
  if (Idx < Ops.size()) {
    bool DeletedMul = false;
    while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      // If we have an mul, expand the mul operands onto the end of the operands
      // list.
      Ops.erase(Ops.begin()+Idx);
      Ops.append(Mul->op_begin(), Mul->op_end());
      DeletedMul = true;
    }

    // If we deleted at least one mul, we added operands to the end of the list,
    // and they are not necessarily sorted.  Recurse to resort and resimplify
    // any operands we just acquired.
    if (DeletedMul)
      return getMulExpr(Ops);
  }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant.  If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this mul and add them to the vector if
    // they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    const Loop *AddRecLoop = AddRec->getLoop();
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (isLoopInvariant(Ops[i], AddRecLoop)) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      //  NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.reserve(AddRec->getNumOperands());
      const SCEV *Scale = getMulExpr(LIOps);
      for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
        NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));

      // Build the new addrec. Propagate the NUW and NSW flags if both the
      // outer mul and the inner addrec are guaranteed to have no overflow.
      //
      // No self-wrap cannot be guaranteed after changing the step size, but
      // will be inferred if either NUW or NSW is true.
      Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW));
      const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags);

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, multiply the folded AddRec by the non-invariant parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getMulExpr(Ops);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see if
    // there are multiple AddRec's with the same loop induction variable being
    // multiplied together.  If so, we can fold them.
    for (unsigned OtherIdx = Idx+1;
         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
         ++OtherIdx) {
      if (AddRecLoop != cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop())
        continue;

      // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
      // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
      //       choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z
      //   ]]],+,...up to x=2n}.
      // Note that the arguments to choose() are always integers with values
      // known at compile time, never SCEV objects.
      //
      // The implementation avoids pointless extra computations when the two
      // addrec's are of different length (mathematically, it's equivalent to
      // an infinite stream of zeros on the right).
      bool OpsModified = false;
      for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
           ++OtherIdx) {
        const SCEVAddRecExpr *OtherAddRec =
          dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
        if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop)
          continue;

        // Abort the whole fold if any binomial coefficient (or, for types
        // wider than 64 bits, any coefficient product) overflows uint64_t.
        bool Overflow = false;
        Type *Ty = AddRec->getType();
        bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
        SmallVector<const SCEV*, 7> AddRecOps;
        for (int x = 0, xe = AddRec->getNumOperands() +
               OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
          const SCEV *Term = getConstant(Ty, 0);
          for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
            uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
            for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
                   ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
                 z < ze && !Overflow; ++z) {
              uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
              uint64_t Coeff;
              if (LargerThan64Bits)
                Coeff = umul_ov(Coeff1, Coeff2, Overflow);
              else
                Coeff = Coeff1*Coeff2;
              const SCEV *CoeffTerm = getConstant(Ty, Coeff);
              const SCEV *Term1 = AddRec->getOperand(y-z);
              const SCEV *Term2 = OtherAddRec->getOperand(z);
              Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1,Term2));
            }
          }
          AddRecOps.push_back(Term);
        }
        if (!Overflow) {
          const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRec->getLoop(),
                                                SCEV::FlagAnyWrap);
          if (Ops.size() == 2) return NewAddRec;
          Ops[Idx] = NewAddRec;
          Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
          OpsModified = true;
          // Keep folding further addrecs into the combined result; stop if
          // the product no longer simplified to an addrec.
          AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
          if (!AddRec)
            break;
        }
      }
      if (OpsModified)
        return getMulExpr(Ops);
    }

    // Otherwise couldn't fold anything into this recurrence.  Move onto the
    // next one.
  }

  // Okay, it looks like we really DO need an mul expr.  Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scMulExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = 0;
  SCEVMulExpr *S =
    static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
                                        O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
  }
  // Applied unconditionally: the flags are attached whether the node is
  // freshly created or was found in the uniquing table.
  S->setNoWrapFlags(Flags);
  return S;
}
2123193323Sed
/// getUDivExpr - Get a canonical unsigned division expression, or something
/// simpler if possible.
///
/// Folds are only performed when provably safe: each candidate fold is
/// verified by re-doing the arithmetic in a wider type (ExtTy) where it
/// cannot wrap and checking that the results agree.
const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
         getEffectiveSCEVType(RHS->getType()) &&
         "SCEVUDivExpr operand types don't match!");

  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    if (RHSC->getValue()->equalsInt(1))
      return LHS;                               // X udiv 1 --> x
    // If the denominator is zero, the result of the udiv is undefined. Don't
    // try to analyze it, because the resolution chosen here may differ from
    // the resolution chosen in other parts of the compiler.
    if (!RHSC->getValue()->isZero()) {
      // Determine if the division can be folded into the operands of
      // its operands.
      // TODO: Generalize this to non-constants by using known-bits information.
      Type *Ty = LHS->getType();
      unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
      unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
      // For non-power-of-two values, effectively round the value up to the
      // nearest power of two.
      if (!RHSC->getValue()->getValue().isPowerOf2())
        ++MaxShiftAmt;
      // ExtTy is wide enough that the zero-extended checks below cannot wrap;
      // equality in ExtTy therefore proves the narrow fold is safe.
      IntegerType *ExtTy =
        IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
      if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
        if (const SCEVConstant *Step =
            dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
          // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
          const APInt &StepInt = Step->getValue()->getValue();
          const APInt &DivInt = RHSC->getValue()->getValue();
          if (!StepInt.urem(DivInt) &&
              getZeroExtendExpr(AR, ExtTy) ==
              getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                            getZeroExtendExpr(Step, ExtTy),
                            AR->getLoop(), SCEV::FlagAnyWrap)) {
            SmallVector<const SCEV *, 4> Operands;
            for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
              Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
            return getAddRecExpr(Operands, AR->getLoop(),
                                 SCEV::FlagNW);
          }
          // Get a canonical UDivExpr for a recurrence.
          // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
          // We can currently only fold X%N if X is constant.
          const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
          if (StartC && !DivInt.urem(StepInt) &&
              getZeroExtendExpr(AR, ExtTy) ==
              getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                            getZeroExtendExpr(Step, ExtTy),
                            AR->getLoop(), SCEV::FlagAnyWrap)) {
            const APInt &StartInt = StartC->getValue()->getValue();
            const APInt &StartRem = StartInt.urem(StepInt);
            // No early return here: LHS is rewritten in place and control
            // falls through to the uniquing code below.
            if (StartRem != 0)
              LHS = getAddRecExpr(getConstant(StartInt - StartRem), Step,
                                  AR->getLoop(), SCEV::FlagNW);
          }
        }
      // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
      if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
        SmallVector<const SCEV *, 4> Operands;
        for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
          Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
        if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
          // Find an operand that's safely divisible.
          for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
            const SCEV *Op = M->getOperand(i);
            const SCEV *Div = getUDivExpr(Op, RHSC);
            // Div is exact iff re-multiplying recovers the operand.
            if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
              Operands = SmallVector<const SCEV *, 4>(M->op_begin(),
                                                      M->op_end());
              Operands[i] = Div;
              return getMulExpr(Operands);
            }
          }
      }
      // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
      if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
        SmallVector<const SCEV *, 4> Operands;
        for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
          Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
        if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
          // Every addend must divide exactly, or the distribution is invalid.
          Operands.clear();
          for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
            const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
            if (isa<SCEVUDivExpr>(Op) ||
                getMulExpr(Op, RHS) != A->getOperand(i))
              break;
            Operands.push_back(Op);
          }
          if (Operands.size() == A->getNumOperands())
            return getAddExpr(Operands);
        }
      }

      // Fold if both operands are constant.
      if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
        Constant *LHSCV = LHSC->getValue();
        Constant *RHSCV = RHSC->getValue();
        return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
                                                                   RHSCV)));
      }
    }
  }

  // No simplification applied; build (or reuse) a uniqued SCEVUDivExpr node.
  FoldingSetNodeID ID;
  ID.AddInteger(scUDivExpr);
  ID.AddPointer(LHS);
  ID.AddPointer(RHS);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
                                             LHS, RHS);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}
2242193323Sed
2243193323Sed
2244193323Sed/// getAddRecExpr - Get an add recurrence expression for the specified loop.
2245193323Sed/// Simplify the expression as much as possible.
2246221345Sdimconst SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
2247221345Sdim                                           const Loop *L,
2248221345Sdim                                           SCEV::NoWrapFlags Flags) {
2249198090Srdivacky  SmallVector<const SCEV *, 4> Operands;
2250193323Sed  Operands.push_back(Start);
2251193323Sed  if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
2252193323Sed    if (StepChrec->getLoop() == L) {
2253210299Sed      Operands.append(StepChrec->op_begin(), StepChrec->op_end());
2254221345Sdim      return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
2255193323Sed    }
2256193323Sed
2257193323Sed  Operands.push_back(Step);
2258221345Sdim  return getAddRecExpr(Operands, L, Flags);
2259193323Sed}
2260193323Sed
2261193323Sed/// getAddRecExpr - Get an add recurrence expression for the specified loop.
2262193323Sed/// Simplify the expression as much as possible.
2263195098Sedconst SCEV *
2264198090SrdivackyScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
2265221345Sdim                               const Loop *L, SCEV::NoWrapFlags Flags) {
2266193323Sed  if (Operands.size() == 1) return Operands[0];
2267193323Sed#ifndef NDEBUG
2268226633Sdim  Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
2269193323Sed  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
2270212904Sdim    assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
2271193323Sed           "SCEVAddRecExpr operand types don't match!");
2272218893Sdim  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
2273218893Sdim    assert(isLoopInvariant(Operands[i], L) &&
2274218893Sdim           "SCEVAddRecExpr operand is not loop-invariant!");
2275193323Sed#endif
2276193323Sed
2277193323Sed  if (Operands.back()->isZero()) {
2278193323Sed    Operands.pop_back();
2279221345Sdim    return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0}  -->  X
2280193323Sed  }
2281193323Sed
2282204642Srdivacky  // It's tempting to want to call getMaxBackedgeTakenCount count here and
2283204642Srdivacky  // use that information to infer NUW and NSW flags. However, computing a
2284204642Srdivacky  // BE count requires calling getAddRecExpr, so we may not yet have a
2285204642Srdivacky  // meaningful BE count at this point (and if we don't, we'd be stuck
2286204642Srdivacky  // with a SCEVCouldNotCompute as the cached BE count).
2287204642Srdivacky
2288221345Sdim  // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
2289221345Sdim  // And vice-versa.
2290221345Sdim  int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
2291221345Sdim  SCEV::NoWrapFlags SignOrUnsignWrap = maskFlags(Flags, SignOrUnsignMask);
2292221345Sdim  if (SignOrUnsignWrap && (SignOrUnsignWrap != SignOrUnsignMask)) {
2293202878Srdivacky    bool All = true;
2294212904Sdim    for (SmallVectorImpl<const SCEV *>::const_iterator I = Operands.begin(),
2295212904Sdim         E = Operands.end(); I != E; ++I)
2296212904Sdim      if (!isKnownNonNegative(*I)) {
2297202878Srdivacky        All = false;
2298202878Srdivacky        break;
2299202878Srdivacky      }
2300221345Sdim    if (All) Flags = setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
2301202878Srdivacky  }
2302202878Srdivacky
2303193323Sed  // Canonicalize nested AddRecs in by nesting them in order of loop depth.
2304193323Sed  if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
2305201360Srdivacky    const Loop *NestedLoop = NestedAR->getLoop();
2306212904Sdim    if (L->contains(NestedLoop) ?
2307202878Srdivacky        (L->getLoopDepth() < NestedLoop->getLoopDepth()) :
2308212904Sdim        (!NestedLoop->contains(L) &&
2309202878Srdivacky         DT->dominates(L->getHeader(), NestedLoop->getHeader()))) {
2310198090Srdivacky      SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
2311201360Srdivacky                                                  NestedAR->op_end());
2312193323Sed      Operands[0] = NestedAR->getStart();
2313195098Sed      // AddRecs require their operands be loop-invariant with respect to their
2314195098Sed      // loops. Don't perform this transformation if it would break this
2315195098Sed      // requirement.
2316195098Sed      bool AllInvariant = true;
2317195098Sed      for (unsigned i = 0, e = Operands.size(); i != e; ++i)
2318218893Sdim        if (!isLoopInvariant(Operands[i], L)) {
2319195098Sed          AllInvariant = false;
2320195098Sed          break;
2321195098Sed        }
2322195098Sed      if (AllInvariant) {
2323221345Sdim        // Create a recurrence for the outer loop with the same step size.
2324221345Sdim        //
2325221345Sdim        // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
2326221345Sdim        // inner recurrence has the same property.
2327221345Sdim        SCEV::NoWrapFlags OuterFlags =
2328221345Sdim          maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());
2329221345Sdim
2330221345Sdim        NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
2331195098Sed        AllInvariant = true;
2332195098Sed        for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i)
2333218893Sdim          if (!isLoopInvariant(NestedOperands[i], NestedLoop)) {
2334195098Sed            AllInvariant = false;
2335195098Sed            break;
2336195098Sed          }
2337221345Sdim        if (AllInvariant) {
2338195098Sed          // Ok, both add recurrences are valid after the transformation.
2339221345Sdim          //
2340221345Sdim          // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
2341221345Sdim          // the outer recurrence has the same property.
2342221345Sdim          SCEV::NoWrapFlags InnerFlags =
2343221345Sdim            maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
2344221345Sdim          return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
2345221345Sdim        }
2346195098Sed      }
2347195098Sed      // Reset Operands to its original state.
2348195098Sed      Operands[0] = NestedAR;
2349193323Sed    }
2350193323Sed  }
2351193323Sed
2352202878Srdivacky  // Okay, it looks like we really DO need an addrec expr.  Check to see if we
2353202878Srdivacky  // already have one, otherwise create a new one.
2354195340Sed  FoldingSetNodeID ID;
2355195340Sed  ID.AddInteger(scAddRecExpr);
2356195340Sed  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
2357195340Sed    ID.AddPointer(Operands[i]);
2358195340Sed  ID.AddPointer(L);
2359195340Sed  void *IP = 0;
2360202878Srdivacky  SCEVAddRecExpr *S =
2361202878Srdivacky    static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2362202878Srdivacky  if (!S) {
2363205407Srdivacky    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size());
2364205407Srdivacky    std::uninitialized_copy(Operands.begin(), Operands.end(), O);
2365205407Srdivacky    S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator),
2366205407Srdivacky                                           O, Operands.size(), L);
2367202878Srdivacky    UniqueSCEVs.InsertNode(S, IP);
2368202878Srdivacky  }
2369221345Sdim  S->setNoWrapFlags(Flags);
2370195340Sed  return S;
2371193323Sed}
2372193323Sed
2373195098Sedconst SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
2374195098Sed                                         const SCEV *RHS) {
2375198090Srdivacky  SmallVector<const SCEV *, 2> Ops;
2376193323Sed  Ops.push_back(LHS);
2377193323Sed  Ops.push_back(RHS);
2378193323Sed  return getSMaxExpr(Ops);
2379193323Sed}
2380193323Sed
/// getSMaxExpr - Fold and uniquify a signed-max expression over Ops.
/// Constants are folded together, nested smax operands are flattened,
/// and operands provably redundant under signed comparison are removed.
/// Ops is used as scratch space and may be modified by this call.
const SCEV *
ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty smax!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  // All operands must share one effective type.
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVSMaxExpr operand types don't match!");
#endif

  // Sort by complexity, this groups all similar expression types together.
  // After sorting, any constants are at the front of the list.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(getContext(),
                              APIntOps::smax(LHSC->getValue()->getValue(),
                                             RHSC->getValue()->getValue()));
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant minimum-int, strip it off; it is the
    // identity element for smax.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
      // If we have an smax with a constant maximum-int, it will always be
      // maximum-int.
      return Ops[0];
    }

    // Constant folding may have reduced the list to one operand.
    if (Ops.size() == 1) return Ops[0];
  }

  // Find the first SMax
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
    ++Idx;

  // Check to see if one of the operands is an SMax. If so, expand its operands
  // onto our operand list, and recurse to simplify.
  if (Idx < Ops.size()) {
    bool DeletedSMax = false;
    while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
      Ops.erase(Ops.begin()+Idx);
      Ops.append(SMax->op_begin(), SMax->op_end());
      DeletedSMax = true;
    }

    if (DeletedSMax)
      return getSMaxExpr(Ops);
  }

  // Okay, check to see if the same value occurs in the operand list twice.  If
  // so, delete one.  Since we sorted the list, these values are required to
  // be adjacent.
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    //  X smax Y smax Y  -->  X smax Y
    //  X smax Y         -->  X, if X is always greater than Y
    if (Ops[i] == Ops[i+1] ||
        isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) {
      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
      --i; --e;
    } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) {
      Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
      --i; --e;
    }

  if (Ops.size() == 1) return Ops[0];

  assert(!Ops.empty() && "Reduced smax down to nothing!");

  // Okay, it looks like we really DO need an smax expr.  Check to see if we
  // already have one, otherwise create a new one.  SCEVs are uniqued so
  // structurally identical expressions share one node.
  FoldingSetNodeID ID;
  ID.AddInteger(scSMaxExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  // Copy the operands into allocator-owned storage; the node references them
  // for its whole lifetime.
  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator),
                                             O, Ops.size());
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}
2476193323Sed
2477195098Sedconst SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS,
2478195098Sed                                         const SCEV *RHS) {
2479198090Srdivacky  SmallVector<const SCEV *, 2> Ops;
2480193323Sed  Ops.push_back(LHS);
2481193323Sed  Ops.push_back(RHS);
2482193323Sed  return getUMaxExpr(Ops);
2483193323Sed}
2484193323Sed
/// getUMaxExpr - Fold and uniquify an unsigned-max expression over Ops.
/// Constants are folded together, nested umax operands are flattened,
/// and operands provably redundant under unsigned comparison are removed.
/// Ops is used as scratch space and may be modified by this call.
const SCEV *
ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty umax!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  // All operands must share one effective type.
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVUMaxExpr operand types don't match!");
#endif

  // Sort by complexity, this groups all similar expression types together.
  // After sorting, any constants are at the front of the list.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(getContext(),
                              APIntOps::umax(LHSC->getValue()->getValue(),
                                             RHSC->getValue()->getValue()));
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant minimum-int (zero), strip it off; it is
    // the identity element for umax.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) {
      // If we have an umax with a constant maximum-int, it will always be
      // maximum-int.
      return Ops[0];
    }

    // Constant folding may have reduced the list to one operand.
    if (Ops.size() == 1) return Ops[0];
  }

  // Find the first UMax
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
    ++Idx;

  // Check to see if one of the operands is a UMax. If so, expand its operands
  // onto our operand list, and recurse to simplify.
  if (Idx < Ops.size()) {
    bool DeletedUMax = false;
    while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
      Ops.erase(Ops.begin()+Idx);
      Ops.append(UMax->op_begin(), UMax->op_end());
      DeletedUMax = true;
    }

    if (DeletedUMax)
      return getUMaxExpr(Ops);
  }

  // Okay, check to see if the same value occurs in the operand list twice.  If
  // so, delete one.  Since we sorted the list, these values are required to
  // be adjacent.
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    //  X umax Y umax Y  -->  X umax Y
    //  X umax Y         -->  X, if X is always greater than Y
    if (Ops[i] == Ops[i+1] ||
        isKnownPredicate(ICmpInst::ICMP_UGE, Ops[i], Ops[i+1])) {
      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
      --i; --e;
    } else if (isKnownPredicate(ICmpInst::ICMP_ULE, Ops[i], Ops[i+1])) {
      Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
      --i; --e;
    }

  if (Ops.size() == 1) return Ops[0];

  assert(!Ops.empty() && "Reduced umax down to nothing!");

  // Okay, it looks like we really DO need a umax expr.  Check to see if we
  // already have one, otherwise create a new one.  SCEVs are uniqued so
  // structurally identical expressions share one node.
  FoldingSetNodeID ID;
  ID.AddInteger(scUMaxExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  // Copy the operands into allocator-owned storage; the node references them
  // for its whole lifetime.
  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator),
                                             O, Ops.size());
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}
2580193323Sed
2581195098Sedconst SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
2582195098Sed                                         const SCEV *RHS) {
2583194612Sed  // ~smax(~x, ~y) == smin(x, y).
2584194612Sed  return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
2585194612Sed}
2586194612Sed
2587195098Sedconst SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
2588195098Sed                                         const SCEV *RHS) {
2589194612Sed  // ~umax(~x, ~y) == umin(x, y)
2590194612Sed  return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
2591194612Sed}
2592194612Sed
/// getSizeOfExpr - Return a SCEV of type IntTy for the allocation size
/// (in bytes) of AllocTy.
const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
  // If we have DataLayout, we can bypass creating a target-independent
  // constant expression and then folding it back into a ConstantInt.
  // This is just a compile-time optimization.
  if (TD)
    return getConstant(IntTy, TD->getTypeAllocSize(AllocTy));

  // No DataLayout: build a target-independent sizeof constant expression
  // and let the constant folder simplify it as far as it can.
  Constant *C = ConstantExpr::getSizeOf(AllocTy);
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
    if (Constant *Folded = ConstantFoldConstantExpression(CE, TD, TLI))
      C = Folded;
  // sizeof is pointer-width, so the effective type of an AllocTy* gives the
  // width; the caller-provided IntTy must agree.
  Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
  assert(Ty == IntTy && "Effective SCEV type doesn't match");
  return getTruncateOrZeroExtend(getSCEV(C), Ty);
}
2608198090Srdivacky
/// getOffsetOfExpr - Return a SCEV of type IntTy for the byte offset of
/// field FieldNo within the struct type STy.
const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
                                             StructType *STy,
                                             unsigned FieldNo) {
  // If we have DataLayout, we can bypass creating a target-independent
  // constant expression and then folding it back into a ConstantInt.
  // This is just a compile-time optimization.
  if (TD) {
    return getConstant(IntTy,
                       TD->getStructLayout(STy)->getElementOffset(FieldNo));
  }

  // No DataLayout: build a target-independent offsetof constant expression
  // and let the constant folder simplify it as far as it can.
  Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
    if (Constant *Folded = ConstantFoldConstantExpression(CE, TD, TLI))
      C = Folded;

  // offsetof is pointer-width; derive the width from the effective type of
  // a pointer to the struct.
  Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
  return getTruncateOrZeroExtend(getSCEV(C), Ty);
}
2628198090Srdivacky
/// getUnknown - Return a SCEVUnknown wrapping the given value, creating and
/// uniquing one if necessary.
const SCEV *ScalarEvolution::getUnknown(Value *V) {
  // Don't attempt to do anything other than create a SCEVUnknown object
  // here.  createSCEV only calls getUnknown after checking for all other
  // interesting possibilities, and any other code that calls getUnknown
  // is doing so in order to hide a value from SCEV canonicalization.

  FoldingSetNodeID ID;
  ID.AddInteger(scUnknown);
  ID.AddPointer(V);
  void *IP = 0;
  if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
    // An existing node must still wrap the same value; SCEVUnknowns are
    // updated/invalidated when their values die.
    assert(cast<SCEVUnknown>(S)->getValue() == V &&
           "Stale SCEVUnknown in uniquing map!");
    return S;
  }
  // Create a new node and link it at the head of the FirstUnknown list,
  // which chains all SCEVUnknowns together (the constructor receives the
  // current head as its next link).
  SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
                                            FirstUnknown);
  FirstUnknown = cast<SCEVUnknown>(S);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}
2650193323Sed
2651193323Sed//===----------------------------------------------------------------------===//
2652193323Sed//            Basic SCEV Analysis and PHI Idiom Recognition Code
2653193323Sed//
2654193323Sed
2655193323Sed/// isSCEVable - Test if values of the given type are analyzable within
2656193323Sed/// the SCEV framework. This primarily includes integer types, and it
2657193323Sed/// can optionally include pointer types if the ScalarEvolution class
2658193323Sed/// has access to target-specific information.
2659226633Sdimbool ScalarEvolution::isSCEVable(Type *Ty) const {
2660198090Srdivacky  // Integers and pointers are always SCEVable.
2661204642Srdivacky  return Ty->isIntegerTy() || Ty->isPointerTy();
2662193323Sed}
2663193323Sed
2664193323Sed/// getTypeSizeInBits - Return the size in bits of the specified type,
2665193323Sed/// for which isSCEVable must return true.
2666226633Sdimuint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
2667193323Sed  assert(isSCEVable(Ty) && "Type is not SCEVable!");
2668193323Sed
2669243830Sdim  // If we have a DataLayout, use it!
2670193323Sed  if (TD)
2671193323Sed    return TD->getTypeSizeInBits(Ty);
2672193323Sed
2673198090Srdivacky  // Integer types have fixed sizes.
2674203954Srdivacky  if (Ty->isIntegerTy())
2675198090Srdivacky    return Ty->getPrimitiveSizeInBits();
2676198090Srdivacky
2677243830Sdim  // The only other support type is pointer. Without DataLayout, conservatively
2678198090Srdivacky  // assume pointers are 64-bit.
2679204642Srdivacky  assert(Ty->isPointerTy() && "isSCEVable permitted a non-SCEVable type!");
2680198090Srdivacky  return 64;
2681193323Sed}
2682193323Sed
2683193323Sed/// getEffectiveSCEVType - Return a type with the same bitwidth as
2684193323Sed/// the given type and which represents how SCEV will treat the given
2685193323Sed/// type, for which isSCEVable must return true. For pointer types,
2686193323Sed/// this is the pointer-sized integer type.
2687226633SdimType *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
2688193323Sed  assert(isSCEVable(Ty) && "Type is not SCEVable!");
2689193323Sed
2690263508Sdim  if (Ty->isIntegerTy()) {
2691193323Sed    return Ty;
2692263508Sdim  }
2693193323Sed
2694198090Srdivacky  // The only other support type is pointer.
2695204642Srdivacky  assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
2696198090Srdivacky
2697263508Sdim  if (TD)
2698263508Sdim    return TD->getIntPtrType(Ty);
2699263508Sdim
2700243830Sdim  // Without DataLayout, conservatively assume pointers are 64-bit.
2701198090Srdivacky  return Type::getInt64Ty(getContext());
2702193323Sed}
2703193323Sed
/// getCouldNotCompute - Return the singleton "cannot analyze" marker SCEV.
const SCEV *ScalarEvolution::getCouldNotCompute() {
  // There is exactly one SCEVCouldNotCompute per ScalarEvolution instance;
  // hand out its address so callers can compare against it by pointer.
  return &CouldNotCompute;
}
2707193323Sed
2708263508Sdimnamespace {
2709263508Sdim  // Helper class working with SCEVTraversal to figure out if a SCEV contains
2710263508Sdim  // a SCEVUnknown with null value-pointer. FindInvalidSCEVUnknown::FindOne
2711263508Sdim  // is set iff if find such SCEVUnknown.
2712263508Sdim  //
2713263508Sdim  struct FindInvalidSCEVUnknown {
2714263508Sdim    bool FindOne;
2715263508Sdim    FindInvalidSCEVUnknown() { FindOne = false; }
2716263508Sdim    bool follow(const SCEV *S) {
2717263508Sdim      switch (S->getSCEVType()) {
2718263508Sdim      case scConstant:
2719263508Sdim        return false;
2720263508Sdim      case scUnknown:
2721263508Sdim        if (!cast<SCEVUnknown>(S)->getValue())
2722263508Sdim          FindOne = true;
2723263508Sdim        return false;
2724263508Sdim      default:
2725263508Sdim        return true;
2726263508Sdim      }
2727263508Sdim    }
2728263508Sdim    bool isDone() const { return FindOne; }
2729263508Sdim  };
2730263508Sdim}
2731263508Sdim
2732263508Sdimbool ScalarEvolution::checkValidity(const SCEV *S) const {
2733263508Sdim  FindInvalidSCEVUnknown F;
2734263508Sdim  SCEVTraversal<FindInvalidSCEVUnknown> ST(F);
2735263508Sdim  ST.visitAll(S);
2736263508Sdim
2737263508Sdim  return !F.FindOne;
2738263508Sdim}
2739263508Sdim
2740193323Sed/// getSCEV - Return an existing SCEV if it exists, otherwise analyze the
2741193323Sed/// expression and create a new one.
2742198090Srdivackyconst SCEV *ScalarEvolution::getSCEV(Value *V) {
2743193323Sed  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
2744193323Sed
2745263508Sdim  ValueExprMapType::iterator I = ValueExprMap.find_as(V);
2746263508Sdim  if (I != ValueExprMap.end()) {
2747263508Sdim    const SCEV *S = I->second;
2748263508Sdim    if (checkValidity(S))
2749263508Sdim      return S;
2750263508Sdim    else
2751263508Sdim      ValueExprMap.erase(I);
2752263508Sdim  }
2753198090Srdivacky  const SCEV *S = createSCEV(V);
2754212904Sdim
2755212904Sdim  // The process of creating a SCEV for V may have caused other SCEVs
2756212904Sdim  // to have been created, so it's necessary to insert the new entry
2757212904Sdim  // from scratch, rather than trying to remember the insert position
2758212904Sdim  // above.
2759212904Sdim  ValueExprMap.insert(std::make_pair(SCEVCallbackVH(V, this), S));
2760193323Sed  return S;
2761193323Sed}
2762193323Sed
2763193323Sed/// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
2764193323Sed///
2765198090Srdivackyconst SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
2766193323Sed  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
2767198090Srdivacky    return getConstant(
2768198090Srdivacky               cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));
2769193323Sed
2770226633Sdim  Type *Ty = V->getType();
2771193323Sed  Ty = getEffectiveSCEVType(Ty);
2772198090Srdivacky  return getMulExpr(V,
2773198090Srdivacky                  getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))));
2774193323Sed}
2775193323Sed
2776193323Sed/// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
2777198090Srdivackyconst SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
2778193323Sed  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
2779198090Srdivacky    return getConstant(
2780198090Srdivacky                cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));
2781193323Sed
2782226633Sdim  Type *Ty = V->getType();
2783193323Sed  Ty = getEffectiveSCEVType(Ty);
2784198090Srdivacky  const SCEV *AllOnes =
2785198090Srdivacky                   getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)));
2786193323Sed  return getMinusSCEV(AllOnes, V);
2787193323Sed}
2788193323Sed
2789221345Sdim/// getMinusSCEV - Return LHS-RHS.  Minus is represented in SCEV as A+B*-1.
2790218893Sdimconst SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
2791221345Sdim                                          SCEV::NoWrapFlags Flags) {
2792221345Sdim  assert(!maskFlags(Flags, SCEV::FlagNUW) && "subtraction does not have NUW");
2793221345Sdim
2794212904Sdim  // Fast path: X - X --> 0.
2795212904Sdim  if (LHS == RHS)
2796212904Sdim    return getConstant(LHS->getType(), 0);
2797212904Sdim
2798193323Sed  // X - Y --> X + -Y
2799221345Sdim  return getAddExpr(LHS, getNegativeSCEV(RHS), Flags);
2800193323Sed}
2801193323Sed
2802193323Sed/// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the
2803193323Sed/// input value to the specified type.  If the type must be extended, it is zero
2804193323Sed/// extended.
2805198090Srdivackyconst SCEV *
2806226633SdimScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty) {
2807226633Sdim  Type *SrcTy = V->getType();
2808204642Srdivacky  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2809204642Srdivacky         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2810193323Sed         "Cannot truncate or zero extend with non-integer arguments!");
2811193323Sed  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2812193323Sed    return V;  // No conversion
2813193323Sed  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2814193323Sed    return getTruncateExpr(V, Ty);
2815193323Sed  return getZeroExtendExpr(V, Ty);
2816193323Sed}
2817193323Sed
2818193323Sed/// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of the
2819193323Sed/// input value to the specified type.  If the type must be extended, it is sign
2820193323Sed/// extended.
2821198090Srdivackyconst SCEV *
2822198090SrdivackyScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
2823226633Sdim                                         Type *Ty) {
2824226633Sdim  Type *SrcTy = V->getType();
2825204642Srdivacky  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2826204642Srdivacky         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2827193323Sed         "Cannot truncate or zero extend with non-integer arguments!");
2828193323Sed  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2829193323Sed    return V;  // No conversion
2830193323Sed  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2831193323Sed    return getTruncateExpr(V, Ty);
2832193323Sed  return getSignExtendExpr(V, Ty);
2833193323Sed}
2834193323Sed
2835193323Sed/// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the
2836193323Sed/// input value to the specified type.  If the type must be extended, it is zero
2837193323Sed/// extended.  The conversion must not be narrowing.
2838198090Srdivackyconst SCEV *
2839226633SdimScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
2840226633Sdim  Type *SrcTy = V->getType();
2841204642Srdivacky  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2842204642Srdivacky         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2843193323Sed         "Cannot noop or zero extend with non-integer arguments!");
2844193323Sed  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2845193323Sed         "getNoopOrZeroExtend cannot truncate!");
2846193323Sed  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2847193323Sed    return V;  // No conversion
2848193323Sed  return getZeroExtendExpr(V, Ty);
2849193323Sed}
2850193323Sed
2851193323Sed/// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the
2852193323Sed/// input value to the specified type.  If the type must be extended, it is sign
2853193323Sed/// extended.  The conversion must not be narrowing.
2854198090Srdivackyconst SCEV *
2855226633SdimScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
2856226633Sdim  Type *SrcTy = V->getType();
2857204642Srdivacky  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2858204642Srdivacky         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2859193323Sed         "Cannot noop or sign extend with non-integer arguments!");
2860193323Sed  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2861193323Sed         "getNoopOrSignExtend cannot truncate!");
2862193323Sed  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2863193323Sed    return V;  // No conversion
2864193323Sed  return getSignExtendExpr(V, Ty);
2865193323Sed}
2866193323Sed
2867194178Sed/// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of
2868194178Sed/// the input value to the specified type. If the type must be extended,
2869194178Sed/// it is extended with unspecified bits. The conversion must not be
2870194178Sed/// narrowing.
2871198090Srdivackyconst SCEV *
2872226633SdimScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
2873226633Sdim  Type *SrcTy = V->getType();
2874204642Srdivacky  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2875204642Srdivacky         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2876194178Sed         "Cannot noop or any extend with non-integer arguments!");
2877194178Sed  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2878194178Sed         "getNoopOrAnyExtend cannot truncate!");
2879194178Sed  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2880194178Sed    return V;  // No conversion
2881194178Sed  return getAnyExtendExpr(V, Ty);
2882194178Sed}
2883194178Sed
2884193323Sed/// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
2885193323Sed/// input value to the specified type.  The conversion must not be widening.
2886198090Srdivackyconst SCEV *
2887226633SdimScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
2888226633Sdim  Type *SrcTy = V->getType();
2889204642Srdivacky  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2890204642Srdivacky         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2891193323Sed         "Cannot truncate or noop with non-integer arguments!");
2892193323Sed  assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
2893193323Sed         "getTruncateOrNoop cannot extend!");
2894193323Sed  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2895193323Sed    return V;  // No conversion
2896193323Sed  return getTruncateExpr(V, Ty);
2897193323Sed}
2898193323Sed
2899194612Sed/// getUMaxFromMismatchedTypes - Promote the operands to the wider of
2900194612Sed/// the types using zero-extension, and then perform a umax operation
2901194612Sed/// with them.
2902195098Sedconst SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
2903195098Sed                                                        const SCEV *RHS) {
2904198090Srdivacky  const SCEV *PromotedLHS = LHS;
2905198090Srdivacky  const SCEV *PromotedRHS = RHS;
2906194612Sed
2907194612Sed  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2908194612Sed    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2909194612Sed  else
2910194612Sed    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2911194612Sed
2912194612Sed  return getUMaxExpr(PromotedLHS, PromotedRHS);
2913194612Sed}
2914194612Sed
2915194710Sed/// getUMinFromMismatchedTypes - Promote the operands to the wider of
2916194710Sed/// the types using zero-extension, and then perform a umin operation
2917194710Sed/// with them.
2918195098Sedconst SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
2919195098Sed                                                        const SCEV *RHS) {
2920198090Srdivacky  const SCEV *PromotedLHS = LHS;
2921198090Srdivacky  const SCEV *PromotedRHS = RHS;
2922194710Sed
2923194710Sed  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2924194710Sed    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2925194710Sed  else
2926194710Sed    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2927194710Sed
2928194710Sed  return getUMinExpr(PromotedLHS, PromotedRHS);
2929194710Sed}
2930194710Sed
2931221345Sdim/// getPointerBase - Transitively follow the chain of pointer-type operands
2932221345Sdim/// until reaching a SCEV that does not have a single pointer operand. This
2933221345Sdim/// returns a SCEVUnknown pointer for well-formed pointer-type expressions,
2934221345Sdim/// but corner cases do exist.
2935221345Sdimconst SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
2936221345Sdim  // A pointer operand may evaluate to a nonpointer expression, such as null.
2937221345Sdim  if (!V->getType()->isPointerTy())
2938221345Sdim    return V;
2939221345Sdim
2940221345Sdim  if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) {
2941221345Sdim    return getPointerBase(Cast->getOperand());
2942221345Sdim  }
2943221345Sdim  else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) {
2944221345Sdim    const SCEV *PtrOp = 0;
2945221345Sdim    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
2946221345Sdim         I != E; ++I) {
2947221345Sdim      if ((*I)->getType()->isPointerTy()) {
2948221345Sdim        // Cannot find the base of an expression with multiple pointer operands.
2949221345Sdim        if (PtrOp)
2950221345Sdim          return V;
2951221345Sdim        PtrOp = *I;
2952221345Sdim      }
2953221345Sdim    }
2954221345Sdim    if (!PtrOp)
2955221345Sdim      return V;
2956221345Sdim    return getPointerBase(PtrOp);
2957221345Sdim  }
2958221345Sdim  return V;
2959221345Sdim}
2960221345Sdim
2961198090Srdivacky/// PushDefUseChildren - Push users of the given Instruction
2962198090Srdivacky/// onto the given Worklist.
2963198090Srdivackystatic void
2964198090SrdivackyPushDefUseChildren(Instruction *I,
2965198090Srdivacky                   SmallVectorImpl<Instruction *> &Worklist) {
2966198090Srdivacky  // Push the def-use children onto the Worklist stack.
2967198090Srdivacky  for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
2968198090Srdivacky       UI != UE; ++UI)
2969212904Sdim    Worklist.push_back(cast<Instruction>(*UI));
2970198090Srdivacky}
2971198090Srdivacky
/// ForgetSymbolicName - This looks up computed SCEV values for all
/// instructions that depend on the given instruction and removes them from
/// the ValueExprMap map if they reference SymName. This is used during PHI
/// resolution.
void
ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) {
  // Depth-first walk of PN's transitive users.
  SmallVector<Instruction *, 16> Worklist;
  PushDefUseChildren(PN, Worklist);

  SmallPtrSet<Instruction *, 8> Visited;
  Visited.insert(PN);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    if (!Visited.insert(I)) continue;  // Already processed this instruction.

    ValueExprMapType::iterator It =
      ValueExprMap.find_as(static_cast<Value *>(I));
    if (It != ValueExprMap.end()) {
      const SCEV *Old = It->second;

      // Short-circuit the def-use traversal if the symbolic name
      // ceases to appear in expressions.
      if (Old != SymName && !hasOperand(Old, SymName))
        continue;

      // SCEVUnknown for a PHI either means that it has an unrecognized
      // structure, it's a PHI that's in the progress of being computed
      // by createNodeForPHI, or it's a single-value PHI. In the first case,
      // additional loop trip count information isn't going to change anything.
      // In the second case, createNodeForPHI will perform the necessary
      // updates on its own when it gets to that point. In the third, we do
      // want to forget the SCEVUnknown.
      if (!isa<PHINode>(I) ||
          !isa<SCEVUnknown>(Old) ||
          (I != PN && Old == SymName)) {
        // Drop everything memoized for the stale expression before erasing
        // the mapping itself.
        forgetMemoizedResults(Old);
        ValueExprMap.erase(It);
      }
    }

    // Continue the traversal through this instruction's users.
    PushDefUseChildren(I, Worklist);
  }
}
3015193323Sed
/// createNodeForPHI - PHI nodes have two cases.  Either the PHI node exists in
/// a loop header, making it a potential recurrence, or it doesn't.
///
const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
  if (const Loop *L = LI->getLoopFor(PN->getParent()))
    if (L->getHeader() == PN->getParent()) {
      // The loop may have multiple entrances or multiple exits; we can analyze
      // this phi as an addrec if it has a unique entry value and a unique
      // backedge value.
      Value *BEValueV = 0, *StartValueV = 0;
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        Value *V = PN->getIncomingValue(i);
        if (L->contains(PN->getIncomingBlock(i))) {
          // Incoming edge from inside the loop: candidate backedge value.
          if (!BEValueV) {
            BEValueV = V;
          } else if (BEValueV != V) {
            // Conflicting backedge values; give up on the addrec analysis.
            BEValueV = 0;
            break;
          }
        } else if (!StartValueV) {
          // Incoming edge from outside the loop: candidate start value.
          StartValueV = V;
        } else if (StartValueV != V) {
          // Conflicting entry values; give up on the addrec analysis.
          StartValueV = 0;
          break;
        }
      }
      if (BEValueV && StartValueV) {
        // While we are analyzing this PHI node, handle its value symbolically.
        const SCEV *SymbolicName = getUnknown(PN);
        assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
               "PHI node already processed?");
        ValueExprMap.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));

        // Using this symbolic name for the PHI, analyze the value coming around
        // the back-edge.
        const SCEV *BEValue = getSCEV(BEValueV);

        // NOTE: If BEValue is loop invariant, we know that the PHI node just
        // has a special value for the first iteration of the loop.

        // If the value coming around the backedge is an add with the symbolic
        // value we just inserted, then we found a simple induction variable!
        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
          // If there is a single occurrence of the symbolic value, replace it
          // with a recurrence.
          unsigned FoundIndex = Add->getNumOperands();
          for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
            if (Add->getOperand(i) == SymbolicName)
              if (FoundIndex == e) {
                FoundIndex = i;
                break;
              }

          if (FoundIndex != Add->getNumOperands()) {
            // Create an add with everything but the specified operand.
            SmallVector<const SCEV *, 8> Ops;
            for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
              if (i != FoundIndex)
                Ops.push_back(Add->getOperand(i));
            const SCEV *Accum = getAddExpr(Ops);

            // This is not a valid addrec if the step amount is varying each
            // loop iteration, but is not itself an addrec in this loop.
            if (isLoopInvariant(Accum, L) ||
                (isa<SCEVAddRecExpr>(Accum) &&
                 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
              SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;

              // If the increment doesn't overflow, then neither the addrec nor
              // the post-increment will overflow.
              if (const AddOperator *OBO = dyn_cast<AddOperator>(BEValueV)) {
                if (OBO->hasNoUnsignedWrap())
                  Flags = setFlags(Flags, SCEV::FlagNUW);
                if (OBO->hasNoSignedWrap())
                  Flags = setFlags(Flags, SCEV::FlagNSW);
              } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
                // If the increment is an inbounds GEP, then we know the address
                // space cannot be wrapped around. We cannot make any guarantee
                // about signed or unsigned overflow because pointers are
                // unsigned but we may have a negative index from the base
                // pointer. We can guarantee that no unsigned wrap occurs if the
                // indices form a positive value.
                if (GEP->isInBounds()) {
                  Flags = setFlags(Flags, SCEV::FlagNW);

                  const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
                  if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
                    Flags = setFlags(Flags, SCEV::FlagNUW);
                }
              } else if (const SubOperator *OBO =
                           dyn_cast<SubOperator>(BEValueV)) {
                if (OBO->hasNoUnsignedWrap())
                  Flags = setFlags(Flags, SCEV::FlagNUW);
                if (OBO->hasNoSignedWrap())
                  Flags = setFlags(Flags, SCEV::FlagNSW);
              }

              const SCEV *StartVal = getSCEV(StartValueV);
              const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);

              // Since the no-wrap flags are on the increment, they apply to the
              // post-incremented value as well. Build the post-inc addrec for
              // its side effect of caching the flags on that expression.
              if (isLoopInvariant(Accum, L))
                (void)getAddRecExpr(getAddExpr(StartVal, Accum),
                                    Accum, L, Flags);

              // Okay, for the entire analysis of this edge we assumed the PHI
              // to be symbolic.  We now need to go back and purge all of the
              // entries for the scalars that use the symbolic expression.
              ForgetSymbolicName(PN, SymbolicName);
              ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
              return PHISCEV;
            }
          }
        } else if (const SCEVAddRecExpr *AddRec =
                     dyn_cast<SCEVAddRecExpr>(BEValue)) {
          // Otherwise, this could be a loop like this:
          //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
          // In this case, j = {1,+,1}  and BEValue is j.
          // Because the other in-value of i (0) fits the evolution of BEValue
          // i really is an addrec evolution.
          if (AddRec->getLoop() == L && AddRec->isAffine()) {
            const SCEV *StartVal = getSCEV(StartValueV);

            // If StartVal = j.start - j.stride, we can use StartVal as the
            // initial step of the addrec evolution.
            if (StartVal == getMinusSCEV(AddRec->getOperand(0),
                                         AddRec->getOperand(1))) {
              // FIXME: For constant StartVal, we should be able to infer
              // no-wrap flags.
              const SCEV *PHISCEV =
                getAddRecExpr(StartVal, AddRec->getOperand(1), L,
                              SCEV::FlagAnyWrap);

              // Okay, for the entire analysis of this edge we assumed the PHI
              // to be symbolic.  We now need to go back and purge all of the
              // entries for the scalars that use the symbolic expression.
              ForgetSymbolicName(PN, SymbolicName);
              ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
              return PHISCEV;
            }
          }
        }
      }
    }

  // If the PHI has a single incoming value, follow that value, unless the
  // PHI's incoming blocks are in a different loop, in which case doing so
  // risks breaking LCSSA form. Instcombine would normally zap these, but
  // it doesn't have DominatorTree information, so it may miss cases.
  if (Value *V = SimplifyInstruction(PN, TD, TLI, DT))
    if (LI->replacementPreservesLCSSAForm(PN, V))
      return getSCEV(V);

  // If it's not a loop phi, we can't handle it yet.
  return getUnknown(PN);
}
3173193323Sed
/// createNodeForGEP - Expand GEP instructions into add and multiply
/// operations. This allows them to be analyzed by regular SCEV code.
///
const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
  // All offset arithmetic is performed in the pointer-sized integer type.
  Type *IntPtrTy = getEffectiveSCEVType(GEP->getType());
  Value *Base = GEP->getOperand(0);
  // Don't attempt to analyze GEPs over unsized objects.
  if (!Base->getType()->getPointerElementType()->isSized())
    return getUnknown(GEP);

  // Don't blindly transfer the inbounds flag from the GEP instruction to the
  // Add expression, because the Instruction may be guarded by control flow
  // and the no-overflow bits may not be valid for the expression in any
  // context. (Only NSW is transferred here, never NUW.)
  SCEV::NoWrapFlags Wrap = GEP->isInBounds() ? SCEV::FlagNSW : SCEV::FlagAnyWrap;

  const SCEV *TotalOffset = getConstant(IntPtrTy, 0);
  gep_type_iterator GTI = gep_type_begin(GEP);
  // Walk all indices (operand 0 is the base pointer, skipped here); GTI is
  // advanced in lockstep inside the loop body.
  for (GetElementPtrInst::op_iterator I = llvm::next(GEP->op_begin()),
                                      E = GEP->op_end();
       I != E; ++I) {
    Value *Index = *I;
    // Compute the (potentially symbolic) offset in bytes for this index.
    if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
      // For a struct, add the member offset.
      unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
      const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo);

      // Add the field offset to the running total offset.
      TotalOffset = getAddExpr(TotalOffset, FieldOffset);
    } else {
      // For an array, add the element offset, explicitly scaled.
      const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, *GTI);
      const SCEV *IndexS = getSCEV(Index);
      // Getelementptr indices are signed.
      IndexS = getTruncateOrSignExtend(IndexS, IntPtrTy);

      // Multiply the index by the element size to compute the element offset.
      const SCEV *LocalOffset = getMulExpr(IndexS, ElementSize, Wrap);

      // Add the element offset to the running total offset.
      TotalOffset = getAddExpr(TotalOffset, LocalOffset);
    }
  }

  // Get the SCEV for the GEP base.
  const SCEV *BaseS = getSCEV(Base);

  // Add the total offset from all the GEP indices to the base.
  return getAddExpr(BaseS, TotalOffset, Wrap);
}
3225193323Sed
3226193323Sed/// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
3227193323Sed/// guaranteed to end in (at every loop iteration).  It is, at the same time,
3228193323Sed/// the minimum number of times S is divisible by 2.  For example, given {4,+,8}
3229193323Sed/// it returns 2.  If S is guaranteed to be 0, it returns the bitwidth of S.
3230194612Seduint32_t
3231198090SrdivackyScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
3232193323Sed  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
3233193323Sed    return C->getValue()->getValue().countTrailingZeros();
3234193323Sed
3235193323Sed  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
3236194612Sed    return std::min(GetMinTrailingZeros(T->getOperand()),
3237194612Sed                    (uint32_t)getTypeSizeInBits(T->getType()));
3238193323Sed
3239193323Sed  if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
3240194612Sed    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
3241194612Sed    return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
3242194612Sed             getTypeSizeInBits(E->getType()) : OpRes;
3243193323Sed  }
3244193323Sed
3245193323Sed  if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
3246194612Sed    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
3247194612Sed    return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
3248194612Sed             getTypeSizeInBits(E->getType()) : OpRes;
3249193323Sed  }
3250193323Sed
3251193323Sed  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
3252193323Sed    // The result is the min of all operands results.
3253194612Sed    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
3254193323Sed    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
3255194612Sed      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
3256193323Sed    return MinOpRes;
3257193323Sed  }
3258193323Sed
3259193323Sed  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
3260193323Sed    // The result is the sum of all operands results.
3261194612Sed    uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
3262194612Sed    uint32_t BitWidth = getTypeSizeInBits(M->getType());
3263193323Sed    for (unsigned i = 1, e = M->getNumOperands();
3264193323Sed         SumOpRes != BitWidth && i != e; ++i)
3265194612Sed      SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)),
3266193323Sed                          BitWidth);
3267193323Sed    return SumOpRes;
3268193323Sed  }
3269193323Sed
3270193323Sed  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
3271193323Sed    // The result is the min of all operands results.
3272194612Sed    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
3273193323Sed    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
3274194612Sed      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
3275193323Sed    return MinOpRes;
3276193323Sed  }
3277193323Sed
3278193323Sed  if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
3279193323Sed    // The result is the min of all operands results.
3280194612Sed    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
3281193323Sed    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
3282194612Sed      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
3283193323Sed    return MinOpRes;
3284193323Sed  }
3285193323Sed
3286193323Sed  if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
3287193323Sed    // The result is the min of all operands results.
3288194612Sed    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
3289193323Sed    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
3290194612Sed      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
3291193323Sed    return MinOpRes;
3292193323Sed  }
3293193323Sed
3294194612Sed  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
3295194612Sed    // For a SCEVUnknown, ask ValueTracking.
3296194612Sed    unsigned BitWidth = getTypeSizeInBits(U->getType());
3297194612Sed    APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
3298234353Sdim    ComputeMaskedBits(U->getValue(), Zeros, Ones);
3299194612Sed    return Zeros.countTrailingOnes();
3300194612Sed  }
3301194612Sed
3302194612Sed  // SCEVUDivExpr
3303193323Sed  return 0;
3304193323Sed}
3305193323Sed
/// getUnsignedRange - Determine the unsigned range for a particular SCEV.
/// Every return path goes through setUnsignedRange, which memoizes the
/// result in UnsignedRanges before returning it.
///
ConstantRange
ScalarEvolution::getUnsignedRange(const SCEV *S) {
  // See if we've computed this range already.
  DenseMap<const SCEV *, ConstantRange>::iterator I = UnsignedRanges.find(S);
  if (I != UnsignedRanges.end())
    return I->second;

  // A constant's range is the single-element range containing it.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return setUnsignedRange(C, ConstantRange(C->getValue()->getValue()));

  unsigned BitWidth = getTypeSizeInBits(S->getType());
  ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);

  // If the value has known zeros, the maximum unsigned value will have those
  // known zeros as well.
  uint32_t TZ = GetMinTrailingZeros(S);
  if (TZ != 0)
    ConservativeResult =
      ConstantRange(APInt::getMinValue(BitWidth),
                    APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1);

  // For n-ary expressions, fold the operands' ranges with the matching
  // ConstantRange operation.
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    ConstantRange X = getUnsignedRange(Add->getOperand(0));
    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
      X = X.add(getUnsignedRange(Add->getOperand(i)));
    return setUnsignedRange(Add, ConservativeResult.intersectWith(X));
  }

  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    ConstantRange X = getUnsignedRange(Mul->getOperand(0));
    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
      X = X.multiply(getUnsignedRange(Mul->getOperand(i)));
    return setUnsignedRange(Mul, ConservativeResult.intersectWith(X));
  }

  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
    ConstantRange X = getUnsignedRange(SMax->getOperand(0));
    for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
      X = X.smax(getUnsignedRange(SMax->getOperand(i)));
    return setUnsignedRange(SMax, ConservativeResult.intersectWith(X));
  }

  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
    ConstantRange X = getUnsignedRange(UMax->getOperand(0));
    for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
      X = X.umax(getUnsignedRange(UMax->getOperand(i)));
    return setUnsignedRange(UMax, ConservativeResult.intersectWith(X));
  }

  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
    ConstantRange X = getUnsignedRange(UDiv->getLHS());
    ConstantRange Y = getUnsignedRange(UDiv->getRHS());
    return setUnsignedRange(UDiv, ConservativeResult.intersectWith(X.udiv(Y)));
  }

  // Cast expressions: apply the corresponding range conversion.
  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
    ConstantRange X = getUnsignedRange(ZExt->getOperand());
    return setUnsignedRange(ZExt,
      ConservativeResult.intersectWith(X.zeroExtend(BitWidth)));
  }

  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
    ConstantRange X = getUnsignedRange(SExt->getOperand());
    return setUnsignedRange(SExt,
      ConservativeResult.intersectWith(X.signExtend(BitWidth)));
  }

  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
    ConstantRange X = getUnsignedRange(Trunc->getOperand());
    return setUnsignedRange(Trunc,
      ConservativeResult.intersectWith(X.truncate(BitWidth)));
  }

  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
    // If there's no unsigned wrap, the value will never be less than its
    // initial value.
    if (AddRec->getNoWrapFlags(SCEV::FlagNUW))
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart()))
        if (!C->getValue()->isZero())
          ConservativeResult =
            ConservativeResult.intersectWith(
              ConstantRange(C->getValue()->getValue(), APInt(BitWidth, 0)));

    // TODO: non-affine addrec
    if (AddRec->isAffine()) {
      Type *Ty = AddRec->getType();
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
      if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
          getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
        MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);

        const SCEV *Start = AddRec->getStart();
        const SCEV *Step = AddRec->getStepRecurrence(*this);

        // End = Start + MaxBECount * Step; the value range is then spanned
        // by the start and end ranges, provided nothing wrapped.
        ConstantRange StartRange = getUnsignedRange(Start);
        ConstantRange StepRange = getSignedRange(Step);
        ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount);
        ConstantRange EndRange =
          StartRange.add(MaxBECountRange.multiply(StepRange));

        // Check for overflow. This must be done with ConstantRange arithmetic
        // because we could be called from within the ScalarEvolution overflow
        // checking code. Redo the computation at 2*BitWidth+1 bits; any
        // mismatch means the BitWidth computation wrapped.
        ConstantRange ExtStartRange = StartRange.zextOrTrunc(BitWidth*2+1);
        ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1);
        ConstantRange ExtMaxBECountRange =
          MaxBECountRange.zextOrTrunc(BitWidth*2+1);
        ConstantRange ExtEndRange = EndRange.zextOrTrunc(BitWidth*2+1);
        if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) !=
            ExtEndRange)
          return setUnsignedRange(AddRec, ConservativeResult);

        APInt Min = APIntOps::umin(StartRange.getUnsignedMin(),
                                   EndRange.getUnsignedMin());
        APInt Max = APIntOps::umax(StartRange.getUnsignedMax(),
                                   EndRange.getUnsignedMax());
        if (Min.isMinValue() && Max.isMaxValue())
          return setUnsignedRange(AddRec, ConservativeResult);
        return setUnsignedRange(AddRec,
          ConservativeResult.intersectWith(ConstantRange(Min, Max+1)));
      }
    }

    return setUnsignedRange(AddRec, ConservativeResult);
  }

  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    // For a SCEVUnknown, ask ValueTracking.
    APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
    ComputeMaskedBits(U->getValue(), Zeros, Ones, TD);
    // Ones == ~Zeros + 1 means no bits are known; fall back to the
    // conservative result rather than building a degenerate range.
    if (Ones == ~Zeros + 1)
      return setUnsignedRange(U, ConservativeResult);
    return setUnsignedRange(U,
      ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1)));
  }

  return setUnsignedRange(S, ConservativeResult);
}
3446194612Sed
/// getSignedRange - Determine the signed range for a particular SCEV.
///
/// Recursively computes a conservative ConstantRange containing every value
/// that S may take when its bits are interpreted as signed. Each result is
/// memoized into the SignedRanges map via setSignedRange before returning,
/// so repeated queries for the same SCEV are cheap.
ConstantRange
ScalarEvolution::getSignedRange(const SCEV *S) {
  // See if we've computed this range already.
  DenseMap<const SCEV *, ConstantRange>::iterator I = SignedRanges.find(S);
  if (I != SignedRanges.end())
    return I->second;

  // A constant yields the singleton range [C, C+1).
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return setSignedRange(C, ConstantRange(C->getValue()->getValue()));

  // Start from the full set and shrink it with whatever facts we can prove.
  unsigned BitWidth = getTypeSizeInBits(S->getType());
  ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);

  // If the value has known zeros, the maximum signed value will have those
  // known zeros as well.
  uint32_t TZ = GetMinTrailingZeros(S);
  if (TZ != 0)
    ConservativeResult =
      ConstantRange(APInt::getSignedMinValue(BitWidth),
                    APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1);

  // For compound expressions, recursively compute the operand ranges and
  // combine them with the matching ConstantRange operation, intersecting
  // with the trailing-zeros-based conservative range computed above.
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    ConstantRange X = getSignedRange(Add->getOperand(0));
    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
      X = X.add(getSignedRange(Add->getOperand(i)));
    return setSignedRange(Add, ConservativeResult.intersectWith(X));
  }

  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    ConstantRange X = getSignedRange(Mul->getOperand(0));
    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
      X = X.multiply(getSignedRange(Mul->getOperand(i)));
    return setSignedRange(Mul, ConservativeResult.intersectWith(X));
  }

  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
    ConstantRange X = getSignedRange(SMax->getOperand(0));
    for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
      X = X.smax(getSignedRange(SMax->getOperand(i)));
    return setSignedRange(SMax, ConservativeResult.intersectWith(X));
  }

  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
    ConstantRange X = getSignedRange(UMax->getOperand(0));
    for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
      X = X.umax(getSignedRange(UMax->getOperand(i)));
    return setSignedRange(UMax, ConservativeResult.intersectWith(X));
  }

  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
    ConstantRange X = getSignedRange(UDiv->getLHS());
    ConstantRange Y = getSignedRange(UDiv->getRHS());
    return setSignedRange(UDiv, ConservativeResult.intersectWith(X.udiv(Y)));
  }

  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
    ConstantRange X = getSignedRange(ZExt->getOperand());
    return setSignedRange(ZExt,
      ConservativeResult.intersectWith(X.zeroExtend(BitWidth)));
  }

  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
    ConstantRange X = getSignedRange(SExt->getOperand());
    return setSignedRange(SExt,
      ConservativeResult.intersectWith(X.signExtend(BitWidth)));
  }

  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
    ConstantRange X = getSignedRange(Trunc->getOperand());
    return setSignedRange(Trunc,
      ConservativeResult.intersectWith(X.truncate(BitWidth)));
  }

  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
    // If there's no signed wrap, and all the operands have the same sign or
    // zero, the value won't ever change sign.
    if (AddRec->getNoWrapFlags(SCEV::FlagNSW)) {
      bool AllNonNeg = true;
      bool AllNonPos = true;
      for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
        if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false;
        if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false;
      }
      if (AllNonNeg)
        ConservativeResult = ConservativeResult.intersectWith(
          ConstantRange(APInt(BitWidth, 0),
                        APInt::getSignedMinValue(BitWidth)));
      else if (AllNonPos)
        ConservativeResult = ConservativeResult.intersectWith(
          ConstantRange(APInt::getSignedMinValue(BitWidth),
                        APInt(BitWidth, 1)));
    }

    // TODO: non-affine addrec
    if (AddRec->isAffine()) {
      Type *Ty = AddRec->getType();
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
      if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
          getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
        MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);

        const SCEV *Start = AddRec->getStart();
        const SCEV *Step = AddRec->getStepRecurrence(*this);

        // Compute the range of the final value: Start + MaxBECount * Step.
        ConstantRange StartRange = getSignedRange(Start);
        ConstantRange StepRange = getSignedRange(Step);
        ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount);
        ConstantRange EndRange =
          StartRange.add(MaxBECountRange.multiply(StepRange));

        // Check for overflow. This must be done with ConstantRange arithmetic
        // because we could be called from within the ScalarEvolution overflow
        // checking code.
        // Redo the computation at 2*BitWidth+1 bits; if the wide result
        // disagrees with the narrow one, the narrow arithmetic wrapped and
        // EndRange cannot be trusted.
        ConstantRange ExtStartRange = StartRange.sextOrTrunc(BitWidth*2+1);
        ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1);
        ConstantRange ExtMaxBECountRange =
          MaxBECountRange.zextOrTrunc(BitWidth*2+1);
        ConstantRange ExtEndRange = EndRange.sextOrTrunc(BitWidth*2+1);
        if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) !=
            ExtEndRange)
          return setSignedRange(AddRec, ConservativeResult);

        // Bound the addrec by the extremes of its start and end ranges.
        APInt Min = APIntOps::smin(StartRange.getSignedMin(),
                                   EndRange.getSignedMin());
        APInt Max = APIntOps::smax(StartRange.getSignedMax(),
                                   EndRange.getSignedMax());
        if (Min.isMinSignedValue() && Max.isMaxSignedValue())
          return setSignedRange(AddRec, ConservativeResult);
        return setSignedRange(AddRec,
          ConservativeResult.intersectWith(ConstantRange(Min, Max+1)));
      }
    }

    return setSignedRange(AddRec, ConservativeResult);
  }

  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    // For a SCEVUnknown, ask ValueTracking.
    // Non-integer (pointer) types are only analyzable when TargetData is
    // available.
    if (!U->getValue()->getType()->isIntegerTy() && !TD)
      return setSignedRange(U, ConservativeResult);
    unsigned NS = ComputeNumSignBits(U->getValue(), TD);
    // One sign bit is always known; NS <= 1 carries no extra information.
    if (NS <= 1)
      return setSignedRange(U, ConservativeResult);
    return setSignedRange(U, ConservativeResult.intersectWith(
      ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
                    APInt::getSignedMaxValue(BitWidth).ashr(NS - 1)+1)));
  }

  return setSignedRange(S, ConservativeResult);
}
3599194612Sed
/// createSCEV - We know that there is no SCEV for the specified value.
/// Analyze the expression.
///
/// Translates an LLVM Value into a SCEV expression by dispatching on its
/// opcode. Anything that cannot be modeled precisely falls back to a
/// SCEVUnknown wrapper around the value.
const SCEV *ScalarEvolution::createSCEV(Value *V) {
  if (!isSCEVable(V->getType()))
    return getUnknown(V);

  unsigned Opcode = Instruction::UserOp1;
  if (Instruction *I = dyn_cast<Instruction>(V)) {
    Opcode = I->getOpcode();

    // Don't attempt to analyze instructions in blocks that aren't
    // reachable. Such instructions don't matter, and they aren't required
    // to obey basic rules for definitions dominating uses which this
    // analysis depends on.
    if (!DT->isReachableFromEntry(I->getParent()))
      return getUnknown(V);
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
    Opcode = CE->getOpcode();
  else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
    return getConstant(CI);
  else if (isa<ConstantPointerNull>(V))
    return getConstant(V->getType(), 0);
  else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
    // A non-overridable alias is transparent; analyze its aliasee instead.
    return GA->mayBeOverridden() ? getUnknown(V) : getSCEV(GA->getAliasee());
  else
    return getUnknown(V);

  // At this point V is either an Instruction or a ConstantExpr; Operator
  // gives uniform operand access over both.
  Operator *U = cast<Operator>(V);
  switch (Opcode) {
  case Instruction::Add: {
    // The simple thing to do would be to just call getSCEV on both operands
    // and call getAddExpr with the result. However if we're looking at a
    // bunch of things all added together, this can be quite inefficient,
    // because it leads to N-1 getAddExpr calls for N ultimate operands.
    // Instead, gather up all the operands and make a single getAddExpr call.
    // LLVM IR canonical form means we need only traverse the left operands.
    //
    // Don't apply this instruction's NSW or NUW flags to the new
    // expression. The instruction may be guarded by control flow that the
    // no-wrap behavior depends on. Non-control-equivalent instructions can be
    // mapped to the same SCEV expression, and it would be incorrect to transfer
    // NSW/NUW semantics to those operations.
    SmallVector<const SCEV *, 4> AddOps;
    AddOps.push_back(getSCEV(U->getOperand(1)));
    for (Value *Op = U->getOperand(0); ; Op = U->getOperand(0)) {
      unsigned Opcode = Op->getValueID() - Value::InstructionVal;
      if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
        break;
      U = cast<Operator>(Op);
      const SCEV *Op1 = getSCEV(U->getOperand(1));
      // Fold subtraction in as addition of the negation.
      if (Opcode == Instruction::Sub)
        AddOps.push_back(getNegativeSCEV(Op1));
      else
        AddOps.push_back(Op1);
    }
    AddOps.push_back(getSCEV(U->getOperand(0)));
    return getAddExpr(AddOps);
  }
  case Instruction::Mul: {
    // Don't transfer NSW/NUW for the same reason as AddExpr.
    // As with Add, collect a whole left-leaning chain of multiplies into
    // one getMulExpr call.
    SmallVector<const SCEV *, 4> MulOps;
    MulOps.push_back(getSCEV(U->getOperand(1)));
    for (Value *Op = U->getOperand(0);
         Op->getValueID() == Instruction::Mul + Value::InstructionVal;
         Op = U->getOperand(0)) {
      U = cast<Operator>(Op);
      MulOps.push_back(getSCEV(U->getOperand(1)));
    }
    MulOps.push_back(getSCEV(U->getOperand(0)));
    return getMulExpr(MulOps);
  }
  case Instruction::UDiv:
    return getUDivExpr(getSCEV(U->getOperand(0)),
                       getSCEV(U->getOperand(1)));
  case Instruction::Sub:
    return getMinusSCEV(getSCEV(U->getOperand(0)),
                        getSCEV(U->getOperand(1)));
  case Instruction::And:
    // For an expression like x&255 that merely masks off the high bits,
    // use zext(trunc(x)) as the SCEV expression.
    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
      // x & 0 == 0; x & -1 == x.
      if (CI->isNullValue())
        return getSCEV(U->getOperand(1));
      if (CI->isAllOnesValue())
        return getSCEV(U->getOperand(0));
      const APInt &A = CI->getValue();

      // Instcombine's ShrinkDemandedConstant may strip bits out of
      // constants, obscuring what would otherwise be a low-bits mask.
      // Use ComputeMaskedBits to compute what ShrinkDemandedConstant
      // knew about to reconstruct a low-bits mask value.
      unsigned LZ = A.countLeadingZeros();
      unsigned BitWidth = A.getBitWidth();
      APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
      ComputeMaskedBits(U->getOperand(0), KnownZero, KnownOne, TD);

      APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ);

      // If the AND behaves exactly like a low-bits mask, model it as
      // zext(trunc(x)) to the masked width.
      if (LZ != 0 && !((~A & ~KnownZero) & EffectiveMask))
        return
          getZeroExtendExpr(getTruncateExpr(getSCEV(U->getOperand(0)),
                                IntegerType::get(getContext(), BitWidth - LZ)),
                            U->getType());
    }
    break;

  case Instruction::Or:
    // If the RHS of the Or is a constant, we may have something like:
    // X*4+1 which got turned into X*4|1.  Handle this as an Add so loop
    // optimizations will transparently handle this case.
    //
    // In order for this transformation to be safe, the LHS must be of the
    // form X*(2^n) and the Or constant must be less than 2^n.
    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
      const SCEV *LHS = getSCEV(U->getOperand(0));
      const APInt &CIVal = CI->getValue();
      // Safe only when the LHS has at least as many trailing zero bits
      // as the constant occupies: then or == add, with no carries.
      if (GetMinTrailingZeros(LHS) >=
          (CIVal.getBitWidth() - CIVal.countLeadingZeros())) {
        // Build a plain add SCEV.
        const SCEV *S = getAddExpr(LHS, getSCEV(CI));
        // If the LHS of the add was an addrec and it has no-wrap flags,
        // transfer the no-wrap flags, since an or won't introduce a wrap.
        if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) {
          const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS);
          const_cast<SCEVAddRecExpr *>(NewAR)->setNoWrapFlags(
            OldAR->getNoWrapFlags());
        }
        return S;
      }
    }
    break;
  case Instruction::Xor:
    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
      // If the RHS of the xor is a signbit, then this is just an add.
      // Instcombine turns add of signbit into xor as a strength reduction step.
      if (CI->getValue().isSignBit())
        return getAddExpr(getSCEV(U->getOperand(0)),
                          getSCEV(U->getOperand(1)));

      // If the RHS of xor is -1, then this is a not operation.
      if (CI->isAllOnesValue())
        return getNotSCEV(getSCEV(U->getOperand(0)));

      // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
      // This is a variant of the check for xor with -1, and it handles
      // the case where instcombine has trimmed non-demanded bits out
      // of an xor with -1.
      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0)))
        if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1)))
          if (BO->getOpcode() == Instruction::And &&
              LCI->getValue() == CI->getValue())
            if (const SCEVZeroExtendExpr *Z =
                  dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) {
              Type *UTy = U->getType();
              const SCEV *Z0 = Z->getOperand();
              Type *Z0Ty = Z0->getType();
              unsigned Z0TySize = getTypeSizeInBits(Z0Ty);

              // If C is a low-bits mask, the zero extend is serving to
              // mask off the high bits. Complement the operand and
              // re-apply the zext.
              if (APIntOps::isMask(Z0TySize, CI->getValue()))
                return getZeroExtendExpr(getNotSCEV(Z0), UTy);

              // If C is a single bit, it may be in the sign-bit position
              // before the zero-extend. In this case, represent the xor
              // using an add, which is equivalent, and re-apply the zext.
              APInt Trunc = CI->getValue().trunc(Z0TySize);
              if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
                  Trunc.isSignBit())
                return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
                                         UTy);
            }
    }
    break;

  case Instruction::Shl:
    // Turn shift left of a constant amount into a multiply.
    if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
      uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();

      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (SA->getValue().uge(BitWidth))
        break;

      // x << n  ==  x * 2^n.
      Constant *X = ConstantInt::get(getContext(),
        APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
      return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
    }
    break;

  case Instruction::LShr:
    // Turn logical shift right of a constant into a unsigned divide.
    if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
      uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();

      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (SA->getValue().uge(BitWidth))
        break;

      // x >>u n  ==  x /u 2^n.
      Constant *X = ConstantInt::get(getContext(),
        APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
      return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
    }
    break;

  case Instruction::AShr:
    // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1)))
      if (Operator *L = dyn_cast<Operator>(U->getOperand(0)))
        if (L->getOpcode() == Instruction::Shl &&
            L->getOperand(1) == U->getOperand(1)) {
          uint64_t BitWidth = getTypeSizeInBits(U->getType());

          // If the shift count is not less than the bitwidth, the result of
          // the shift is undefined. Don't try to analyze it, because the
          // resolution chosen here may differ from the resolution chosen in
          // other parts of the compiler.
          if (CI->getValue().uge(BitWidth))
            break;

          uint64_t Amt = BitWidth - CI->getZExtValue();
          if (Amt == BitWidth)
            return getSCEV(L->getOperand(0));       // shift by zero --> noop
          return
            getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
                                              IntegerType::get(getContext(),
                                                               Amt)),
                              U->getType());
        }
    break;

  case Instruction::Trunc:
    return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::ZExt:
    return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::SExt:
    return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::BitCast:
    // BitCasts are no-op casts so we just eliminate the cast.
    if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
      return getSCEV(U->getOperand(0));
    break;

  // It's tempting to handle inttoptr and ptrtoint as no-ops, however this can
  // lead to pointer expressions which cannot safely be expanded to GEPs,
  // because ScalarEvolution doesn't respect the GEP aliasing rules when
  // simplifying integer expressions.

  case Instruction::GetElementPtr:
    return createNodeForGEP(cast<GEPOperator>(U));

  case Instruction::PHI:
    return createNodeForPHI(cast<PHINode>(U));

  case Instruction::Select:
    // This could be a smax or umax that was lowered earlier.
    // Try to recover it.
    if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) {
      Value *LHS = ICI->getOperand(0);
      Value *RHS = ICI->getOperand(1);
      switch (ICI->getPredicate()) {
      case ICmpInst::ICMP_SLT:
      case ICmpInst::ICMP_SLE:
        // Canonicalize so LHS is the larger operand.
        std::swap(LHS, RHS);
        // fall through
      case ICmpInst::ICMP_SGT:
      case ICmpInst::ICMP_SGE:
        // a >s b ? a+x : b+x  ->  smax(a, b)+x
        // a >s b ? b+x : a+x  ->  smin(a, b)+x
        if (LHS->getType() == U->getType()) {
          const SCEV *LS = getSCEV(LHS);
          const SCEV *RS = getSCEV(RHS);
          const SCEV *LA = getSCEV(U->getOperand(1));
          const SCEV *RA = getSCEV(U->getOperand(2));
          // The "+x" offset must be identical on both arms.
          const SCEV *LDiff = getMinusSCEV(LA, LS);
          const SCEV *RDiff = getMinusSCEV(RA, RS);
          if (LDiff == RDiff)
            return getAddExpr(getSMaxExpr(LS, RS), LDiff);
          LDiff = getMinusSCEV(LA, RS);
          RDiff = getMinusSCEV(RA, LS);
          if (LDiff == RDiff)
            return getAddExpr(getSMinExpr(LS, RS), LDiff);
        }
        break;
      case ICmpInst::ICMP_ULT:
      case ICmpInst::ICMP_ULE:
        std::swap(LHS, RHS);
        // fall through
      case ICmpInst::ICMP_UGT:
      case ICmpInst::ICMP_UGE:
        // a >u b ? a+x : b+x  ->  umax(a, b)+x
        // a >u b ? b+x : a+x  ->  umin(a, b)+x
        if (LHS->getType() == U->getType()) {
          const SCEV *LS = getSCEV(LHS);
          const SCEV *RS = getSCEV(RHS);
          const SCEV *LA = getSCEV(U->getOperand(1));
          const SCEV *RA = getSCEV(U->getOperand(2));
          const SCEV *LDiff = getMinusSCEV(LA, LS);
          const SCEV *RDiff = getMinusSCEV(RA, RS);
          if (LDiff == RDiff)
            return getAddExpr(getUMaxExpr(LS, RS), LDiff);
          LDiff = getMinusSCEV(LA, RS);
          RDiff = getMinusSCEV(RA, LS);
          if (LDiff == RDiff)
            return getAddExpr(getUMinExpr(LS, RS), LDiff);
        }
        break;
      case ICmpInst::ICMP_NE:
        // n != 0 ? n+x : 1+x  ->  umax(n, 1)+x
        if (LHS->getType() == U->getType() &&
            isa<ConstantInt>(RHS) &&
            cast<ConstantInt>(RHS)->isZero()) {
          const SCEV *One = getConstant(LHS->getType(), 1);
          const SCEV *LS = getSCEV(LHS);
          const SCEV *LA = getSCEV(U->getOperand(1));
          const SCEV *RA = getSCEV(U->getOperand(2));
          const SCEV *LDiff = getMinusSCEV(LA, LS);
          const SCEV *RDiff = getMinusSCEV(RA, One);
          if (LDiff == RDiff)
            return getAddExpr(getUMaxExpr(One, LS), LDiff);
        }
        break;
      case ICmpInst::ICMP_EQ:
        // n == 0 ? 1+x : n+x  ->  umax(n, 1)+x
        if (LHS->getType() == U->getType() &&
            isa<ConstantInt>(RHS) &&
            cast<ConstantInt>(RHS)->isZero()) {
          const SCEV *One = getConstant(LHS->getType(), 1);
          const SCEV *LS = getSCEV(LHS);
          const SCEV *LA = getSCEV(U->getOperand(1));
          const SCEV *RA = getSCEV(U->getOperand(2));
          const SCEV *LDiff = getMinusSCEV(LA, One);
          const SCEV *RDiff = getMinusSCEV(RA, LS);
          if (LDiff == RDiff)
            return getAddExpr(getUMaxExpr(One, LS), LDiff);
        }
        break;
      default:
        break;
      }
    }
    // Unrecognized select pattern: fall through to the default case below.

  default: // We cannot analyze this expression.
    break;
  }

  return getUnknown(V);
}
3959193323Sed
3960193323Sed
3961193323Sed
3962193323Sed//===----------------------------------------------------------------------===//
3963193323Sed//                   Iteration Count Computation Code
3964193323Sed//
3965193323Sed
3966226633Sdim/// getSmallConstantTripCount - Returns the maximum trip count of this loop as a
3967234353Sdim/// normal unsigned value. Returns 0 if the trip count is unknown or not
3968234353Sdim/// constant. Will also return 0 if the maximum trip count is very large (>=
3969234353Sdim/// 2^32).
3970234353Sdim///
3971234353Sdim/// This "trip count" assumes that control exits via ExitingBlock. More
3972234353Sdim/// precisely, it is the number of times that control may reach ExitingBlock
3973234353Sdim/// before taking the branch. For loops with multiple exits, it may not be the
3974234353Sdim/// number times that the loop header executes because the loop may exit
3975234353Sdim/// prematurely via another branch.
3976251662Sdim///
3977251662Sdim/// FIXME: We conservatively call getBackedgeTakenCount(L) instead of
3978251662Sdim/// getExitCount(L, ExitingBlock) to compute a safe trip count considering all
3979251662Sdim/// loop exits. getExitCount() may return an exact count for this branch
3980251662Sdim/// assuming no-signed-wrap. The number of well-defined iterations may actually
3981251662Sdim/// be higher than this trip count if this exit test is skipped and the loop
3982251662Sdim/// exits via a different branch. Ideally, getExitCount() would know whether it
3983251662Sdim/// depends on a NSW assumption, and we would only fall back to a conservative
3984251662Sdim/// trip count in that case.
3985234353Sdimunsigned ScalarEvolution::
3986263508SdimgetSmallConstantTripCount(Loop *L, BasicBlock * /*ExitingBlock*/) {
3987226633Sdim  const SCEVConstant *ExitCount =
3988251662Sdim    dyn_cast<SCEVConstant>(getBackedgeTakenCount(L));
3989226633Sdim  if (!ExitCount)
3990226633Sdim    return 0;
3991226633Sdim
3992226633Sdim  ConstantInt *ExitConst = ExitCount->getValue();
3993226633Sdim
3994226633Sdim  // Guard against huge trip counts.
3995226633Sdim  if (ExitConst->getValue().getActiveBits() > 32)
3996226633Sdim    return 0;
3997226633Sdim
3998226633Sdim  // In case of integer overflow, this returns 0, which is correct.
3999226633Sdim  return ((unsigned)ExitConst->getZExtValue()) + 1;
4000226633Sdim}
4001226633Sdim
/// getSmallConstantTripMultiple - Returns the largest constant divisor of the
/// trip count of this loop as a normal unsigned value, if possible. This
/// means that the actual trip count is always a multiple of the returned
/// value (don't forget the trip count could very well be zero as well!).
///
/// Returns 1 if the trip count is unknown or not guaranteed to be the
/// multiple of a constant (which is also the case if the trip count is simply
/// constant, use getSmallConstantTripCount for that case). Will also return 1
/// if the trip count is very large (>= 2^32).
///
/// As explained in the comments for getSmallConstantTripCount, this assumes
/// that control exits the loop via ExitingBlock.
unsigned ScalarEvolution::
getSmallConstantTripMultiple(Loop *L, BasicBlock * /*ExitingBlock*/) {
  const SCEV *ExitCount = getBackedgeTakenCount(L);
  if (ExitCount == getCouldNotCompute())
    return 1;

  // Get the trip count from the BE count by adding 1.
  const SCEV *TCMul = getAddExpr(ExitCount,
                                 getConstant(ExitCount->getType(), 1));
  // FIXME: SCEV distributes multiplication as V1*C1 + V2*C1. We could attempt
  // to factor simple cases.
  // A constant factor of a SCEVMulExpr is canonically its first operand, so
  // peeling off operand 0 recovers the constant multiple when one exists.
  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(TCMul))
    TCMul = Mul->getOperand(0);

  const SCEVConstant *MulC = dyn_cast<SCEVConstant>(TCMul);
  if (!MulC)
    return 1;

  ConstantInt *Result = MulC->getValue();

  // Guard against huge trip counts (this requires checking
  // for zero to handle the case where the trip count == -1 and the
  // addition wraps).
  if (!Result || Result->getValue().getActiveBits() > 32 ||
      Result->getValue().getActiveBits() == 0)
    return 1;

  return (unsigned)Result->getZExtValue();
}
4043226633Sdim
// getExitCount - Get the expression for the number of loop iterations for which
// this loop is guaranteed not to exit via ExitingBlock. Otherwise return
// SCEVCouldNotCompute.
//
// Thin wrapper: defers to the per-exit record cached in BackedgeTakenInfo.
const SCEV *ScalarEvolution::getExitCount(Loop *L, BasicBlock *ExitingBlock) {
  return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
}
4050226633Sdim
/// getBackedgeTakenCount - If the specified loop has a predictable
/// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute
/// object. The backedge-taken count is the number of times the loop header
/// will be branched to from within the loop. This is one less than the
/// trip count of the loop, since it doesn't count the first iteration,
/// when the header is branched to from outside the loop.
///
/// Note that it is not valid to call this method on a loop without a
/// loop-invariant backedge-taken count (see
/// hasLoopInvariantBackedgeTakenCount).
///
const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
  return getBackedgeTakenInfo(L).getExact(this);
}
4065193323Sed
/// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except
/// return the least SCEV value that is known never to be less than the
/// actual backedge taken count.
const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
  return getBackedgeTakenInfo(L).getMax(this);
}
4072193323Sed
4073198090Srdivacky/// PushLoopPHIs - Push PHI nodes in the header of the given loop
4074198090Srdivacky/// onto the given Worklist.
4075198090Srdivackystatic void
4076198090SrdivackyPushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
4077198090Srdivacky  BasicBlock *Header = L->getHeader();
4078198090Srdivacky
4079198090Srdivacky  // Push all Loop-header PHIs onto the Worklist stack.
4080198090Srdivacky  for (BasicBlock::iterator I = Header->begin();
4081198090Srdivacky       PHINode *PN = dyn_cast<PHINode>(I); ++I)
4082198090Srdivacky    Worklist.push_back(PN);
4083198090Srdivacky}
4084198090Srdivacky
/// getBackedgeTakenInfo - Return the cached backedge-taken information for
/// the given loop, computing (and caching) it on first request.
const ScalarEvolution::BackedgeTakenInfo &
ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
  // Initially insert an invalid entry for this loop. If the insertion
  // succeeds, proceed to actually compute a backedge-taken count and
  // update the value. The temporary CouldNotCompute value tells SCEV
  // code elsewhere that it shouldn't attempt to request a new
  // backedge-taken count, which could result in infinite recursion.
  std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
    BackedgeTakenCounts.insert(std::make_pair(L, BackedgeTakenInfo()));
  if (!Pair.second)
    return Pair.first->second;

  // ComputeBackedgeTakenCount may allocate memory for its result. Inserting it
  // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result
  // must be cleared in this scope.
  BackedgeTakenInfo Result = ComputeBackedgeTakenCount(L);

  if (Result.getExact(this) != getCouldNotCompute()) {
    assert(isLoopInvariant(Result.getExact(this), L) &&
           isLoopInvariant(Result.getMax(this), L) &&
           "Computed backedge-taken count isn't loop invariant for loop!");
    ++NumTripCountsComputed;
  }
  else if (Result.getMax(this) == getCouldNotCompute() &&
           isa<PHINode>(L->getHeader()->begin())) {
    // Only count loops that have phi nodes as not being computable.
    ++NumTripCountsNotComputed;
  }

  // Now that we know more about the trip count for this loop, forget any
  // existing SCEV values for PHI nodes in this loop since they are only
  // conservative estimates made without the benefit of trip count
  // information. This is similar to the code in forgetLoop, except that
  // it handles SCEVUnknown PHI nodes specially.
  if (Result.hasAnyInfo()) {
    SmallVector<Instruction *, 16> Worklist;
    PushLoopPHIs(L, Worklist);

    SmallPtrSet<Instruction *, 8> Visited;
    while (!Worklist.empty()) {
      Instruction *I = Worklist.pop_back_val();
      if (!Visited.insert(I)) continue;

      ValueExprMapType::iterator It =
        ValueExprMap.find_as(static_cast<Value *>(I));
      if (It != ValueExprMap.end()) {
        const SCEV *Old = It->second;

        // SCEVUnknown for a PHI either means that it has an unrecognized
        // structure, or it's a PHI that's in the progress of being computed
        // by createNodeForPHI.  In the former case, additional loop trip
        // count information isn't going to change anything. In the latter
        // case, createNodeForPHI will perform the necessary updates on its
        // own when it gets to that point.
        if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
          forgetMemoizedResults(Old);
          ValueExprMap.erase(It);
        }
        if (PHINode *PN = dyn_cast<PHINode>(I))
          ConstantEvolutionLoopExitValue.erase(PN);
      }

      // Anything computed from I may also be stale; walk its users too.
      PushDefUseChildren(I, Worklist);
    }
  }

  // Re-lookup the insert position, since the call to
  // ComputeBackedgeTakenCount above could result in a
  // recursive call to getBackedgeTakenInfo (on a different
  // loop), which would invalidate the iterator computed
  // earlier.
  return BackedgeTakenCounts.find(L)->second = Result;
}
4158193323Sed
/// forgetLoop - This method should be called by the client when it has
/// changed a loop in a way that may effect ScalarEvolution's ability to
/// compute a trip count, or if the loop is deleted.
void ScalarEvolution::forgetLoop(const Loop *L) {
  // Drop any stored trip count value. clear() must run before erase() to
  // free the heap-allocated ExitNotTakenInfo list owned by the entry.
  DenseMap<const Loop*, BackedgeTakenInfo>::iterator BTCPos =
    BackedgeTakenCounts.find(L);
  if (BTCPos != BackedgeTakenCounts.end()) {
    BTCPos->second.clear();
    BackedgeTakenCounts.erase(BTCPos);
  }

  // Drop information about expressions based on loop-header PHIs.
  SmallVector<Instruction *, 16> Worklist;
  PushLoopPHIs(L, Worklist);

  // Walk the def-use chains rooted at the header PHIs, dropping every
  // memoized SCEV that could have depended on the old trip-count info.
  SmallPtrSet<Instruction *, 8> Visited;
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    if (!Visited.insert(I)) continue;

    ValueExprMapType::iterator It =
      ValueExprMap.find_as(static_cast<Value *>(I));
    if (It != ValueExprMap.end()) {
      forgetMemoizedResults(It->second);
      ValueExprMap.erase(It);
      if (PHINode *PN = dyn_cast<PHINode>(I))
        ConstantEvolutionLoopExitValue.erase(PN);
    }

    PushDefUseChildren(I, Worklist);
  }

  // Forget all contained loops too, to avoid dangling entries in the
  // ValuesAtScopes map.
  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
    forgetLoop(*I);
}
4197193323Sed
/// forgetValue - This method should be called by the client when it has
/// changed a value in a way that may effect its value, or which may
/// disconnect it from a def-use chain linking it to a loop.
void ScalarEvolution::forgetValue(Value *V) {
  // Only instructions have memoized SCEVs reachable through def-use chains;
  // anything else has nothing to invalidate here.
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return;

  // Drop information about expressions based on loop-header PHIs.
  SmallVector<Instruction *, 16> Worklist;
  Worklist.push_back(I);

  // Same invalidation walk as forgetLoop, but seeded from a single value.
  SmallPtrSet<Instruction *, 8> Visited;
  while (!Worklist.empty()) {
    I = Worklist.pop_back_val();
    if (!Visited.insert(I)) continue;

    ValueExprMapType::iterator It =
      ValueExprMap.find_as(static_cast<Value *>(I));
    if (It != ValueExprMap.end()) {
      forgetMemoizedResults(It->second);
      ValueExprMap.erase(It);
      if (PHINode *PN = dyn_cast<PHINode>(I))
        ConstantEvolutionLoopExitValue.erase(PN);
    }

    PushDefUseChildren(I, Worklist);
  }
}
4226204642Srdivacky
/// getExact - Get the exact loop backedge taken count considering all loop
/// exits. A computable result can only be return for loops with a single exit.
/// Returning the minimum taken count among all exits is incorrect because one
/// of the loop's exit limit's may have been skipped. HowFarToZero assumes that
/// the limit of each loop test is never skipped. This is a valid assumption as
/// long as the loop exits via that test. For precise results, it is the
/// caller's responsibility to specify the relevant loop exit using
/// getExact(ExitingBlock, SE).
const SCEV *
ScalarEvolution::BackedgeTakenInfo::getExact(ScalarEvolution *SE) const {
  // If any exits were not computable, the loop is not computable.
  if (!ExitNotTaken.isCompleteList()) return SE->getCouldNotCompute();

  // We need exactly one computable exit.
  if (!ExitNotTaken.ExitingBlock) return SE->getCouldNotCompute();
  assert(ExitNotTaken.ExactNotTaken && "uninitialized not-taken info");

  // Walk the intrusive list of per-exit records; every record must agree on
  // a single count, otherwise the result is CouldNotCompute (see above).
  const SCEV *BECount = 0;
  for (const ExitNotTakenInfo *ENT = &ExitNotTaken;
       ENT != 0; ENT = ENT->getNextExit()) {

    assert(ENT->ExactNotTaken != SE->getCouldNotCompute() && "bad exit SCEV");

    if (!BECount)
      BECount = ENT->ExactNotTaken;
    else if (BECount != ENT->ExactNotTaken)
      return SE->getCouldNotCompute();
  }
  assert(BECount && "Invalid not taken count for loop exit");
  return BECount;
}
4258226633Sdim
4259226633Sdim/// getExact - Get the exact not taken count for this loop exit.
4260226633Sdimconst SCEV *
4261226633SdimScalarEvolution::BackedgeTakenInfo::getExact(BasicBlock *ExitingBlock,
4262226633Sdim                                             ScalarEvolution *SE) const {
4263226633Sdim  for (const ExitNotTakenInfo *ENT = &ExitNotTaken;
4264226633Sdim       ENT != 0; ENT = ENT->getNextExit()) {
4265226633Sdim
4266226633Sdim    if (ENT->ExitingBlock == ExitingBlock)
4267226633Sdim      return ENT->ExactNotTaken;
4268226633Sdim  }
4269226633Sdim  return SE->getCouldNotCompute();
4270226633Sdim}
4271226633Sdim
4272226633Sdim/// getMax - Get the max backedge taken count for the loop.
4273226633Sdimconst SCEV *
4274226633SdimScalarEvolution::BackedgeTakenInfo::getMax(ScalarEvolution *SE) const {
4275226633Sdim  return Max ? Max : SE->getCouldNotCompute();
4276226633Sdim}
4277226633Sdim
4278249423Sdimbool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S,
4279249423Sdim                                                    ScalarEvolution *SE) const {
4280249423Sdim  if (Max && Max != SE->getCouldNotCompute() && SE->hasOperand(Max, S))
4281249423Sdim    return true;
4282249423Sdim
4283249423Sdim  if (!ExitNotTaken.ExitingBlock)
4284249423Sdim    return false;
4285249423Sdim
4286249423Sdim  for (const ExitNotTakenInfo *ENT = &ExitNotTaken;
4287249423Sdim       ENT != 0; ENT = ENT->getNextExit()) {
4288249423Sdim
4289249423Sdim    if (ENT->ExactNotTaken != SE->getCouldNotCompute()
4290249423Sdim        && SE->hasOperand(ENT->ExactNotTaken, S)) {
4291249423Sdim      return true;
4292249423Sdim    }
4293249423Sdim  }
4294249423Sdim  return false;
4295249423Sdim}
4296249423Sdim
/// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each
/// computable exit into a persistent ExitNotTakenInfo array.
ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
  SmallVectorImpl< std::pair<BasicBlock *, const SCEV *> > &ExitCounts,
  bool Complete, const SCEV *MaxCount) : Max(MaxCount) {

  // Record whether some exits were left out (e.g. not computable), so
  // getExact() knows the list cannot be trusted for a whole-loop answer.
  if (!Complete)
    ExitNotTaken.setIncomplete();

  unsigned NumExits = ExitCounts.size();
  if (NumExits == 0) return;

  // The first exit is stored inline in ExitNotTaken (no allocation).
  ExitNotTaken.ExitingBlock = ExitCounts[0].first;
  ExitNotTaken.ExactNotTaken = ExitCounts[0].second;
  if (NumExits == 1) return;

  // Handle the rare case of multiple computable exits.
  // The extra records live in a heap array freed by clear(); they are
  // chained into a singly-linked list hanging off the inline head record.
  ExitNotTakenInfo *ENT = new ExitNotTakenInfo[NumExits-1];

  ExitNotTakenInfo *PrevENT = &ExitNotTaken;
  for (unsigned i = 1; i < NumExits; ++i, PrevENT = ENT, ++ENT) {
    PrevENT->setNextExit(ENT);
    ENT->ExitingBlock = ExitCounts[i].first;
    ENT->ExactNotTaken = ExitCounts[i].second;
  }
}
4323226633Sdim
4324226633Sdim/// clear - Invalidate this result and free the ExitNotTakenInfo array.
4325226633Sdimvoid ScalarEvolution::BackedgeTakenInfo::clear() {
4326226633Sdim  ExitNotTaken.ExitingBlock = 0;
4327226633Sdim  ExitNotTaken.ExactNotTaken = 0;
4328226633Sdim  delete[] ExitNotTaken.getNextExit();
4329226633Sdim}
4330226633Sdim
/// ComputeBackedgeTakenCount - Compute the number of times the backedge
/// of the specified loop will execute.
ScalarEvolution::BackedgeTakenInfo
ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
  SmallVector<BasicBlock *, 8> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  // Examine all exits and pick the most conservative values.
  const SCEV *MaxBECount = getCouldNotCompute();
  bool CouldComputeBECount = true;
  SmallVector<std::pair<BasicBlock *, const SCEV *>, 4> ExitCounts;
  for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
    ExitLimit EL = ComputeExitLimit(L, ExitingBlocks[i]);
    if (EL.Exact == getCouldNotCompute())
      // We couldn't compute an exact value for this exit, so
      // we won't be able to compute an exact value for the loop.
      CouldComputeBECount = false;
    else
      ExitCounts.push_back(std::make_pair(ExitingBlocks[i], EL.Exact));

    if (MaxBECount == getCouldNotCompute())
      MaxBECount = EL.Max;
    else if (EL.Max != getCouldNotCompute()) {
      // We cannot take the "min" MaxBECount, because non-unit stride loops may
      // skip some loop tests. Taking the max over the exits is sufficiently
      // conservative.  TODO: We could do better taking into consideration
      // that (1) the loop has unit stride (2) the last loop test is
      // less-than/greater-than (3) any loop test is less-than/greater-than AND
      // falls-through some constant times less then the other tests.
      MaxBECount = getUMaxFromMismatchedTypes(MaxBECount, EL.Max);
    }
  }

  return BackedgeTakenInfo(ExitCounts, CouldComputeBECount, MaxBECount);
}
4366194612Sed
/// ComputeExitLimit - Compute the number of times the backedge of the specified
/// loop will execute if it exits via the specified block.
ScalarEvolution::ExitLimit
ScalarEvolution::ComputeExitLimit(const Loop *L, BasicBlock *ExitingBlock) {

  // Okay, we've chosen an exiting block.  See what condition causes us to
  // exit at this block.
  //
  // FIXME: we should be able to handle switch instructions (with a single exit)
  BranchInst *ExitBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
  if (ExitBr == 0) return getCouldNotCompute();
  assert(ExitBr->isConditional() && "If unconditional, it can't be in loop!");

  // At this point, we know we have a conditional branch that determines whether
  // the loop is exited.  However, we don't know if the branch is executed each
  // time through the loop.  If not, then the execution count of the branch will
  // not be equal to the trip count of the loop.
  //
  // Currently we check for this by checking to see if the Exit branch goes to
  // the loop header.  If so, we know it will always execute the same number of
  // times as the loop.  We also handle the case where the exit block *is* the
  // loop header.  This is common for un-rotated loops.
  //
  // If both of those tests fail, walk up the unique predecessor chain to the
  // header, stopping if there is an edge that doesn't exit the loop. If the
  // header is reached, the execution count of the branch will be equal to the
  // trip count of the loop.
  //
  //  More extensive analysis could be done to handle more cases here.
  //
  if (ExitBr->getSuccessor(0) != L->getHeader() &&
      ExitBr->getSuccessor(1) != L->getHeader() &&
      ExitBr->getParent() != L->getHeader()) {
    // The simple checks failed, try climbing the unique predecessor chain
    // up to the header.
    bool Ok = false;
    for (BasicBlock *BB = ExitBr->getParent(); BB; ) {
      BasicBlock *Pred = BB->getUniquePredecessor();
      if (!Pred)
        return getCouldNotCompute();
      TerminatorInst *PredTerm = Pred->getTerminator();
      for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) {
        BasicBlock *PredSucc = PredTerm->getSuccessor(i);
        if (PredSucc == BB)
          continue;
        // If the predecessor has a successor that isn't BB and isn't
        // outside the loop, assume the worst.
        if (L->contains(PredSucc))
          return getCouldNotCompute();
      }
      if (Pred == L->getHeader()) {
        Ok = true;
        break;
      }
      BB = Pred;
    }
    if (!Ok)
      return getCouldNotCompute();
  }

  // Proceed to the next level to examine the exit condition expression.
  // IsSubExpr is false here: this condition directly controls the exit.
  return ComputeExitLimitFromCond(L, ExitBr->getCondition(),
                                  ExitBr->getSuccessor(0),
                                  ExitBr->getSuccessor(1),
                                  /*IsSubExpr=*/false);
}
4433194612Sed
/// ComputeExitLimitFromCond - Compute the number of times the
/// backedge of the specified loop will execute if its exit condition
/// were a conditional branch of ExitCond, TBB, and FBB.
///
/// @param IsSubExpr is true if ExitCond does not directly control the exit
/// branch. In this case, we cannot assume that the loop only exits when the
/// condition is true and cannot infer that failing to meet the condition prior
/// to integer wraparound results in undefined behavior.
ScalarEvolution::ExitLimit
ScalarEvolution::ComputeExitLimitFromCond(const Loop *L,
                                          Value *ExitCond,
                                          BasicBlock *TBB,
                                          BasicBlock *FBB,
                                          bool IsSubExpr) {
  // Check if the controlling expression for this loop is an And or Or.
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
    if (BO->getOpcode() == Instruction::And) {
      // Recurse on the operands of the and.
      // If the true edge stays in the loop, either sub-condition going false
      // exits, so each sub-condition is only a partial exit test.
      bool EitherMayExit = L->contains(TBB);
      ExitLimit EL0 = ComputeExitLimitFromCond(L, BO->getOperand(0), TBB, FBB,
                                               IsSubExpr || EitherMayExit);
      ExitLimit EL1 = ComputeExitLimitFromCond(L, BO->getOperand(1), TBB, FBB,
                                               IsSubExpr || EitherMayExit);
      const SCEV *BECount = getCouldNotCompute();
      const SCEV *MaxBECount = getCouldNotCompute();
      if (EitherMayExit) {
        // Both conditions must be true for the loop to continue executing.
        // Choose the less conservative count.
        if (EL0.Exact == getCouldNotCompute() ||
            EL1.Exact == getCouldNotCompute())
          BECount = getCouldNotCompute();
        else
          BECount = getUMinFromMismatchedTypes(EL0.Exact, EL1.Exact);
        if (EL0.Max == getCouldNotCompute())
          MaxBECount = EL1.Max;
        else if (EL1.Max == getCouldNotCompute())
          MaxBECount = EL0.Max;
        else
          MaxBECount = getUMinFromMismatchedTypes(EL0.Max, EL1.Max);
      } else {
        // Both conditions must be true at the same time for the loop to exit.
        // For now, be conservative.
        assert(L->contains(FBB) && "Loop block has no successor in loop!");
        if (EL0.Max == EL1.Max)
          MaxBECount = EL0.Max;
        if (EL0.Exact == EL1.Exact)
          BECount = EL0.Exact;
      }

      return ExitLimit(BECount, MaxBECount);
    }
    if (BO->getOpcode() == Instruction::Or) {
      // Recurse on the operands of the or.
      // Mirror of the And case above with the branch sense inverted: if the
      // false edge stays in the loop, either sub-condition going true exits.
      bool EitherMayExit = L->contains(FBB);
      ExitLimit EL0 = ComputeExitLimitFromCond(L, BO->getOperand(0), TBB, FBB,
                                               IsSubExpr || EitherMayExit);
      ExitLimit EL1 = ComputeExitLimitFromCond(L, BO->getOperand(1), TBB, FBB,
                                               IsSubExpr || EitherMayExit);
      const SCEV *BECount = getCouldNotCompute();
      const SCEV *MaxBECount = getCouldNotCompute();
      if (EitherMayExit) {
        // Both conditions must be false for the loop to continue executing.
        // Choose the less conservative count.
        if (EL0.Exact == getCouldNotCompute() ||
            EL1.Exact == getCouldNotCompute())
          BECount = getCouldNotCompute();
        else
          BECount = getUMinFromMismatchedTypes(EL0.Exact, EL1.Exact);
        if (EL0.Max == getCouldNotCompute())
          MaxBECount = EL1.Max;
        else if (EL1.Max == getCouldNotCompute())
          MaxBECount = EL0.Max;
        else
          MaxBECount = getUMinFromMismatchedTypes(EL0.Max, EL1.Max);
      } else {
        // Both conditions must be false at the same time for the loop to exit.
        // For now, be conservative.
        assert(L->contains(TBB) && "Loop block has no successor in loop!");
        if (EL0.Max == EL1.Max)
          MaxBECount = EL0.Max;
        if (EL0.Exact == EL1.Exact)
          BECount = EL0.Exact;
      }

      return ExitLimit(BECount, MaxBECount);
    }
  }

  // With an icmp, it may be feasible to compute an exact backedge-taken count.
  // Proceed to the next level to examine the icmp.
  if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond))
    return ComputeExitLimitFromICmp(L, ExitCondICmp, TBB, FBB, IsSubExpr);

  // Check for a constant condition. These are normally stripped out by
  // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
  // preserve the CFG and is temporarily leaving constant conditions
  // in place.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
    if (L->contains(FBB) == !CI->getZExtValue())
      // The backedge is always taken.
      return getCouldNotCompute();
    else
      // The backedge is never taken.
      return getConstant(CI->getType(), 0);
  }

  // If it's not an integer or pointer comparison then compute it the hard way.
  return ComputeExitCountExhaustively(L, ExitCond, !L->contains(TBB));
}
4543193323Sed
/// ComputeExitLimitFromICmp - Compute the number of times the
/// backedge of the specified loop will execute if its exit condition
/// were a conditional branch of the ICmpInst ExitCond, TBB, and FBB.
ScalarEvolution::ExitLimit
ScalarEvolution::ComputeExitLimitFromICmp(const Loop *L,
                                          ICmpInst *ExitCond,
                                          BasicBlock *TBB,
                                          BasicBlock *FBB,
                                          bool IsSubExpr) {

  // If the condition was exit on true, convert the condition to exit on false
  ICmpInst::Predicate Cond;
  if (!L->contains(FBB))
    Cond = ExitCond->getPredicate();
  else
    Cond = ExitCond->getInversePredicate();

  // Handle common loops like: for (X = "string"; *X; ++X)
  if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
    if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
      ExitLimit ItCnt =
        ComputeLoadConstantCompareExitLimit(LI, RHS, L, Cond);
      if (ItCnt.hasAnyInfo())
        return ItCnt;
    }

  const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
  const SCEV *RHS = getSCEV(ExitCond->getOperand(1));

  // Try to evaluate any dependencies out of the loop.
  LHS = getSCEVAtScope(LHS, L);
  RHS = getSCEVAtScope(RHS, L);

  // At this point, we would like to compute how many iterations of the
  // loop the predicate will return true for these inputs.
  if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) {
    // If there is a loop-invariant, force it into the RHS.
    std::swap(LHS, RHS);
    Cond = ICmpInst::getSwappedPredicate(Cond);
  }

  // Simplify the operands before analyzing them.
  (void)SimplifyICmpOperands(Cond, LHS, RHS);

  // If we have a comparison of a chrec against a constant, try to use value
  // ranges to answer this query.
  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
    if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
      if (AddRec->getLoop() == L) {
        // Form the constant range.
        ConstantRange CompRange(
            ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue()));

        const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
        if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
      }

  // Dispatch on the (possibly inverted/swapped/simplified) predicate.
  switch (Cond) {
  case ICmpInst::ICMP_NE: {                     // while (X != Y)
    // Convert to: while (X-Y != 0)
    ExitLimit EL = HowFarToZero(getMinusSCEV(LHS, RHS), L, IsSubExpr);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  case ICmpInst::ICMP_EQ: {                     // while (X == Y)
    // Convert to: while (X-Y == 0)
    ExitLimit EL = HowFarToNonZero(getMinusSCEV(LHS, RHS), L);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_ULT: {                    // while (X < Y)
    bool IsSigned = Cond == ICmpInst::ICMP_SLT;
    ExitLimit EL = HowManyLessThans(LHS, RHS, L, IsSigned, IsSubExpr);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_UGT: {                    // while (X > Y)
    bool IsSigned = Cond == ICmpInst::ICMP_SGT;
    ExitLimit EL = HowManyGreaterThans(LHS, RHS, L, IsSigned, IsSubExpr);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  default:
#if 0
    dbgs() << "ComputeBackedgeTakenCount ";
    if (ExitCond->getOperand(0)->getType()->isUnsigned())
      dbgs() << "[unsigned] ";
    dbgs() << *LHS << "   "
         << Instruction::getOpcodeName(Instruction::ICmp)
         << "   " << *RHS << "\n";
#endif
    break;
  }
  // Symbolic analysis failed; fall back to brute-force evaluation.
  return ComputeExitCountExhaustively(L, ExitCond, !L->contains(TBB));
}
4641193323Sed
4642193323Sedstatic ConstantInt *
4643193323SedEvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
4644193323Sed                                ScalarEvolution &SE) {
4645198090Srdivacky  const SCEV *InVal = SE.getConstant(C);
4646198090Srdivacky  const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
4647193323Sed  assert(isa<SCEVConstant>(Val) &&
4648193323Sed         "Evaluation of SCEV at constant didn't fold correctly?");
4649193323Sed  return cast<SCEVConstant>(Val)->getValue();
4650193323Sed}
4651193323Sed
/// ComputeLoadConstantCompareExitLimit - Given an exit condition of
/// 'icmp op load X, cst', try to see if we can compute the backedge
/// execution count.
///
/// The pattern recognized here is a load through a GEP of a constant global
/// with exactly one loop-varying (affine AddRec) index.  The trip count is
/// found by brute-force evaluating the first few iterations against the
/// global's initializer.
ScalarEvolution::ExitLimit
ScalarEvolution::ComputeLoadConstantCompareExitLimit(
  LoadInst *LI,
  Constant *RHS,
  const Loop *L,
  ICmpInst::Predicate predicate) {

  // A volatile load can't be assumed to return the global's initializer.
  if (LI->isVolatile()) return getCouldNotCompute();

  // Check to see if the loaded pointer is a getelementptr of a global.
  // TODO: Use SCEV instead of manually grubbing with GEPs.
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
  if (!GEP) return getCouldNotCompute();

  // Make sure that it is really a constant global we are gepping, with an
  // initializer, and make sure the first IDX is really 0.
  GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
      GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
      !cast<Constant>(GEP->getOperand(1))->isNullValue())
    return getCouldNotCompute();

  // Okay, we allow one non-constant index into the GEP instruction.
  Value *VarIdx = 0;
  std::vector<Constant*> Indexes;
  unsigned VarIdxNum = 0;
  for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
      Indexes.push_back(CI);
    } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
      if (VarIdx) return getCouldNotCompute();  // Multiple non-constant idx's.
      VarIdx = GEP->getOperand(i);
      VarIdxNum = i-2;
      Indexes.push_back(0);  // Placeholder; overwritten each iteration below.
    }

  // Loop-invariant loads may be a byproduct of loop optimization. Skip them.
  if (!VarIdx)
    return getCouldNotCompute();

  // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
  // Check to see if X is a loop variant variable value now.
  const SCEV *Idx = getSCEV(VarIdx);
  Idx = getSCEVAtScope(Idx, L);

  // We can only recognize very limited forms of loop index expressions, in
  // particular, only affine AddRec's like {C1,+,C2}.
  const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
  if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) ||
      !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
      !isa<SCEVConstant>(IdxExpr->getOperand(1)))
    return getCouldNotCompute();

  // Brute-force: fold the load for each of the first MaxBruteForceIterations
  // iterations, looking for the first one on which the exit condition is
  // false (i.e. the loop terminates).
  unsigned MaxSteps = MaxBruteForceIterations;
  for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
    ConstantInt *ItCst = ConstantInt::get(
                           cast<IntegerType>(IdxExpr->getType()), IterationNum);
    ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);

    // Form the GEP offset.
    Indexes[VarIdxNum] = Val;

    Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
                                                         Indexes);
    if (Result == 0) break;  // Cannot compute!

    // Evaluate the condition for this iteration.
    Result = ConstantExpr::getICmp(predicate, Result, RHS);
    if (!isa<ConstantInt>(Result)) break;  // Couldn't decide for sure
    if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
#if 0
      dbgs() << "\n***\n*** Computed loop count " << *ItCst
             << "\n*** From global " << *GV << "*** BB: " << *L->getHeader()
             << "***\n";
#endif
      ++NumArrayLenItCounts;
      return getConstant(ItCst);   // Found terminating iteration!
    }
  }
  return getCouldNotCompute();
}
4736193323Sed
4737193323Sed
4738193323Sed/// CanConstantFold - Return true if we can constant fold an instruction of the
4739193323Sed/// specified type, assuming that all operands were constants.
4740193323Sedstatic bool CanConstantFold(const Instruction *I) {
4741193323Sed  if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
4742234353Sdim      isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
4743234353Sdim      isa<LoadInst>(I))
4744193323Sed    return true;
4745193323Sed
4746193323Sed  if (const CallInst *CI = dyn_cast<CallInst>(I))
4747193323Sed    if (const Function *F = CI->getCalledFunction())
4748193323Sed      return canConstantFoldCallTo(F);
4749193323Sed  return false;
4750193323Sed}
4751193323Sed
4752226633Sdim/// Determine whether this instruction can constant evolve within this loop
4753226633Sdim/// assuming its operands can all constant evolve.
4754226633Sdimstatic bool canConstantEvolve(Instruction *I, const Loop *L) {
4755226633Sdim  // An instruction outside of the loop can't be derived from a loop PHI.
4756226633Sdim  if (!L->contains(I)) return false;
4757193323Sed
4758226633Sdim  if (isa<PHINode>(I)) {
4759193323Sed    if (L->getHeader() == I->getParent())
4760226633Sdim      return true;
4761193323Sed    else
4762193323Sed      // We don't currently keep track of the control flow needed to evaluate
4763193323Sed      // PHIs, so we cannot handle PHIs inside of loops.
4764226633Sdim      return false;
4765193323Sed  }
4766193323Sed
4767193323Sed  // If we won't be able to constant fold this expression even if the operands
4768226633Sdim  // are constants, bail early.
4769226633Sdim  return CanConstantFold(I);
4770226633Sdim}
4771193323Sed
/// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by
/// recursing through each instruction operand until reaching a loop header phi.
/// Returns the unique header PHI the expression evolves from, or null if the
/// expression depends on no PHI or on more than one.
static PHINode *
getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L,
                               DenseMap<Instruction *, PHINode *> &PHIMap) {

  // Otherwise, we can evaluate this instruction if all of its operands are
  // constant or derived from a PHI node themselves.
  PHINode *PHI = 0;
  for (Instruction::op_iterator OpI = UseInst->op_begin(),
         OpE = UseInst->op_end(); OpI != OpE; ++OpI) {

    // Constant operands impose no constraint.
    if (isa<Constant>(*OpI)) continue;

    // Non-constant operands must be loop-contained instructions we can fold.
    Instruction *OpInst = dyn_cast<Instruction>(*OpI);
    if (!OpInst || !canConstantEvolve(OpInst, L)) return 0;

    PHINode *P = dyn_cast<PHINode>(OpInst);
    if (!P)
      // If this operand is already visited, reuse the prior result.
      // We may have P != PHI if this is the deepest point at which the
      // inconsistent paths meet.
      P = PHIMap.lookup(OpInst);
    if (!P) {
      // Recurse and memoize the results, whether a phi is found or not.
      // This recursive call invalidates pointers into PHIMap.
      P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap);
      PHIMap[OpInst] = P;
    }
    if (P == 0) return 0;        // Not evolving from PHI
    if (PHI && PHI != P) return 0;  // Evolving from multiple different PHIs.
    PHI = P;
  }
  // This is a expression evolving from a constant PHI!
  return PHI;
}
4808193323Sed
4809226633Sdim/// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
4810226633Sdim/// in the loop that V is derived from.  We allow arbitrary operations along the
4811226633Sdim/// way, but the operands of an operation must either be constants or a value
4812226633Sdim/// derived from a constant PHI.  If this expression does not fit with these
4813226633Sdim/// constraints, return null.
4814226633Sdimstatic PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
4815226633Sdim  Instruction *I = dyn_cast<Instruction>(V);
4816226633Sdim  if (I == 0 || !canConstantEvolve(I, L)) return 0;
4817226633Sdim
4818226633Sdim  if (PHINode *PN = dyn_cast<PHINode>(I)) {
4819226633Sdim    return PN;
4820226633Sdim  }
4821226633Sdim
4822226633Sdim  // Record non-constant instructions contained by the loop.
4823226633Sdim  DenseMap<Instruction *, PHINode *> PHIMap;
4824226633Sdim  return getConstantEvolvingPHIOperands(I, L, PHIMap);
4825226633Sdim}
4826226633Sdim
/// EvaluateExpression - Given an expression that passes the
/// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
/// in the loop has the value PHIVal.  If we can't fold this expression for some
/// reason, return null.  Vals memoizes already-evaluated instructions (and is
/// pre-seeded with the PHI values for the current iteration).
static Constant *EvaluateExpression(Value *V, const Loop *L,
                                    DenseMap<Instruction *, Constant *> &Vals,
                                    const DataLayout *TD,
                                    const TargetLibraryInfo *TLI) {
  // Convenient constant check, but redundant for recursive calls.
  if (Constant *C = dyn_cast<Constant>(V)) return C;
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return 0;

  // Reuse a previously computed (or pre-seeded PHI) value if we have one.
  if (Constant *C = Vals.lookup(I)) return C;

  // An instruction inside the loop depends on a value outside the loop that we
  // weren't given a mapping for, or a value such as a call inside the loop.
  if (!canConstantEvolve(I, L)) return 0;

  // An unmapped PHI can be due to a branch or another loop inside this loop,
  // or due to this not being the initial iteration through a loop where we
  // couldn't compute the evolution of this particular PHI last time.
  if (isa<PHINode>(I)) return 0;

  std::vector<Constant*> Operands(I->getNumOperands());

  // Recursively fold every operand to a constant, memoizing instruction
  // results in Vals as we go; fail if any operand can't be folded.
  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
    Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
    if (!Operand) {
      // Non-instruction operands must already be constants.
      Operands[i] = dyn_cast<Constant>(I->getOperand(i));
      if (!Operands[i]) return 0;
      continue;
    }
    Constant *C = EvaluateExpression(Operand, L, Vals, TD, TLI);
    Vals[Operand] = C;
    if (!C) return 0;
    Operands[i] = C;
  }

  // Compares and (non-volatile) loads need specialized folding entry points;
  // everything else goes through the generic instruction folder.
  if (CmpInst *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
                                           Operands[1], TD, TLI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!LI->isVolatile())
      return ConstantFoldLoadFromConstPtr(Operands[0], TD);
  }
  return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Operands, TD,
                                  TLI);
}
4876193323Sed
/// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
/// in the header of its containing loop, we know the loop executes a
/// constant number of times, and the PHI node is just a recurrence
/// involving constants, fold it.  BEs is the (constant) backedge-taken count.
/// Results are memoized in ConstantEvolutionLoopExitValue; a null cached
/// entry means "previously tried and failed".
Constant *
ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
                                                   const APInt &BEs,
                                                   const Loop *L) {
  DenseMap<PHINode*, Constant*>::const_iterator I =
    ConstantEvolutionLoopExitValue.find(PN);
  if (I != ConstantEvolutionLoopExitValue.end())
    return I->second;

  // Refuse to brute-force more iterations than our global budget.
  if (BEs.ugt(MaxBruteForceIterations))
    return ConstantEvolutionLoopExitValue[PN] = 0;  // Not going to evaluate it.

  Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];

  DenseMap<Instruction *, Constant *> CurrentIterVals;
  BasicBlock *Header = L->getHeader();
  assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");

  // Since the loop is canonicalized, the PHI node must have two entries.  One
  // entry must be a constant (coming in from outside of the loop), and the
  // second must be derived from the same PHI.
  bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
  // Seed CurrentIterVals with the constant start value of every header PHI
  // that has one; non-constant starts are simply left unmapped.
  PHINode *PHI = 0;
  for (BasicBlock::iterator I = Header->begin();
       (PHI = dyn_cast<PHINode>(I)); ++I) {
    Constant *StartCST =
      dyn_cast<Constant>(PHI->getIncomingValue(!SecondIsBackedge));
    if (StartCST == 0) continue;
    CurrentIterVals[PHI] = StartCST;
  }
  // If the PHI we were asked about has no constant start, give up.
  if (!CurrentIterVals.count(PN))
    return RetVal = 0;

  Value *BEValue = PN->getIncomingValue(SecondIsBackedge);

  // Execute the loop symbolically to determine the exit value.
  if (BEs.getActiveBits() >= 32)
    return RetVal = 0; // More than 2^32-1 iterations?? Not doing it!

  unsigned NumIterations = BEs.getZExtValue(); // must be in range
  unsigned IterationNum = 0;
  for (; ; ++IterationNum) {
    if (IterationNum == NumIterations)
      return RetVal = CurrentIterVals[PN];  // Got exit value!

    // Compute the value of the PHIs for the next iteration.
    // EvaluateExpression adds non-phi values to the CurrentIterVals map.
    DenseMap<Instruction *, Constant *> NextIterVals;
    Constant *NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD,
                                           TLI);
    if (NextPHI == 0)
      return 0;        // Couldn't evaluate!
    NextIterVals[PN] = NextPHI;

    bool StoppedEvolving = NextPHI == CurrentIterVals[PN];

    // Also evaluate the other PHI nodes.  However, we don't get to stop if we
    // cease to be able to evaluate one of them or if they stop evolving,
    // because that doesn't necessarily prevent us from computing PN.
    SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute;
    for (DenseMap<Instruction *, Constant *>::const_iterator
           I = CurrentIterVals.begin(), E = CurrentIterVals.end(); I != E; ++I){
      PHINode *PHI = dyn_cast<PHINode>(I->first);
      if (!PHI || PHI == PN || PHI->getParent() != Header) continue;
      PHIsToCompute.push_back(std::make_pair(PHI, I->second));
    }
    // We use two distinct loops because EvaluateExpression may invalidate any
    // iterators into CurrentIterVals.
    for (SmallVectorImpl<std::pair<PHINode *, Constant*> >::const_iterator
             I = PHIsToCompute.begin(), E = PHIsToCompute.end(); I != E; ++I) {
      PHINode *PHI = I->first;
      Constant *&NextPHI = NextIterVals[PHI];
      if (!NextPHI) {   // Not already computed.
        Value *BEValue = PHI->getIncomingValue(SecondIsBackedge);
        NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD, TLI);
      }
      if (NextPHI != I->second)
        StoppedEvolving = false;
    }

    // If all entries in CurrentIterVals == NextIterVals then we can stop
    // iterating, the loop can't continue to change.
    if (StoppedEvolving)
      return RetVal = CurrentIterVals[PN];

    CurrentIterVals.swap(NextIterVals);
  }
}
4969193323Sed
/// ComputeExitCountExhaustively - If the loop is known to execute a
/// constant number of times (the condition evolves only from constants),
/// try to evaluate a few iterations of the loop until we get the exit
/// condition gets a value of ExitWhen (true or false).  If we cannot
/// evaluate the trip count of the loop, return getCouldNotCompute().
const SCEV *ScalarEvolution::ComputeExitCountExhaustively(const Loop *L,
                                                          Value *Cond,
                                                          bool ExitWhen) {
  // The condition must ultimately evolve from a single loop-header PHI.
  PHINode *PN = getConstantEvolvingPHI(Cond, L);
  if (PN == 0) return getCouldNotCompute();

  // If the loop is canonicalized, the PHI will have exactly two entries.
  // That's the only form we support here.
  if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();

  DenseMap<Instruction *, Constant *> CurrentIterVals;
  BasicBlock *Header = L->getHeader();
  assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");

  // One entry must be a constant (coming in from outside of the loop), and the
  // second must be derived from the same PHI.
  bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
  // Seed CurrentIterVals with the constant start value of every header PHI
  // that has one; PHIs without a constant start are left unmapped.
  PHINode *PHI = 0;
  for (BasicBlock::iterator I = Header->begin();
       (PHI = dyn_cast<PHINode>(I)); ++I) {
    Constant *StartCST =
      dyn_cast<Constant>(PHI->getIncomingValue(!SecondIsBackedge));
    if (StartCST == 0) continue;
    CurrentIterVals[PHI] = StartCST;
  }
  // Without a constant start for the controlling PHI we can't simulate.
  if (!CurrentIterVals.count(PN))
    return getCouldNotCompute();

  // Okay, we find a PHI node that defines the trip count of this loop.  Execute
  // the loop symbolically to determine when the condition gets a value of
  // "ExitWhen".

  unsigned MaxIterations = MaxBruteForceIterations;   // Limit analysis.
  for (unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){
    // Fold the exit condition with the PHI values of this iteration.
    ConstantInt *CondVal =
      dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, L, CurrentIterVals,
                                                       TD, TLI));

    // Couldn't symbolically evaluate.
    if (!CondVal) return getCouldNotCompute();

    if (CondVal->getValue() == uint64_t(ExitWhen)) {
      ++NumBruteForceTripCountsComputed;
      return getConstant(Type::getInt32Ty(getContext()), IterationNum);
    }

    // Update all the PHI nodes for the next iteration.
    DenseMap<Instruction *, Constant *> NextIterVals;

    // Create a list of which PHIs we need to compute. We want to do this before
    // calling EvaluateExpression on them because that may invalidate iterators
    // into CurrentIterVals.
    SmallVector<PHINode *, 8> PHIsToCompute;
    for (DenseMap<Instruction *, Constant *>::const_iterator
           I = CurrentIterVals.begin(), E = CurrentIterVals.end(); I != E; ++I){
      PHINode *PHI = dyn_cast<PHINode>(I->first);
      if (!PHI || PHI->getParent() != Header) continue;
      PHIsToCompute.push_back(PHI);
    }
    for (SmallVectorImpl<PHINode *>::const_iterator I = PHIsToCompute.begin(),
             E = PHIsToCompute.end(); I != E; ++I) {
      PHINode *PHI = *I;
      Constant *&NextPHI = NextIterVals[PHI];
      if (NextPHI) continue;    // Already computed!

      Value *BEValue = PHI->getIncomingValue(SecondIsBackedge);
      NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD, TLI);
    }
    CurrentIterVals.swap(NextIterVals);
  }

  // Too many iterations were needed to evaluate.
  return getCouldNotCompute();
}
5049193323Sed
/// getSCEVAtScope - Return a SCEV expression for the specified value
/// at the specified scope in the program.  The L value specifies a loop
/// nest to evaluate the expression at, where null is the top-level or a
/// specified loop is immediately inside of the loop.
///
/// This method can be used to compute the exit value for a variable defined
/// in a loop by querying what the value will hold in the parent loop.
///
/// In the case that a relevant loop exit value cannot be computed, the
/// original value V is returned.
const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
  // Check to see if we've folded this expression at this loop before.
  SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values = ValuesAtScopes[V];
  for (unsigned u = 0; u < Values.size(); u++) {
    if (Values[u].first == L)
      // A null cached value means the computation is in progress (recursive
      // query); return V itself in that case.
      return Values[u].second ? Values[u].second : V;
  }
  // Insert a null placeholder before computing so that recursive queries for
  // (V, L) terminate instead of looping forever.
  Values.push_back(std::make_pair(L, static_cast<const SCEV *>(0)));
  // Otherwise compute it.
  const SCEV *C = computeSCEVAtScope(V, L);
  // NOTE(review): the vector is deliberately re-fetched here — presumably
  // computeSCEVAtScope can insert into ValuesAtScopes and invalidate the
  // 'Values' reference above.  Do not "simplify" by reusing it.
  SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values2 = ValuesAtScopes[V];
  // Scan backwards: our placeholder is likely near the end of the vector.
  for (unsigned u = Values2.size(); u > 0; u--) {
    if (Values2[u - 1].first == L) {
      Values2[u - 1].second = C;
      break;
    }
  }
  return C;
}
5079198090Srdivacky
/// This builds up a Constant using the ConstantExpr interface.  That way, we
/// will return Constants for objects which aren't represented by a
/// SCEVConstant, because SCEVConstant is restricted to ConstantInt.
/// Returns NULL if the SCEV isn't representable as a Constant.
static Constant *BuildConstantFromSCEV(const SCEV *V) {
  switch (V->getSCEVType()) {
    default:  // TODO: smax, umax.
    case scCouldNotCompute:
    case scAddRecExpr:
      break;
    case scConstant:
      return cast<SCEVConstant>(V)->getValue();
    case scUnknown:
      // Only succeeds when the unknown value is itself a Constant.
      return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue());
    case scSignExtend: {
      const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V);
      if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand()))
        return ConstantExpr::getSExt(CastOp, SS->getType());
      break;
    }
    case scZeroExtend: {
      const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V);
      if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand()))
        return ConstantExpr::getZExt(CastOp, SZ->getType());
      break;
    }
    case scTruncate: {
      const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V);
      if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand()))
        return ConstantExpr::getTrunc(CastOp, ST->getType());
      break;
    }
    case scAddExpr: {
      const SCEVAddExpr *SA = cast<SCEVAddExpr>(V);
      if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) {
        // Normalize a leading pointer operand to i8* so that subsequent
        // integer addends can be applied as byte-offset GEPs.
        if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
          unsigned AS = PTy->getAddressSpace();
          Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
          C = ConstantExpr::getBitCast(C, DestPtrTy);
        }
        for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) {
          Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i));
          if (!C2) return 0;

          // First pointer!
          if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) {
            unsigned AS = C2->getType()->getPointerAddressSpace();
            std::swap(C, C2);
            Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
            // The offsets have been converted to bytes.  We can add bytes to an
            // i8* by GEP with the byte count in the first index.
            C = ConstantExpr::getBitCast(C, DestPtrTy);
          }

          // Don't bother trying to sum two pointers. We probably can't
          // statically compute a load that results from it anyway.
          if (C2->getType()->isPointerTy())
            return 0;

          if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
            if (PTy->getElementType()->isStructTy())
              // GEP indices into struct types must be i32.
              C2 = ConstantExpr::getIntegerCast(
                  C2, Type::getInt32Ty(C->getContext()), true);
            C = ConstantExpr::getGetElementPtr(C, C2);
          } else
            C = ConstantExpr::getAdd(C, C2);
        }
        return C;
      }
      break;
    }
    case scMulExpr: {
      const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
      if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
        // Don't bother with pointers at all.
        if (C->getType()->isPointerTy()) return 0;
        for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
          Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
          if (!C2 || C2->getType()->isPointerTy()) return 0;
          C = ConstantExpr::getMul(C, C2);
        }
        return C;
      }
      break;
    }
    case scUDivExpr: {
      const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
      if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
        if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
          if (LHS->getType() == RHS->getType())
            return ConstantExpr::getUDiv(LHS, RHS);
      break;
    }
  }
  return 0;
}
5176234353Sdim
/// computeSCEVAtScope - Evaluate the SCEV expression V in the scope of the
/// loop L.  Constants are scope-independent; SCEVUnknowns wrapping
/// instructions may be forced to their loop-exit value via the backedge-taken
/// count; compound expressions are rebuilt from their operands evaluated at
/// scope.  Returns V itself when no simplification is possible.
const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
  // Constants have the same value in every scope.
  if (isa<SCEVConstant>(V)) return V;

  // If this instruction is evolved from a constant-evolving PHI, compute the
  // exit value from the loop without using SCEVs.
  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
    if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
      const Loop *LI = (*this->LI)[I->getParent()];
      if (LI && LI->getParentLoop() == L)  // Looking for loop exit value.
        if (PHINode *PN = dyn_cast<PHINode>(I))
          if (PN->getParent() == LI->getHeader()) {
            // Okay, there is no closed form solution for the PHI node.  Check
            // to see if the loop that contains it has a known backedge-taken
            // count.  If so, we may be able to force computation of the exit
            // value.
            const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
            if (const SCEVConstant *BTCC =
                  dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
              // Okay, we know how many times the containing loop executes.  If
              // this is a constant evolving PHI node, get the final value at
              // the specified iteration number.
              Constant *RV = getConstantEvolutionLoopExitValue(PN,
                                                   BTCC->getValue()->getValue(),
                                                               LI);
              if (RV) return getSCEV(RV);
            }
          }

      // Okay, this is an expression that we cannot symbolically evaluate
      // into a SCEV.  Check to see if it's possible to symbolically evaluate
      // the arguments into constants, and if so, try to constant propagate the
      // result.  This is particularly useful for computing loop exit values.
      if (CanConstantFold(I)) {
        SmallVector<Constant *, 4> Operands;
        bool MadeImprovement = false;
        for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
          Value *Op = I->getOperand(i);
          // Already-constant operands can be used directly.
          if (Constant *C = dyn_cast<Constant>(Op)) {
            Operands.push_back(C);
            continue;
          }

          // If any of the operands is non-constant and if they are
          // non-integer and non-pointer, don't even try to analyze them
          // with scev techniques.
          if (!isSCEVable(Op->getType()))
            return V;

          // Recursively evaluate the operand at this scope; track whether any
          // operand actually improved so we only fold when it pays off.
          const SCEV *OrigV = getSCEV(Op);
          const SCEV *OpV = getSCEVAtScope(OrigV, L);
          MadeImprovement |= OrigV != OpV;

          Constant *C = BuildConstantFromSCEV(OpV);
          if (!C) return V;
          // Unit-stride pointer arithmetic etc. may change the type; cast the
          // folded constant back to the operand's type.
          if (C->getType() != Op->getType())
            C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
                                                              Op->getType(),
                                                              false),
                                      C, Op->getType());
          Operands.push_back(C);
        }

        // Check to see if getSCEVAtScope actually made an improvement.
        if (MadeImprovement) {
          Constant *C = 0;
          // Compares need the predicate-aware folder; non-volatile loads can
          // be folded from constant memory; everything else goes through the
          // generic instruction folder.
          if (const CmpInst *CI = dyn_cast<CmpInst>(I))
            C = ConstantFoldCompareInstOperands(CI->getPredicate(),
                                                Operands[0], Operands[1], TD,
                                                TLI);
          else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
            if (!LI->isVolatile())
              C = ConstantFoldLoadFromConstPtr(Operands[0], TD);
          } else
            C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
                                         Operands, TD, TLI);
          if (!C) return V;
          return getSCEV(C);
        }
      }
    }

    // This is some other type of SCEVUnknown, just return it.
    return V;
  }

  if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
      if (OpAtScope != Comm->getOperand(i)) {
        // Okay, at least one of these operands is loop variant but might be
        // foldable.  Build a new instance of the folded commutative expression.
        SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
                                            Comm->op_begin()+i);
        NewOps.push_back(OpAtScope);

        for (++i; i != e; ++i) {
          OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
          NewOps.push_back(OpAtScope);
        }
        // Rebuild through the canonicalizing constructors so folding applies.
        if (isa<SCEVAddExpr>(Comm))
          return getAddExpr(NewOps);
        if (isa<SCEVMulExpr>(Comm))
          return getMulExpr(NewOps);
        if (isa<SCEVSMaxExpr>(Comm))
          return getSMaxExpr(NewOps);
        if (isa<SCEVUMaxExpr>(Comm))
          return getUMaxExpr(NewOps);
        llvm_unreachable("Unknown commutative SCEV type!");
      }
    }
    // If we got here, all operands are loop invariant.
    return Comm;
  }

  if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
    const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
    const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
    if (LHS == Div->getLHS() && RHS == Div->getRHS())
      return Div;   // must be loop invariant
    return getUDivExpr(LHS, RHS);
  }

  // If this is a loop recurrence for a loop that does not contain L, then we
  // are dealing with the final value computed by the loop.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
    // First, attempt to evaluate each operand.
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
      if (OpAtScope == AddRec->getOperand(i))
        continue;

      // Okay, at least one of these operands is loop variant but might be
      // foldable.  Build a new instance of the folded commutative expression.
      SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(),
                                          AddRec->op_begin()+i);
      NewOps.push_back(OpAtScope);
      for (++i; i != e; ++i)
        NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));

      // Only FlagNW is preserved here; stronger wrap flags may not survive
      // the operand rewrite.
      const SCEV *FoldedRec =
        getAddRecExpr(NewOps, AddRec->getLoop(),
                      AddRec->getNoWrapFlags(SCEV::FlagNW));
      AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec);
      // The addrec may be folded to a nonrecurrence, for example, if the
      // induction variable is multiplied by zero after constant folding. Go
      // ahead and return the folded value.
      if (!AddRec)
        return FoldedRec;
      break;
    }

    // If the scope is outside the addrec's loop, evaluate it by using the
    // loop exit value of the addrec.
    if (!AddRec->getLoop()->contains(L)) {
      // To evaluate this recurrence, we need to know how many times the AddRec
      // loop iterates.  Compute this now.
      const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
      if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;

      // Then, evaluate the AddRec.
      return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
    }

    return AddRec;
  }

  // Casts distribute over scope evaluation: evaluate the operand, then
  // re-apply the cast (which may fold further).
  if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast;  // must be loop invariant
    return getZeroExtendExpr(Op, Cast->getType());
  }

  if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast;  // must be loop invariant
    return getSignExtendExpr(Op, Cast->getType());
  }

  if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast;  // must be loop invariant
    return getTruncateExpr(Op, Cast->getType());
  }

  llvm_unreachable("Unknown SCEV type!");
}
5370193323Sed
5371193323Sed/// getSCEVAtScope - This is a convenience function which does
5372193323Sed/// getSCEVAtScope(getSCEV(V), L).
5373198090Srdivackyconst SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
5374193323Sed  return getSCEVAtScope(getSCEV(V), L);
5375193323Sed}
5376193323Sed
5377193323Sed/// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the
5378193323Sed/// following equation:
5379193323Sed///
5380193323Sed///     A * X = B (mod N)
5381193323Sed///
5382193323Sed/// where N = 2^BW and BW is the common bit width of A and B. The signedness of
5383193323Sed/// A and B isn't important.
5384193323Sed///
5385193323Sed/// If the equation does not have a solution, SCEVCouldNotCompute is returned.
5386198090Srdivackystatic const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B,
5387193323Sed                                               ScalarEvolution &SE) {
5388193323Sed  uint32_t BW = A.getBitWidth();
5389193323Sed  assert(BW == B.getBitWidth() && "Bit widths must be the same.");
5390193323Sed  assert(A != 0 && "A must be non-zero.");
5391193323Sed
5392193323Sed  // 1. D = gcd(A, N)
5393193323Sed  //
5394193323Sed  // The gcd of A and N may have only one prime factor: 2. The number of
5395193323Sed  // trailing zeros in A is its multiplicity
5396193323Sed  uint32_t Mult2 = A.countTrailingZeros();
5397193323Sed  // D = 2^Mult2
5398193323Sed
5399193323Sed  // 2. Check if B is divisible by D.
5400193323Sed  //
5401193323Sed  // B is divisible by D if and only if the multiplicity of prime factor 2 for B
5402193323Sed  // is not less than multiplicity of this prime factor for D.
5403193323Sed  if (B.countTrailingZeros() < Mult2)
5404193323Sed    return SE.getCouldNotCompute();
5405193323Sed
5406193323Sed  // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
5407193323Sed  // modulo (N / D).
5408193323Sed  //
5409193323Sed  // (N / D) may need BW+1 bits in its representation.  Hence, we'll use this
5410193323Sed  // bit width during computations.
5411193323Sed  APInt AD = A.lshr(Mult2).zext(BW + 1);  // AD = A / D
5412193323Sed  APInt Mod(BW + 1, 0);
5413218893Sdim  Mod.setBit(BW - Mult2);  // Mod = N / D
5414193323Sed  APInt I = AD.multiplicativeInverse(Mod);
5415193323Sed
5416193323Sed  // 4. Compute the minimum unsigned root of the equation:
5417193323Sed  // I * (B / D) mod (N / D)
5418193323Sed  APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod);
5419193323Sed
5420193323Sed  // The result is guaranteed to be less than 2^BW so we may truncate it to BW
5421193323Sed  // bits.
5422193323Sed  return SE.getConstant(Result.trunc(BW));
5423193323Sed}
5424193323Sed
/// SolveQuadraticEquation - Find the roots of the quadratic equation for the
/// given quadratic chrec {L,+,M,+,N}.  This returns either the two roots (which
/// might be the same) or two SCEVCouldNotCompute objects.
///
static std::pair<const SCEV *,const SCEV *>
SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
  assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
  const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
  const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
  const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));

  // We currently can only solve this if the coefficients are constants.
  if (!LC || !MC || !NC) {
    const SCEV *CNC = SE.getCouldNotCompute();
    return std::make_pair(CNC, CNC);
  }

  uint32_t BitWidth = LC->getValue()->getValue().getBitWidth();
  const APInt &L = LC->getValue()->getValue();
  const APInt &M = MC->getValue()->getValue();
  const APInt &N = NC->getValue()->getValue();
  APInt Two(BitWidth, 2);
  APInt Four(BitWidth, 4);

  {
    using namespace APIntOps;
    const APInt& C = L;
    // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C.
    // A chrec {L,+,M,+,N} evaluates at iteration x to
    // L + M*x + N*x*(x-1)/2, so:
    // The B coefficient is M-N/2
    APInt B(M);
    B -= sdiv(N,Two);

    // The A coefficient is N/2
    APInt A(N.sdiv(Two));

    // Compute the B^2-4ac term (the discriminant).
    APInt SqrtTerm(B);
    SqrtTerm *= B;
    SqrtTerm -= Four * (A * C);

    if (SqrtTerm.isNegative()) {
      // Negative discriminant: no real roots, so the recurrence never hits
      // zero.  The loop is provably infinite.
      const SCEV *CNC = SE.getCouldNotCompute();
      return std::make_pair(CNC, CNC);
    }

    // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest
    // integer value or else APInt::sqrt() will assert.
    APInt SqrtVal(SqrtTerm.sqrt());

    // Compute the two solutions for the quadratic formula.
    // The divisions must be performed as signed divisions.
    APInt NegB(-B);
    APInt TwoA(A << 1);
    if (TwoA.isMinValue()) {
      // 2A == 0 would mean division by zero below; bail out.
      const SCEV *CNC = SE.getCouldNotCompute();
      return std::make_pair(CNC, CNC);
    }

    LLVMContext &Context = SE.getContext();

    // Roots are (-B +/- sqrt(B^2-4AC)) / 2A, using signed division.
    ConstantInt *Solution1 =
      ConstantInt::get(Context, (NegB + SqrtVal).sdiv(TwoA));
    ConstantInt *Solution2 =
      ConstantInt::get(Context, (NegB - SqrtVal).sdiv(TwoA));

    return std::make_pair(SE.getConstant(Solution1),
                          SE.getConstant(Solution2));
  } // end scope of "using namespace APIntOps"
}
5495193323Sed
/// HowFarToZero - Return the number of times a backedge comparing the specified
/// value to zero will execute.  If not computable, return CouldNotCompute.
///
/// This is only used for loops with a "x != y" exit test. The exit condition is
/// now expressed as a single expression, V = x-y. So the exit test is
/// effectively V != 0.  We know and take advantage of the fact that this
/// expression only being used in a comparison by zero context.
///
/// IsSubExpr is true when V is only a subexpression of the exit condition;
/// that disables the no-wrap udiv shortcut below, which is only sound when
/// this expression by itself controls the exit.
ScalarEvolution::ExitLimit
ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L, bool IsSubExpr) {
  // If the value is a constant
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    // If the value is already zero, the branch will execute zero times.
    if (C->getValue()->isZero()) return C;
    return getCouldNotCompute();  // Otherwise it will loop infinitely.
  }

  // Only add recurrences of this loop can be analyzed further.
  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V);
  if (!AddRec || AddRec->getLoop() != L)
    return getCouldNotCompute();

  // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
  // the quadratic equation to solve it.
  if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
    std::pair<const SCEV *,const SCEV *> Roots =
      SolveQuadraticEquation(AddRec, *this);
    const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
    const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
    if (R1 && R2) {
#if 0
      dbgs() << "HFTZ: " << *V << " - sol#1: " << *R1
             << "  sol#2: " << *R2 << "\n";
#endif
      // Pick the smallest positive root value.
      if (ConstantInt *CB =
          dyn_cast<ConstantInt>(ConstantExpr::getICmp(CmpInst::ICMP_ULT,
                                                      R1->getValue(),
                                                      R2->getValue()))) {
        if (CB->getZExtValue() == false)
          std::swap(R1, R2);   // R1 is the minimum root now.

        // We can only use this value if the chrec ends up with an exact zero
        // value at this index.  When solving for "X*X != 5", for example, we
        // should not accept a root of 2.
        const SCEV *Val = AddRec->evaluateAtIteration(R1, *this);
        if (Val->isZero())
          return R1;  // We found a quadratic root!
      }
    }
    return getCouldNotCompute();
  }

  // Otherwise we can only handle this if it is affine.
  if (!AddRec->isAffine())
    return getCouldNotCompute();

  // If this is an affine expression, the execution count of this branch is
  // the minimum unsigned root of the following equation:
  //
  //     Start + Step*N = 0 (mod 2^BW)
  //
  // equivalent to:
  //
  //             Step*N = -Start (mod 2^BW)
  //
  // where BW is the common bit width of Start and Step.

  // Get the initial value for the loop.
  const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
  const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());

  // For now we handle only constant steps.
  //
  // TODO: Handle a nonconstant Step given AddRec<NUW>. If the
  // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap
  // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step.
  // We have not yet seen any such cases.
  const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
  if (StepC == 0 || StepC->getValue()->equalsInt(0))
    return getCouldNotCompute();

  // For positive steps (counting up until unsigned overflow):
  //   N = -Start/Step (as unsigned)
  // For negative steps (counting down to zero):
  //   N = Start/-Step
  // First compute the unsigned distance from zero in the direction of Step.
  bool CountDown = StepC->getValue()->getValue().isNegative();
  const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);

  // Handle unitary steps, which cannot wraparound.
  // 1*N = -Start; -1*N = Start (mod 2^BW), so:
  //   N = Distance (as unsigned)
  if (StepC->getValue()->equalsInt(1) || StepC->getValue()->isAllOnesValue()) {
    // Derive an upper bound on the trip count from the unsigned range of
    // Start; the exact count is Distance itself.
    ConstantRange CR = getUnsignedRange(Start);
    const SCEV *MaxBECount;
    if (!CountDown && CR.getUnsignedMin().isMinValue())
      // When counting up, the worst starting value is 1, not 0.
      MaxBECount = CR.getUnsignedMax().isMinValue()
        ? getConstant(APInt::getMinValue(CR.getBitWidth()))
        : getConstant(APInt::getMaxValue(CR.getBitWidth()));
    else
      MaxBECount = getConstant(CountDown ? CR.getUnsignedMax()
                                         : -CR.getUnsignedMin());
    return ExitLimit(Distance, MaxBECount);
  }

  // If the recurrence is known not to wraparound, unsigned divide computes the
  // back edge count. (Ideally we would have an "isexact" bit for udiv). We know
  // that the value will either become zero (and thus the loop terminates), that
  // the loop will terminate through some other exit condition first, or that
  // the loop has undefined behavior.  This means we can't "miss" the exit
  // value, even with nonunit stride.
  //
  // This is only valid for expressions that directly compute the loop exit. It
  // is invalid for subexpressions in which the loop may exit through this
  // branch even if this subexpression is false. In that case, the trip count
  // computed by this udiv could be smaller than the number of well-defined
  // iterations.
  if (!IsSubExpr && AddRec->getNoWrapFlags(SCEV::FlagNW))
    return getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);

  // Then, try to solve the above equation provided that Start is constant.
  if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
    return SolveLinEquationWithOverflow(StepC->getValue()->getValue(),
                                        -StartC->getValue()->getValue(),
                                        *this);
  return getCouldNotCompute();
}
5623193323Sed
5624193323Sed/// HowFarToNonZero - Return the number of times a backedge checking the
5625193323Sed/// specified value for nonzero will execute.  If not computable, return
5626193630Sed/// CouldNotCompute
5627226633SdimScalarEvolution::ExitLimit
5628204642SrdivackyScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) {
5629193323Sed  // Loops that look like: while (X == 0) are very strange indeed.  We don't
5630193323Sed  // handle them yet except for the trivial case.  This could be expanded in the
5631193323Sed  // future as needed.
5632193323Sed
5633193323Sed  // If the value is a constant, check to see if it is known to be non-zero
5634193323Sed  // already.  If so, the backedge will execute zero times.
5635193323Sed  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
5636193323Sed    if (!C->getValue()->isNullValue())
5637207618Srdivacky      return getConstant(C->getType(), 0);
5638195340Sed    return getCouldNotCompute();  // Otherwise it will loop infinitely.
5639193323Sed  }
5640193323Sed
5641193323Sed  // We could implement others, but I really doubt anyone writes loops like
5642193323Sed  // this, and if they did, they would already be constant folded.
5643195340Sed  return getCouldNotCompute();
5644193323Sed}
5645193323Sed
5646193323Sed/// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
5647193323Sed/// (which may not be an immediate predecessor) which has exactly one
5648193323Sed/// successor from which BB is reachable, or null if no such block is
5649193323Sed/// found.
5650193323Sed///
5651207618Srdivackystd::pair<BasicBlock *, BasicBlock *>
5652193323SedScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
5653193323Sed  // If the block has a unique predecessor, then there is no path from the
5654193323Sed  // predecessor to the block that does not go through the direct edge
5655193323Sed  // from the predecessor to the block.
5656193323Sed  if (BasicBlock *Pred = BB->getSinglePredecessor())
5657207618Srdivacky    return std::make_pair(Pred, BB);
5658193323Sed
5659193323Sed  // A loop's header is defined to be a block that dominates the loop.
5660193323Sed  // If the header has a unique predecessor outside the loop, it must be
5661193323Sed  // a block that has exactly one successor that can reach the loop.
5662193323Sed  if (Loop *L = LI->getLoopFor(BB))
5663210299Sed    return std::make_pair(L->getLoopPredecessor(), L->getHeader());
5664193323Sed
5665207618Srdivacky  return std::pair<BasicBlock *, BasicBlock *>();
5666193323Sed}
5667193323Sed
5668194612Sed/// HasSameValue - SCEV structural equivalence is usually sufficient for
5669194612Sed/// testing whether two expressions are equal, however for the purposes of
5670194612Sed/// looking for a condition guarding a loop, it can be useful to be a little
5671194612Sed/// more general, since a front-end may have replicated the controlling
5672194612Sed/// expression.
5673194612Sed///
5674198090Srdivackystatic bool HasSameValue(const SCEV *A, const SCEV *B) {
5675194612Sed  // Quick check to see if they are the same SCEV.
5676194612Sed  if (A == B) return true;
5677194612Sed
5678194612Sed  // Otherwise, if they're both SCEVUnknown, it's possible that they hold
5679194612Sed  // two different instructions with the same value. Check for this case.
5680194612Sed  if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
5681194612Sed    if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
5682194612Sed      if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
5683194612Sed        if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
5684198090Srdivacky          if (AI->isIdenticalTo(BI) && !AI->mayReadFromMemory())
5685194612Sed            return true;
5686194612Sed
5687194612Sed  // Otherwise assume they may have a different value.
5688194612Sed  return false;
5689194612Sed}
5690194612Sed
5691207618Srdivacky/// SimplifyICmpOperands - Simplify LHS and RHS in a comparison with
5692207618Srdivacky/// predicate Pred. Return true iff any changes were made.
5693207618Srdivacky///
5694207618Srdivackybool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
5695239462Sdim                                           const SCEV *&LHS, const SCEV *&RHS,
5696239462Sdim                                           unsigned Depth) {
5697207618Srdivacky  bool Changed = false;
5698207618Srdivacky
5699239462Sdim  // If we hit the max recursion limit bail out.
5700239462Sdim  if (Depth >= 3)
5701239462Sdim    return false;
5702239462Sdim
5703207618Srdivacky  // Canonicalize a constant to the right side.
5704207618Srdivacky  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
5705207618Srdivacky    // Check for both operands constant.
5706207618Srdivacky    if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
5707207618Srdivacky      if (ConstantExpr::getICmp(Pred,
5708207618Srdivacky                                LHSC->getValue(),
5709207618Srdivacky                                RHSC->getValue())->isNullValue())
5710207618Srdivacky        goto trivially_false;
5711207618Srdivacky      else
5712207618Srdivacky        goto trivially_true;
5713207618Srdivacky    }
5714207618Srdivacky    // Otherwise swap the operands to put the constant on the right.
5715207618Srdivacky    std::swap(LHS, RHS);
5716207618Srdivacky    Pred = ICmpInst::getSwappedPredicate(Pred);
5717207618Srdivacky    Changed = true;
5718207618Srdivacky  }
5719207618Srdivacky
5720207618Srdivacky  // If we're comparing an addrec with a value which is loop-invariant in the
5721207618Srdivacky  // addrec's loop, put the addrec on the left. Also make a dominance check,
5722207618Srdivacky  // as both operands could be addrecs loop-invariant in each other's loop.
5723207618Srdivacky  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
5724207618Srdivacky    const Loop *L = AR->getLoop();
5725218893Sdim    if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
5726207618Srdivacky      std::swap(LHS, RHS);
5727207618Srdivacky      Pred = ICmpInst::getSwappedPredicate(Pred);
5728207618Srdivacky      Changed = true;
5729207618Srdivacky    }
5730207618Srdivacky  }
5731207618Srdivacky
5732207618Srdivacky  // If there's a constant operand, canonicalize comparisons with boundary
5733207618Srdivacky  // cases, and canonicalize *-or-equal comparisons to regular comparisons.
5734207618Srdivacky  if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
5735207618Srdivacky    const APInt &RA = RC->getValue()->getValue();
5736207618Srdivacky    switch (Pred) {
5737207618Srdivacky    default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
5738207618Srdivacky    case ICmpInst::ICMP_EQ:
5739207618Srdivacky    case ICmpInst::ICMP_NE:
5740239462Sdim      // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b.
5741239462Sdim      if (!RA)
5742239462Sdim        if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS))
5743239462Sdim          if (const SCEVMulExpr *ME = dyn_cast<SCEVMulExpr>(AE->getOperand(0)))
5744239462Sdim            if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 &&
5745239462Sdim                ME->getOperand(0)->isAllOnesValue()) {
5746239462Sdim              RHS = AE->getOperand(1);
5747239462Sdim              LHS = ME->getOperand(1);
5748239462Sdim              Changed = true;
5749239462Sdim            }
5750207618Srdivacky      break;
5751207618Srdivacky    case ICmpInst::ICMP_UGE:
5752207618Srdivacky      if ((RA - 1).isMinValue()) {
5753207618Srdivacky        Pred = ICmpInst::ICMP_NE;
5754207618Srdivacky        RHS = getConstant(RA - 1);
5755207618Srdivacky        Changed = true;
5756207618Srdivacky        break;
5757207618Srdivacky      }
5758207618Srdivacky      if (RA.isMaxValue()) {
5759207618Srdivacky        Pred = ICmpInst::ICMP_EQ;
5760207618Srdivacky        Changed = true;
5761207618Srdivacky        break;
5762207618Srdivacky      }
5763207618Srdivacky      if (RA.isMinValue()) goto trivially_true;
5764207618Srdivacky
5765207618Srdivacky      Pred = ICmpInst::ICMP_UGT;
5766207618Srdivacky      RHS = getConstant(RA - 1);
5767207618Srdivacky      Changed = true;
5768207618Srdivacky      break;
5769207618Srdivacky    case ICmpInst::ICMP_ULE:
5770207618Srdivacky      if ((RA + 1).isMaxValue()) {
5771207618Srdivacky        Pred = ICmpInst::ICMP_NE;
5772207618Srdivacky        RHS = getConstant(RA + 1);
5773207618Srdivacky        Changed = true;
5774207618Srdivacky        break;
5775207618Srdivacky      }
5776207618Srdivacky      if (RA.isMinValue()) {
5777207618Srdivacky        Pred = ICmpInst::ICMP_EQ;
5778207618Srdivacky        Changed = true;
5779207618Srdivacky        break;
5780207618Srdivacky      }
5781207618Srdivacky      if (RA.isMaxValue()) goto trivially_true;
5782207618Srdivacky
5783207618Srdivacky      Pred = ICmpInst::ICMP_ULT;
5784207618Srdivacky      RHS = getConstant(RA + 1);
5785207618Srdivacky      Changed = true;
5786207618Srdivacky      break;
5787207618Srdivacky    case ICmpInst::ICMP_SGE:
5788207618Srdivacky      if ((RA - 1).isMinSignedValue()) {
5789207618Srdivacky        Pred = ICmpInst::ICMP_NE;
5790207618Srdivacky        RHS = getConstant(RA - 1);
5791207618Srdivacky        Changed = true;
5792207618Srdivacky        break;
5793207618Srdivacky      }
5794207618Srdivacky      if (RA.isMaxSignedValue()) {
5795207618Srdivacky        Pred = ICmpInst::ICMP_EQ;
5796207618Srdivacky        Changed = true;
5797207618Srdivacky        break;
5798207618Srdivacky      }
5799207618Srdivacky      if (RA.isMinSignedValue()) goto trivially_true;
5800207618Srdivacky
5801207618Srdivacky      Pred = ICmpInst::ICMP_SGT;
5802207618Srdivacky      RHS = getConstant(RA - 1);
5803207618Srdivacky      Changed = true;
5804207618Srdivacky      break;
5805207618Srdivacky    case ICmpInst::ICMP_SLE:
5806207618Srdivacky      if ((RA + 1).isMaxSignedValue()) {
5807207618Srdivacky        Pred = ICmpInst::ICMP_NE;
5808207618Srdivacky        RHS = getConstant(RA + 1);
5809207618Srdivacky        Changed = true;
5810207618Srdivacky        break;
5811207618Srdivacky      }
5812207618Srdivacky      if (RA.isMinSignedValue()) {
5813207618Srdivacky        Pred = ICmpInst::ICMP_EQ;
5814207618Srdivacky        Changed = true;
5815207618Srdivacky        break;
5816207618Srdivacky      }
5817207618Srdivacky      if (RA.isMaxSignedValue()) goto trivially_true;
5818207618Srdivacky
5819207618Srdivacky      Pred = ICmpInst::ICMP_SLT;
5820207618Srdivacky      RHS = getConstant(RA + 1);
5821207618Srdivacky      Changed = true;
5822207618Srdivacky      break;
5823207618Srdivacky    case ICmpInst::ICMP_UGT:
5824207618Srdivacky      if (RA.isMinValue()) {
5825207618Srdivacky        Pred = ICmpInst::ICMP_NE;
5826207618Srdivacky        Changed = true;
5827207618Srdivacky        break;
5828207618Srdivacky      }
5829207618Srdivacky      if ((RA + 1).isMaxValue()) {
5830207618Srdivacky        Pred = ICmpInst::ICMP_EQ;
5831207618Srdivacky        RHS = getConstant(RA + 1);
5832207618Srdivacky        Changed = true;
5833207618Srdivacky        break;
5834207618Srdivacky      }
5835207618Srdivacky      if (RA.isMaxValue()) goto trivially_false;
5836207618Srdivacky      break;
5837207618Srdivacky    case ICmpInst::ICMP_ULT:
5838207618Srdivacky      if (RA.isMaxValue()) {
5839207618Srdivacky        Pred = ICmpInst::ICMP_NE;
5840207618Srdivacky        Changed = true;
5841207618Srdivacky        break;
5842207618Srdivacky      }
5843207618Srdivacky      if ((RA - 1).isMinValue()) {
5844207618Srdivacky        Pred = ICmpInst::ICMP_EQ;
5845207618Srdivacky        RHS = getConstant(RA - 1);
5846207618Srdivacky        Changed = true;
5847207618Srdivacky        break;
5848207618Srdivacky      }
5849207618Srdivacky      if (RA.isMinValue()) goto trivially_false;
5850207618Srdivacky      break;
5851207618Srdivacky    case ICmpInst::ICMP_SGT:
5852207618Srdivacky      if (RA.isMinSignedValue()) {
5853207618Srdivacky        Pred = ICmpInst::ICMP_NE;
5854207618Srdivacky        Changed = true;
5855207618Srdivacky        break;
5856207618Srdivacky      }
5857207618Srdivacky      if ((RA + 1).isMaxSignedValue()) {
5858207618Srdivacky        Pred = ICmpInst::ICMP_EQ;
5859207618Srdivacky        RHS = getConstant(RA + 1);
5860207618Srdivacky        Changed = true;
5861207618Srdivacky        break;
5862207618Srdivacky      }
5863207618Srdivacky      if (RA.isMaxSignedValue()) goto trivially_false;
5864207618Srdivacky      break;
5865207618Srdivacky    case ICmpInst::ICMP_SLT:
5866207618Srdivacky      if (RA.isMaxSignedValue()) {
5867207618Srdivacky        Pred = ICmpInst::ICMP_NE;
5868207618Srdivacky        Changed = true;
5869207618Srdivacky        break;
5870207618Srdivacky      }
5871207618Srdivacky      if ((RA - 1).isMinSignedValue()) {
5872207618Srdivacky       Pred = ICmpInst::ICMP_EQ;
5873207618Srdivacky       RHS = getConstant(RA - 1);
5874207618Srdivacky        Changed = true;
5875207618Srdivacky       break;
5876207618Srdivacky      }
5877207618Srdivacky      if (RA.isMinSignedValue()) goto trivially_false;
5878207618Srdivacky      break;
5879207618Srdivacky    }
5880207618Srdivacky  }
5881207618Srdivacky
5882207618Srdivacky  // Check for obvious equality.
5883207618Srdivacky  if (HasSameValue(LHS, RHS)) {
5884207618Srdivacky    if (ICmpInst::isTrueWhenEqual(Pred))
5885207618Srdivacky      goto trivially_true;
5886207618Srdivacky    if (ICmpInst::isFalseWhenEqual(Pred))
5887207618Srdivacky      goto trivially_false;
5888207618Srdivacky  }
5889207618Srdivacky
5890207618Srdivacky  // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by
5891207618Srdivacky  // adding or subtracting 1 from one of the operands.
5892207618Srdivacky  switch (Pred) {
5893207618Srdivacky  case ICmpInst::ICMP_SLE:
5894207618Srdivacky    if (!getSignedRange(RHS).getSignedMax().isMaxSignedValue()) {
5895207618Srdivacky      RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
5896221345Sdim                       SCEV::FlagNSW);
5897207618Srdivacky      Pred = ICmpInst::ICMP_SLT;
5898207618Srdivacky      Changed = true;
5899207618Srdivacky    } else if (!getSignedRange(LHS).getSignedMin().isMinSignedValue()) {
5900207618Srdivacky      LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
5901221345Sdim                       SCEV::FlagNSW);
5902207618Srdivacky      Pred = ICmpInst::ICMP_SLT;
5903207618Srdivacky      Changed = true;
5904207618Srdivacky    }
5905207618Srdivacky    break;
5906207618Srdivacky  case ICmpInst::ICMP_SGE:
5907207618Srdivacky    if (!getSignedRange(RHS).getSignedMin().isMinSignedValue()) {
5908207618Srdivacky      RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
5909221345Sdim                       SCEV::FlagNSW);
5910207618Srdivacky      Pred = ICmpInst::ICMP_SGT;
5911207618Srdivacky      Changed = true;
5912207618Srdivacky    } else if (!getSignedRange(LHS).getSignedMax().isMaxSignedValue()) {
5913207618Srdivacky      LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
5914221345Sdim                       SCEV::FlagNSW);
5915207618Srdivacky      Pred = ICmpInst::ICMP_SGT;
5916207618Srdivacky      Changed = true;
5917207618Srdivacky    }
5918207618Srdivacky    break;
5919207618Srdivacky  case ICmpInst::ICMP_ULE:
5920207618Srdivacky    if (!getUnsignedRange(RHS).getUnsignedMax().isMaxValue()) {
5921207618Srdivacky      RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
5922221345Sdim                       SCEV::FlagNUW);
5923207618Srdivacky      Pred = ICmpInst::ICMP_ULT;
5924207618Srdivacky      Changed = true;
5925207618Srdivacky    } else if (!getUnsignedRange(LHS).getUnsignedMin().isMinValue()) {
5926207618Srdivacky      LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
5927221345Sdim                       SCEV::FlagNUW);
5928207618Srdivacky      Pred = ICmpInst::ICMP_ULT;
5929207618Srdivacky      Changed = true;
5930207618Srdivacky    }
5931207618Srdivacky    break;
5932207618Srdivacky  case ICmpInst::ICMP_UGE:
5933207618Srdivacky    if (!getUnsignedRange(RHS).getUnsignedMin().isMinValue()) {
5934207618Srdivacky      RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
5935221345Sdim                       SCEV::FlagNUW);
5936207618Srdivacky      Pred = ICmpInst::ICMP_UGT;
5937207618Srdivacky      Changed = true;
5938207618Srdivacky    } else if (!getUnsignedRange(LHS).getUnsignedMax().isMaxValue()) {
5939207618Srdivacky      LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
5940221345Sdim                       SCEV::FlagNUW);
5941207618Srdivacky      Pred = ICmpInst::ICMP_UGT;
5942207618Srdivacky      Changed = true;
5943207618Srdivacky    }
5944207618Srdivacky    break;
5945207618Srdivacky  default:
5946207618Srdivacky    break;
5947207618Srdivacky  }
5948207618Srdivacky
5949207618Srdivacky  // TODO: More simplifications are possible here.
5950207618Srdivacky
5951239462Sdim  // Recursively simplify until we either hit a recursion limit or nothing
5952239462Sdim  // changes.
5953239462Sdim  if (Changed)
5954239462Sdim    return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1);
5955239462Sdim
5956207618Srdivacky  return Changed;
5957207618Srdivacky
5958207618Srdivackytrivially_true:
5959207618Srdivacky  // Return 0 == 0.
5960218893Sdim  LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
5961207618Srdivacky  Pred = ICmpInst::ICMP_EQ;
5962207618Srdivacky  return true;
5963207618Srdivacky
5964207618Srdivackytrivially_false:
5965207618Srdivacky  // Return 0 != 0.
5966218893Sdim  LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
5967207618Srdivacky  Pred = ICmpInst::ICMP_NE;
5968207618Srdivacky  return true;
5969207618Srdivacky}
5970207618Srdivacky
5971198090Srdivackybool ScalarEvolution::isKnownNegative(const SCEV *S) {
5972198090Srdivacky  return getSignedRange(S).getSignedMax().isNegative();
5973198090Srdivacky}
5974198090Srdivacky
5975198090Srdivackybool ScalarEvolution::isKnownPositive(const SCEV *S) {
5976198090Srdivacky  return getSignedRange(S).getSignedMin().isStrictlyPositive();
5977198090Srdivacky}
5978198090Srdivacky
5979198090Srdivackybool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
5980198090Srdivacky  return !getSignedRange(S).getSignedMin().isNegative();
5981198090Srdivacky}
5982198090Srdivacky
5983198090Srdivackybool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
5984198090Srdivacky  return !getSignedRange(S).getSignedMax().isStrictlyPositive();
5985198090Srdivacky}
5986198090Srdivacky
5987198090Srdivackybool ScalarEvolution::isKnownNonZero(const SCEV *S) {
5988198090Srdivacky  return isKnownNegative(S) || isKnownPositive(S);
5989198090Srdivacky}
5990198090Srdivacky
5991198090Srdivackybool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
5992198090Srdivacky                                       const SCEV *LHS, const SCEV *RHS) {
5993207618Srdivacky  // Canonicalize the inputs first.
5994207618Srdivacky  (void)SimplifyICmpOperands(Pred, LHS, RHS);
5995198090Srdivacky
5996207618Srdivacky  // If LHS or RHS is an addrec, check to see if the condition is true in
5997207618Srdivacky  // every iteration of the loop.
5998207618Srdivacky  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
5999207618Srdivacky    if (isLoopEntryGuardedByCond(
6000207618Srdivacky          AR->getLoop(), Pred, AR->getStart(), RHS) &&
6001207618Srdivacky        isLoopBackedgeGuardedByCond(
6002207618Srdivacky          AR->getLoop(), Pred, AR->getPostIncExpr(*this), RHS))
6003207618Srdivacky      return true;
6004207618Srdivacky  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS))
6005207618Srdivacky    if (isLoopEntryGuardedByCond(
6006207618Srdivacky          AR->getLoop(), Pred, LHS, AR->getStart()) &&
6007207618Srdivacky        isLoopBackedgeGuardedByCond(
6008207618Srdivacky          AR->getLoop(), Pred, LHS, AR->getPostIncExpr(*this)))
6009207618Srdivacky      return true;
6010207618Srdivacky
6011207618Srdivacky  // Otherwise see what can be done with known constant ranges.
6012207618Srdivacky  return isKnownPredicateWithRanges(Pred, LHS, RHS);
6013207618Srdivacky}
6014207618Srdivacky
6015207618Srdivackybool
6016207618SrdivackyScalarEvolution::isKnownPredicateWithRanges(ICmpInst::Predicate Pred,
6017207618Srdivacky                                            const SCEV *LHS, const SCEV *RHS) {
6018198090Srdivacky  if (HasSameValue(LHS, RHS))
6019198090Srdivacky    return ICmpInst::isTrueWhenEqual(Pred);
6020198090Srdivacky
6021207618Srdivacky  // This code is split out from isKnownPredicate because it is called from
6022207618Srdivacky  // within isLoopEntryGuardedByCond.
6023198090Srdivacky  switch (Pred) {
6024198090Srdivacky  default:
6025198090Srdivacky    llvm_unreachable("Unexpected ICmpInst::Predicate value!");
6026198090Srdivacky  case ICmpInst::ICMP_SGT:
6027198090Srdivacky    Pred = ICmpInst::ICMP_SLT;
6028198090Srdivacky    std::swap(LHS, RHS);
6029198090Srdivacky  case ICmpInst::ICMP_SLT: {
6030198090Srdivacky    ConstantRange LHSRange = getSignedRange(LHS);
6031198090Srdivacky    ConstantRange RHSRange = getSignedRange(RHS);
6032198090Srdivacky    if (LHSRange.getSignedMax().slt(RHSRange.getSignedMin()))
6033198090Srdivacky      return true;
6034198090Srdivacky    if (LHSRange.getSignedMin().sge(RHSRange.getSignedMax()))
6035198090Srdivacky      return false;
6036198090Srdivacky    break;
6037198090Srdivacky  }
6038198090Srdivacky  case ICmpInst::ICMP_SGE:
6039198090Srdivacky    Pred = ICmpInst::ICMP_SLE;
6040198090Srdivacky    std::swap(LHS, RHS);
6041198090Srdivacky  case ICmpInst::ICMP_SLE: {
6042198090Srdivacky    ConstantRange LHSRange = getSignedRange(LHS);
6043198090Srdivacky    ConstantRange RHSRange = getSignedRange(RHS);
6044198090Srdivacky    if (LHSRange.getSignedMax().sle(RHSRange.getSignedMin()))
6045198090Srdivacky      return true;
6046198090Srdivacky    if (LHSRange.getSignedMin().sgt(RHSRange.getSignedMax()))
6047198090Srdivacky      return false;
6048198090Srdivacky    break;
6049198090Srdivacky  }
6050198090Srdivacky  case ICmpInst::ICMP_UGT:
6051198090Srdivacky    Pred = ICmpInst::ICMP_ULT;
6052198090Srdivacky    std::swap(LHS, RHS);
6053198090Srdivacky  case ICmpInst::ICMP_ULT: {
6054198090Srdivacky    ConstantRange LHSRange = getUnsignedRange(LHS);
6055198090Srdivacky    ConstantRange RHSRange = getUnsignedRange(RHS);
6056198090Srdivacky    if (LHSRange.getUnsignedMax().ult(RHSRange.getUnsignedMin()))
6057198090Srdivacky      return true;
6058198090Srdivacky    if (LHSRange.getUnsignedMin().uge(RHSRange.getUnsignedMax()))
6059198090Srdivacky      return false;
6060198090Srdivacky    break;
6061198090Srdivacky  }
6062198090Srdivacky  case ICmpInst::ICMP_UGE:
6063198090Srdivacky    Pred = ICmpInst::ICMP_ULE;
6064198090Srdivacky    std::swap(LHS, RHS);
6065198090Srdivacky  case ICmpInst::ICMP_ULE: {
6066198090Srdivacky    ConstantRange LHSRange = getUnsignedRange(LHS);
6067198090Srdivacky    ConstantRange RHSRange = getUnsignedRange(RHS);
6068198090Srdivacky    if (LHSRange.getUnsignedMax().ule(RHSRange.getUnsignedMin()))
6069198090Srdivacky      return true;
6070198090Srdivacky    if (LHSRange.getUnsignedMin().ugt(RHSRange.getUnsignedMax()))
6071198090Srdivacky      return false;
6072198090Srdivacky    break;
6073198090Srdivacky  }
6074198090Srdivacky  case ICmpInst::ICMP_NE: {
6075198090Srdivacky    if (getUnsignedRange(LHS).intersectWith(getUnsignedRange(RHS)).isEmptySet())
6076198090Srdivacky      return true;
6077198090Srdivacky    if (getSignedRange(LHS).intersectWith(getSignedRange(RHS)).isEmptySet())
6078198090Srdivacky      return true;
6079198090Srdivacky
6080198090Srdivacky    const SCEV *Diff = getMinusSCEV(LHS, RHS);
6081198090Srdivacky    if (isKnownNonZero(Diff))
6082198090Srdivacky      return true;
6083198090Srdivacky    break;
6084198090Srdivacky  }
6085198090Srdivacky  case ICmpInst::ICMP_EQ:
6086198090Srdivacky    // The check at the top of the function catches the case where
6087198090Srdivacky    // the values are known to be equal.
6088198090Srdivacky    break;
6089198090Srdivacky  }
6090198090Srdivacky  return false;
6091198090Srdivacky}
6092198090Srdivacky
6093198090Srdivacky/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
6094198090Srdivacky/// protected by a conditional between LHS and RHS.  This is used to
6095198090Srdivacky/// to eliminate casts.
6096198090Srdivackybool
6097198090SrdivackyScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
6098198090Srdivacky                                             ICmpInst::Predicate Pred,
6099198090Srdivacky                                             const SCEV *LHS, const SCEV *RHS) {
6100193323Sed  // Interpret a null as meaning no loop, where there is obviously no guard
6101193323Sed  // (interprocedural conditions notwithstanding).
6102198090Srdivacky  if (!L) return true;
6103198090Srdivacky
6104198090Srdivacky  BasicBlock *Latch = L->getLoopLatch();
6105198090Srdivacky  if (!Latch)
6106198090Srdivacky    return false;
6107198090Srdivacky
6108198090Srdivacky  BranchInst *LoopContinuePredicate =
6109198090Srdivacky    dyn_cast<BranchInst>(Latch->getTerminator());
6110198090Srdivacky  if (!LoopContinuePredicate ||
6111198090Srdivacky      LoopContinuePredicate->isUnconditional())
6112198090Srdivacky    return false;
6113198090Srdivacky
6114212904Sdim  return isImpliedCond(Pred, LHS, RHS,
6115212904Sdim                       LoopContinuePredicate->getCondition(),
6116198090Srdivacky                       LoopContinuePredicate->getSuccessor(0) != L->getHeader());
6117198090Srdivacky}
6118198090Srdivacky
6119207618Srdivacky/// isLoopEntryGuardedByCond - Test whether entry to the loop is protected
6120198090Srdivacky/// by a conditional between LHS and RHS.  This is used to help avoid max
6121198090Srdivacky/// expressions in loop trip counts, and to eliminate casts.
6122198090Srdivackybool
6123207618SrdivackyScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
6124207618Srdivacky                                          ICmpInst::Predicate Pred,
6125207618Srdivacky                                          const SCEV *LHS, const SCEV *RHS) {
6126198090Srdivacky  // Interpret a null as meaning no loop, where there is obviously no guard
6127198090Srdivacky  // (interprocedural conditions notwithstanding).
6128193323Sed  if (!L) return false;
6129193323Sed
6130193323Sed  // Starting at the loop predecessor, climb up the predecessor chain, as long
6131193323Sed  // as there are predecessors that can be found that have unique successors
6132193323Sed  // leading to the original header.
6133207618Srdivacky  for (std::pair<BasicBlock *, BasicBlock *>
6134210299Sed         Pair(L->getLoopPredecessor(), L->getHeader());
6135207618Srdivacky       Pair.first;
6136207618Srdivacky       Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
6137193323Sed
6138193323Sed    BranchInst *LoopEntryPredicate =
6139207618Srdivacky      dyn_cast<BranchInst>(Pair.first->getTerminator());
6140193323Sed    if (!LoopEntryPredicate ||
6141193323Sed        LoopEntryPredicate->isUnconditional())
6142193323Sed      continue;
6143193323Sed
6144212904Sdim    if (isImpliedCond(Pred, LHS, RHS,
6145212904Sdim                      LoopEntryPredicate->getCondition(),
6146207618Srdivacky                      LoopEntryPredicate->getSuccessor(0) != Pair.second))
6147195098Sed      return true;
6148195098Sed  }
6149193323Sed
6150195098Sed  return false;
6151195098Sed}
6152193323Sed
6153239462Sdim/// RAII wrapper to prevent recursive application of isImpliedCond.
6154239462Sdim/// ScalarEvolution's PendingLoopPredicates set must be empty unless we are
6155239462Sdim/// currently evaluating isImpliedCond.
6156239462Sdimstruct MarkPendingLoopPredicate {
6157239462Sdim  Value *Cond;
6158239462Sdim  DenseSet<Value*> &LoopPreds;
6159239462Sdim  bool Pending;
6160239462Sdim
6161239462Sdim  MarkPendingLoopPredicate(Value *C, DenseSet<Value*> &LP)
6162239462Sdim    : Cond(C), LoopPreds(LP) {
6163239462Sdim    Pending = !LoopPreds.insert(Cond).second;
6164239462Sdim  }
6165239462Sdim  ~MarkPendingLoopPredicate() {
6166239462Sdim    if (!Pending)
6167239462Sdim      LoopPreds.erase(Cond);
6168239462Sdim  }
6169239462Sdim};
6170239462Sdim
6171198090Srdivacky/// isImpliedCond - Test whether the condition described by Pred, LHS,
6172198090Srdivacky/// and RHS is true whenever the given Cond value evaluates to true.
6173212904Sdimbool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred,
6174198090Srdivacky                                    const SCEV *LHS, const SCEV *RHS,
6175212904Sdim                                    Value *FoundCondValue,
6176198090Srdivacky                                    bool Inverse) {
6177239462Sdim  MarkPendingLoopPredicate Mark(FoundCondValue, PendingLoopPredicates);
6178239462Sdim  if (Mark.Pending)
6179239462Sdim    return false;
6180239462Sdim
6181204642Srdivacky  // Recursively handle And and Or conditions.
6182212904Sdim  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
6183195098Sed    if (BO->getOpcode() == Instruction::And) {
6184195098Sed      if (!Inverse)
6185212904Sdim        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
6186212904Sdim               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
6187195098Sed    } else if (BO->getOpcode() == Instruction::Or) {
6188195098Sed      if (Inverse)
6189212904Sdim        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
6190212904Sdim               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
6191195098Sed    }
6192195098Sed  }
6193195098Sed
6194212904Sdim  ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
6195195098Sed  if (!ICI) return false;
6196195098Sed
6197198090Srdivacky  // Bail if the ICmp's operands' types are wider than the needed type
6198198090Srdivacky  // before attempting to call getSCEV on them. This avoids infinite
6199198090Srdivacky  // recursion, since the analysis of widening casts can require loop
6200198090Srdivacky  // exit condition information for overflow checking, which would
6201198090Srdivacky  // lead back here.
6202198090Srdivacky  if (getTypeSizeInBits(LHS->getType()) <
6203198090Srdivacky      getTypeSizeInBits(ICI->getOperand(0)->getType()))
6204198090Srdivacky    return false;
6205198090Srdivacky
6206249423Sdim  // Now that we found a conditional branch that dominates the loop or controls
6207249423Sdim  // the loop latch. Check to see if it is the comparison we are looking for.
6208198090Srdivacky  ICmpInst::Predicate FoundPred;
6209195098Sed  if (Inverse)
6210198090Srdivacky    FoundPred = ICI->getInversePredicate();
6211195098Sed  else
6212198090Srdivacky    FoundPred = ICI->getPredicate();
6213195098Sed
6214198090Srdivacky  const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
6215198090Srdivacky  const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
6216198090Srdivacky
6217198090Srdivacky  // Balance the types. The case where FoundLHS' type is wider than
6218198090Srdivacky  // LHS' type is checked for above.
6219198090Srdivacky  if (getTypeSizeInBits(LHS->getType()) >
6220198090Srdivacky      getTypeSizeInBits(FoundLHS->getType())) {
6221266715Sdim    if (CmpInst::isSigned(FoundPred)) {
6222198090Srdivacky      FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
6223198090Srdivacky      FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
6224198090Srdivacky    } else {
6225198090Srdivacky      FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
6226198090Srdivacky      FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
6227198090Srdivacky    }
6228198090Srdivacky  }
6229198090Srdivacky
6230198090Srdivacky  // Canonicalize the query to match the way instcombine will have
6231198090Srdivacky  // canonicalized the comparison.
6232207618Srdivacky  if (SimplifyICmpOperands(Pred, LHS, RHS))
6233207618Srdivacky    if (LHS == RHS)
6234207618Srdivacky      return CmpInst::isTrueWhenEqual(Pred);
6235207618Srdivacky  if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
6236207618Srdivacky    if (FoundLHS == FoundRHS)
6237243830Sdim      return CmpInst::isFalseWhenEqual(FoundPred);
6238193323Sed
6239198090Srdivacky  // Check to see if we can make the LHS or RHS match.
6240198090Srdivacky  if (LHS == FoundRHS || RHS == FoundLHS) {
6241198090Srdivacky    if (isa<SCEVConstant>(RHS)) {
6242198090Srdivacky      std::swap(FoundLHS, FoundRHS);
6243198090Srdivacky      FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
6244198090Srdivacky    } else {
6245198090Srdivacky      std::swap(LHS, RHS);
6246198090Srdivacky      Pred = ICmpInst::getSwappedPredicate(Pred);
6247198090Srdivacky    }
6248198090Srdivacky  }
6249193323Sed
6250198090Srdivacky  // Check whether the found predicate is the same as the desired predicate.
6251198090Srdivacky  if (FoundPred == Pred)
6252198090Srdivacky    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);
6253198090Srdivacky
6254198090Srdivacky  // Check whether swapping the found predicate makes it the same as the
6255198090Srdivacky  // desired predicate.
6256198090Srdivacky  if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
6257198090Srdivacky    if (isa<SCEVConstant>(RHS))
6258198090Srdivacky      return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS);
6259198090Srdivacky    else
6260198090Srdivacky      return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred),
6261198090Srdivacky                                   RHS, LHS, FoundLHS, FoundRHS);
6262198090Srdivacky  }
6263198090Srdivacky
6264198090Srdivacky  // Check whether the actual condition is beyond sufficient.
6265198090Srdivacky  if (FoundPred == ICmpInst::ICMP_EQ)
6266198090Srdivacky    if (ICmpInst::isTrueWhenEqual(Pred))
6267198090Srdivacky      if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS))
6268198090Srdivacky        return true;
6269198090Srdivacky  if (Pred == ICmpInst::ICMP_NE)
6270198090Srdivacky    if (!ICmpInst::isTrueWhenEqual(FoundPred))
6271198090Srdivacky      if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS))
6272198090Srdivacky        return true;
6273198090Srdivacky
6274198090Srdivacky  // Otherwise assume the worst.
6275198090Srdivacky  return false;
6276193323Sed}
6277193323Sed
6278198090Srdivacky/// isImpliedCondOperands - Test whether the condition described by Pred,
6279204642Srdivacky/// LHS, and RHS is true whenever the condition described by Pred, FoundLHS,
6280198090Srdivacky/// and FoundRHS is true.
6281198090Srdivackybool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
6282198090Srdivacky                                            const SCEV *LHS, const SCEV *RHS,
6283198090Srdivacky                                            const SCEV *FoundLHS,
6284198090Srdivacky                                            const SCEV *FoundRHS) {
6285198090Srdivacky  return isImpliedCondOperandsHelper(Pred, LHS, RHS,
6286198090Srdivacky                                     FoundLHS, FoundRHS) ||
6287198090Srdivacky         // ~x < ~y --> x > y
6288198090Srdivacky         isImpliedCondOperandsHelper(Pred, LHS, RHS,
6289198090Srdivacky                                     getNotSCEV(FoundRHS),
6290198090Srdivacky                                     getNotSCEV(FoundLHS));
6291198090Srdivacky}
6292198090Srdivacky
6293198090Srdivacky/// isImpliedCondOperandsHelper - Test whether the condition described by
6294204642Srdivacky/// Pred, LHS, and RHS is true whenever the condition described by Pred,
6295198090Srdivacky/// FoundLHS, and FoundRHS is true.
6296198090Srdivackybool
6297198090SrdivackyScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
6298198090Srdivacky                                             const SCEV *LHS, const SCEV *RHS,
6299198090Srdivacky                                             const SCEV *FoundLHS,
6300198090Srdivacky                                             const SCEV *FoundRHS) {
6301198090Srdivacky  switch (Pred) {
6302198090Srdivacky  default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
6303198090Srdivacky  case ICmpInst::ICMP_EQ:
6304198090Srdivacky  case ICmpInst::ICMP_NE:
6305198090Srdivacky    if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
6306198090Srdivacky      return true;
6307198090Srdivacky    break;
6308198090Srdivacky  case ICmpInst::ICMP_SLT:
6309198090Srdivacky  case ICmpInst::ICMP_SLE:
6310207618Srdivacky    if (isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
6311207618Srdivacky        isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, RHS, FoundRHS))
6312198090Srdivacky      return true;
6313198090Srdivacky    break;
6314198090Srdivacky  case ICmpInst::ICMP_SGT:
6315198090Srdivacky  case ICmpInst::ICMP_SGE:
6316207618Srdivacky    if (isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
6317207618Srdivacky        isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, RHS, FoundRHS))
6318198090Srdivacky      return true;
6319198090Srdivacky    break;
6320198090Srdivacky  case ICmpInst::ICMP_ULT:
6321198090Srdivacky  case ICmpInst::ICMP_ULE:
6322207618Srdivacky    if (isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
6323207618Srdivacky        isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, RHS, FoundRHS))
6324198090Srdivacky      return true;
6325198090Srdivacky    break;
6326198090Srdivacky  case ICmpInst::ICMP_UGT:
6327198090Srdivacky  case ICmpInst::ICMP_UGE:
6328207618Srdivacky    if (isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
6329207618Srdivacky        isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, RHS, FoundRHS))
6330198090Srdivacky      return true;
6331198090Srdivacky    break;
6332198090Srdivacky  }
6333198090Srdivacky
6334198090Srdivacky  return false;
6335198090Srdivacky}
6336198090Srdivacky
6337263508Sdim// Verify if an linear IV with positive stride can overflow when in a
6338263508Sdim// less-than comparison, knowing the invariant term of the comparison, the
6339263508Sdim// stride and the knowledge of NSW/NUW flags on the recurrence.
6340263508Sdimbool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
6341263508Sdim                                         bool IsSigned, bool NoWrap) {
6342263508Sdim  if (NoWrap) return false;
6343203954Srdivacky
6344263508Sdim  unsigned BitWidth = getTypeSizeInBits(RHS->getType());
6345263508Sdim  const SCEV *One = getConstant(Stride->getType(), 1);
6346221345Sdim
6347263508Sdim  if (IsSigned) {
6348263508Sdim    APInt MaxRHS = getSignedRange(RHS).getSignedMax();
6349263508Sdim    APInt MaxValue = APInt::getSignedMaxValue(BitWidth);
6350263508Sdim    APInt MaxStrideMinusOne = getSignedRange(getMinusSCEV(Stride, One))
6351263508Sdim                                .getSignedMax();
6352221345Sdim
6353263508Sdim    // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow!
6354263508Sdim    return (MaxValue - MaxStrideMinusOne).slt(MaxRHS);
6355263508Sdim  }
6356194612Sed
6357263508Sdim  APInt MaxRHS = getUnsignedRange(RHS).getUnsignedMax();
6358263508Sdim  APInt MaxValue = APInt::getMaxValue(BitWidth);
6359263508Sdim  APInt MaxStrideMinusOne = getUnsignedRange(getMinusSCEV(Stride, One))
6360263508Sdim                              .getUnsignedMax();
6361194612Sed
6362263508Sdim  // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow!
6363263508Sdim  return (MaxValue - MaxStrideMinusOne).ult(MaxRHS);
6364263508Sdim}
6365263508Sdim
6366263508Sdim// Verify if an linear IV with negative stride can overflow when in a
6367263508Sdim// greater-than comparison, knowing the invariant term of the comparison,
6368263508Sdim// the stride and the knowledge of NSW/NUW flags on the recurrence.
6369263508Sdimbool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
6370263508Sdim                                         bool IsSigned, bool NoWrap) {
6371263508Sdim  if (NoWrap) return false;
6372263508Sdim
6373263508Sdim  unsigned BitWidth = getTypeSizeInBits(RHS->getType());
6374263508Sdim  const SCEV *One = getConstant(Stride->getType(), 1);
6375263508Sdim
6376263508Sdim  if (IsSigned) {
6377263508Sdim    APInt MinRHS = getSignedRange(RHS).getSignedMin();
6378263508Sdim    APInt MinValue = APInt::getSignedMinValue(BitWidth);
6379263508Sdim    APInt MaxStrideMinusOne = getSignedRange(getMinusSCEV(Stride, One))
6380263508Sdim                               .getSignedMax();
6381263508Sdim
6382263508Sdim    // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow!
6383263508Sdim    return (MinValue + MaxStrideMinusOne).sgt(MinRHS);
6384198090Srdivacky  }
6385194612Sed
6386263508Sdim  APInt MinRHS = getUnsignedRange(RHS).getUnsignedMin();
6387263508Sdim  APInt MinValue = APInt::getMinValue(BitWidth);
6388263508Sdim  APInt MaxStrideMinusOne = getUnsignedRange(getMinusSCEV(Stride, One))
6389263508Sdim                            .getUnsignedMax();
6390263508Sdim
6391263508Sdim  // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow!
6392263508Sdim  return (MinValue + MaxStrideMinusOne).ugt(MinRHS);
6393194612Sed}
6394194612Sed
6395263508Sdim// Compute the backedge taken count knowing the interval difference, the
6396263508Sdim// stride and presence of the equality in the comparison.
6397263508Sdimconst SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step,
6398263508Sdim                                            bool Equality) {
6399263508Sdim  const SCEV *One = getConstant(Step->getType(), 1);
6400263508Sdim  Delta = Equality ? getAddExpr(Delta, Step)
6401263508Sdim                   : getAddExpr(Delta, getMinusSCEV(Step, One));
6402263508Sdim  return getUDivExpr(Delta, Step);
6403263508Sdim}
6404263508Sdim
6405193323Sed/// HowManyLessThans - Return the number of times a backedge containing the
6406193323Sed/// specified less-than comparison will execute.  If not computable, return
6407193630Sed/// CouldNotCompute.
6408251662Sdim///
6409251662Sdim/// @param IsSubExpr is true when the LHS < RHS condition does not directly
6410251662Sdim/// control the branch. In this case, we can only compute an iteration count for
6411251662Sdim/// a subexpression that cannot overflow before evaluating true.
6412226633SdimScalarEvolution::ExitLimit
6413195098SedScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
6414263508Sdim                                  const Loop *L, bool IsSigned,
6415251662Sdim                                  bool IsSubExpr) {
6416263508Sdim  // We handle only IV < Invariant
6417263508Sdim  if (!isLoopInvariant(RHS, L))
6418263508Sdim    return getCouldNotCompute();
6419193323Sed
6420263508Sdim  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
6421263508Sdim
6422263508Sdim  // Avoid weird loops
6423263508Sdim  if (!IV || IV->getLoop() != L || !IV->isAffine())
6424195340Sed    return getCouldNotCompute();
6425193323Sed
6426263508Sdim  bool NoWrap = !IsSubExpr &&
6427263508Sdim                IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);
6428193323Sed
6429263508Sdim  const SCEV *Stride = IV->getStepRecurrence(*this);
6430193323Sed
6431263508Sdim  // Avoid negative or zero stride values
6432263508Sdim  if (!isKnownPositive(Stride))
6433263508Sdim    return getCouldNotCompute();
6434193323Sed
6435263508Sdim  // Avoid proven overflow cases: this will ensure that the backedge taken count
6436263508Sdim  // will not generate any unsigned overflow. Relaxed no-overflow conditions
6437263508Sdim  // exploit NoWrapFlags, allowing to optimize in presence of undefined
6438263508Sdim  // behaviors like the case of C language.
6439263508Sdim  if (!Stride->isOne() && doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap))
6440263508Sdim    return getCouldNotCompute();
6441193323Sed
6442263508Sdim  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT
6443263508Sdim                                      : ICmpInst::ICMP_ULT;
6444263508Sdim  const SCEV *Start = IV->getStart();
6445263508Sdim  const SCEV *End = RHS;
6446263508Sdim  if (!isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS))
6447263508Sdim    End = IsSigned ? getSMaxExpr(RHS, Start)
6448263508Sdim                   : getUMaxExpr(RHS, Start);
6449193323Sed
6450263508Sdim  const SCEV *BECount = computeBECount(getMinusSCEV(End, Start), Stride, false);
6451193323Sed
6452263508Sdim  APInt MinStart = IsSigned ? getSignedRange(Start).getSignedMin()
6453263508Sdim                            : getUnsignedRange(Start).getUnsignedMin();
6454193323Sed
6455263508Sdim  APInt MinStride = IsSigned ? getSignedRange(Stride).getSignedMin()
6456263508Sdim                             : getUnsignedRange(Stride).getUnsignedMin();
6457203954Srdivacky
6458263508Sdim  unsigned BitWidth = getTypeSizeInBits(LHS->getType());
6459263508Sdim  APInt Limit = IsSigned ? APInt::getSignedMaxValue(BitWidth) - (MinStride - 1)
6460263508Sdim                         : APInt::getMaxValue(BitWidth) - (MinStride - 1);
6461193323Sed
6462263508Sdim  // Although End can be a MAX expression we estimate MaxEnd considering only
6463263508Sdim  // the case End = RHS. This is safe because in the other case (End - Start)
6464263508Sdim  // is zero, leading to a zero maximum backedge taken count.
6465263508Sdim  APInt MaxEnd =
6466263508Sdim    IsSigned ? APIntOps::smin(getSignedRange(RHS).getSignedMax(), Limit)
6467263508Sdim             : APIntOps::umin(getUnsignedRange(RHS).getUnsignedMax(), Limit);
6468193323Sed
6469263508Sdim  const SCEV *MaxBECount = getCouldNotCompute();
6470263508Sdim  if (isa<SCEVConstant>(BECount))
6471263508Sdim    MaxBECount = BECount;
6472263508Sdim  else
6473263508Sdim    MaxBECount = computeBECount(getConstant(MaxEnd - MinStart),
6474263508Sdim                                getConstant(MinStride), false);
6475221345Sdim
6476263508Sdim  if (isa<SCEVCouldNotCompute>(MaxBECount))
6477263508Sdim    MaxBECount = BECount;
6478193323Sed
6479263508Sdim  return ExitLimit(BECount, MaxBECount);
6480193323Sed}
6481193323Sed
6482263508SdimScalarEvolution::ExitLimit
6483263508SdimScalarEvolution::HowManyGreaterThans(const SCEV *LHS, const SCEV *RHS,
6484263508Sdim                                     const Loop *L, bool IsSigned,
6485263508Sdim                                     bool IsSubExpr) {
6486263508Sdim  // We handle only IV > Invariant
6487263508Sdim  if (!isLoopInvariant(RHS, L))
6488263508Sdim    return getCouldNotCompute();
6489263508Sdim
6490263508Sdim  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
6491263508Sdim
6492263508Sdim  // Avoid weird loops
6493263508Sdim  if (!IV || IV->getLoop() != L || !IV->isAffine())
6494263508Sdim    return getCouldNotCompute();
6495263508Sdim
6496263508Sdim  bool NoWrap = !IsSubExpr &&
6497263508Sdim                IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);
6498263508Sdim
6499263508Sdim  const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this));
6500263508Sdim
6501263508Sdim  // Avoid negative or zero stride values
6502263508Sdim  if (!isKnownPositive(Stride))
6503263508Sdim    return getCouldNotCompute();
6504263508Sdim
6505263508Sdim  // Avoid proven overflow cases: this will ensure that the backedge taken count
6506263508Sdim  // will not generate any unsigned overflow. Relaxed no-overflow conditions
6507263508Sdim  // exploit NoWrapFlags, allowing to optimize in presence of undefined
6508263508Sdim  // behaviors like the case of C language.
6509263508Sdim  if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap))
6510263508Sdim    return getCouldNotCompute();
6511263508Sdim
6512263508Sdim  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT
6513263508Sdim                                      : ICmpInst::ICMP_UGT;
6514263508Sdim
6515263508Sdim  const SCEV *Start = IV->getStart();
6516263508Sdim  const SCEV *End = RHS;
6517263508Sdim  if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS))
6518263508Sdim    End = IsSigned ? getSMinExpr(RHS, Start)
6519263508Sdim                   : getUMinExpr(RHS, Start);
6520263508Sdim
6521263508Sdim  const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride, false);
6522263508Sdim
6523263508Sdim  APInt MaxStart = IsSigned ? getSignedRange(Start).getSignedMax()
6524263508Sdim                            : getUnsignedRange(Start).getUnsignedMax();
6525263508Sdim
6526263508Sdim  APInt MinStride = IsSigned ? getSignedRange(Stride).getSignedMin()
6527263508Sdim                             : getUnsignedRange(Stride).getUnsignedMin();
6528263508Sdim
6529263508Sdim  unsigned BitWidth = getTypeSizeInBits(LHS->getType());
6530263508Sdim  APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1)
6531263508Sdim                         : APInt::getMinValue(BitWidth) + (MinStride - 1);
6532263508Sdim
6533263508Sdim  // Although End can be a MIN expression we estimate MinEnd considering only
6534263508Sdim  // the case End = RHS. This is safe because in the other case (Start - End)
6535263508Sdim  // is zero, leading to a zero maximum backedge taken count.
6536263508Sdim  APInt MinEnd =
6537263508Sdim    IsSigned ? APIntOps::smax(getSignedRange(RHS).getSignedMin(), Limit)
6538263508Sdim             : APIntOps::umax(getUnsignedRange(RHS).getUnsignedMin(), Limit);
6539263508Sdim
6540263508Sdim
6541263508Sdim  const SCEV *MaxBECount = getCouldNotCompute();
6542263508Sdim  if (isa<SCEVConstant>(BECount))
6543263508Sdim    MaxBECount = BECount;
6544263508Sdim  else
6545263508Sdim    MaxBECount = computeBECount(getConstant(MaxStart - MinEnd),
6546263508Sdim                                getConstant(MinStride), false);
6547263508Sdim
6548263508Sdim  if (isa<SCEVCouldNotCompute>(MaxBECount))
6549263508Sdim    MaxBECount = BECount;
6550263508Sdim
6551263508Sdim  return ExitLimit(BECount, MaxBECount);
6552263508Sdim}
6553263508Sdim
6554193323Sed/// getNumIterationsInRange - Return the number of iterations of this loop that
6555193323Sed/// produce values in the specified constant range.  Another way of looking at
6556193323Sed/// this is that it returns the first iteration number where the value is not in
6557193323Sed/// the condition, thus computing the exit count. If the iteration count can't
6558193323Sed/// be computed, an instance of SCEVCouldNotCompute is returned.
6559198090Srdivackyconst SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
6560195098Sed                                                    ScalarEvolution &SE) const {
6561193323Sed  if (Range.isFullSet())  // Infinite loop.
6562193323Sed    return SE.getCouldNotCompute();
6563193323Sed
6564193323Sed  // If the start is a non-zero constant, shift the range to simplify things.
6565193323Sed  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
6566193323Sed    if (!SC->getValue()->isZero()) {
6567198090Srdivacky      SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
6568207618Srdivacky      Operands[0] = SE.getConstant(SC->getType(), 0);
6569221345Sdim      const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
6570221345Sdim                                             getNoWrapFlags(FlagNW));
6571193323Sed      if (const SCEVAddRecExpr *ShiftedAddRec =
6572193323Sed            dyn_cast<SCEVAddRecExpr>(Shifted))
6573193323Sed        return ShiftedAddRec->getNumIterationsInRange(
6574193323Sed                           Range.subtract(SC->getValue()->getValue()), SE);
6575193323Sed      // This is strange and shouldn't happen.
6576193323Sed      return SE.getCouldNotCompute();
6577193323Sed    }
6578193323Sed
6579193323Sed  // The only time we can solve this is when we have all constant indices.
6580193323Sed  // Otherwise, we cannot determine the overflow conditions.
6581193323Sed  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
6582193323Sed    if (!isa<SCEVConstant>(getOperand(i)))
6583193323Sed      return SE.getCouldNotCompute();
6584193323Sed
6585193323Sed
6586193323Sed  // Okay at this point we know that all elements of the chrec are constants and
6587193323Sed  // that the start element is zero.
6588193323Sed
6589193323Sed  // First check to see if the range contains zero.  If not, the first
6590193323Sed  // iteration exits.
6591193323Sed  unsigned BitWidth = SE.getTypeSizeInBits(getType());
6592193323Sed  if (!Range.contains(APInt(BitWidth, 0)))
6593207618Srdivacky    return SE.getConstant(getType(), 0);
6594193323Sed
6595193323Sed  if (isAffine()) {
6596193323Sed    // If this is an affine expression then we have this situation:
6597193323Sed    //   Solve {0,+,A} in Range  ===  Ax in Range
6598193323Sed
6599193323Sed    // We know that zero is in the range.  If A is positive then we know that
6600193323Sed    // the upper value of the range must be the first possible exit value.
6601193323Sed    // If A is negative then the lower of the range is the last possible loop
6602193323Sed    // value.  Also note that we already checked for a full range.
6603193323Sed    APInt One(BitWidth,1);
6604193323Sed    APInt A     = cast<SCEVConstant>(getOperand(1))->getValue()->getValue();
6605193323Sed    APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower();
6606193323Sed
6607193323Sed    // The exit value should be (End+A)/A.
6608193323Sed    APInt ExitVal = (End + A).udiv(A);
6609198090Srdivacky    ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);
6610193323Sed
6611193323Sed    // Evaluate at the exit value.  If we really did fall out of the valid
6612193323Sed    // range, then we computed our trip count, otherwise wrap around or other
6613193323Sed    // things must have happened.
6614193323Sed    ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
6615193323Sed    if (Range.contains(Val->getValue()))
6616193323Sed      return SE.getCouldNotCompute();  // Something strange happened
6617193323Sed
6618193323Sed    // Ensure that the previous value is in the range.  This is a sanity check.
6619193323Sed    assert(Range.contains(
6620195098Sed           EvaluateConstantChrecAtConstant(this,
6621198090Srdivacky           ConstantInt::get(SE.getContext(), ExitVal - One), SE)->getValue()) &&
6622193323Sed           "Linear scev computation is off in a bad way!");
6623193323Sed    return SE.getConstant(ExitValue);
6624193323Sed  } else if (isQuadratic()) {
6625193323Sed    // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the
6626193323Sed    // quadratic equation to solve it.  To do this, we must frame our problem in
6627193323Sed    // terms of figuring out when zero is crossed, instead of when
6628193323Sed    // Range.getUpper() is crossed.
6629198090Srdivacky    SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end());
6630193323Sed    NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
6631221345Sdim    const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop(),
6632221345Sdim                                             // getNoWrapFlags(FlagNW)
6633221345Sdim                                             FlagAnyWrap);
6634193323Sed
6635193323Sed    // Next, solve the constructed addrec
6636198090Srdivacky    std::pair<const SCEV *,const SCEV *> Roots =
6637193323Sed      SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE);
6638193323Sed    const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
6639193323Sed    const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
6640193323Sed    if (R1) {
6641193323Sed      // Pick the smallest positive root value.
6642193323Sed      if (ConstantInt *CB =
6643195098Sed          dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT,
6644198090Srdivacky                         R1->getValue(), R2->getValue()))) {
6645193323Sed        if (CB->getZExtValue() == false)
6646193323Sed          std::swap(R1, R2);   // R1 is the minimum root now.
6647193323Sed
6648193323Sed        // Make sure the root is not off by one.  The returned iteration should
6649193323Sed        // not be in the range, but the previous one should be.  When solving
6650193323Sed        // for "X*X < 5", for example, we should not return a root of 2.
6651193323Sed        ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this,
6652193323Sed                                                             R1->getValue(),
6653193323Sed                                                             SE);
6654193323Sed        if (Range.contains(R1Val->getValue())) {
6655193323Sed          // The next iteration must be out of the range...
6656198090Srdivacky          ConstantInt *NextVal =
6657198090Srdivacky                ConstantInt::get(SE.getContext(), R1->getValue()->getValue()+1);
6658193323Sed
6659193323Sed          R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
6660193323Sed          if (!Range.contains(R1Val->getValue()))
6661193323Sed            return SE.getConstant(NextVal);
6662193323Sed          return SE.getCouldNotCompute();  // Something strange happened
6663193323Sed        }
6664193323Sed
6665193323Sed        // If R1 was not in the range, then it is a good return value.  Make
6666193323Sed        // sure that R1-1 WAS in the range though, just in case.
6667198090Srdivacky        ConstantInt *NextVal =
6668198090Srdivacky               ConstantInt::get(SE.getContext(), R1->getValue()->getValue()-1);
6669193323Sed        R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
6670193323Sed        if (Range.contains(R1Val->getValue()))
6671193323Sed          return R1;
6672193323Sed        return SE.getCouldNotCompute();  // Something strange happened
6673193323Sed      }
6674193323Sed    }
6675193323Sed  }
6676193323Sed
6677193323Sed  return SE.getCouldNotCompute();
6678193323Sed}
6679193323Sed
6680263508Sdimstatic const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
6681263508Sdim  APInt A = C1->getValue()->getValue().abs();
6682263508Sdim  APInt B = C2->getValue()->getValue().abs();
6683263508Sdim  uint32_t ABW = A.getBitWidth();
6684263508Sdim  uint32_t BBW = B.getBitWidth();
6685193323Sed
6686263508Sdim  if (ABW > BBW)
6687263508Sdim    B = B.zext(ABW);
6688263508Sdim  else if (ABW < BBW)
6689263508Sdim    A = A.zext(BBW);
6690193323Sed
6691263508Sdim  return APIntOps::GreatestCommonDivisor(A, B);
6692263508Sdim}
6693263508Sdim
6694263508Sdimstatic const APInt srem(const SCEVConstant *C1, const SCEVConstant *C2) {
6695263508Sdim  APInt A = C1->getValue()->getValue();
6696263508Sdim  APInt B = C2->getValue()->getValue();
6697263508Sdim  uint32_t ABW = A.getBitWidth();
6698263508Sdim  uint32_t BBW = B.getBitWidth();
6699263508Sdim
6700263508Sdim  if (ABW > BBW)
6701263508Sdim    B = B.sext(ABW);
6702263508Sdim  else if (ABW < BBW)
6703263508Sdim    A = A.sext(BBW);
6704263508Sdim
6705263508Sdim  return APIntOps::srem(A, B);
6706263508Sdim}
6707263508Sdim
6708263508Sdimstatic const APInt sdiv(const SCEVConstant *C1, const SCEVConstant *C2) {
6709263508Sdim  APInt A = C1->getValue()->getValue();
6710263508Sdim  APInt B = C2->getValue()->getValue();
6711263508Sdim  uint32_t ABW = A.getBitWidth();
6712263508Sdim  uint32_t BBW = B.getBitWidth();
6713263508Sdim
6714263508Sdim  if (ABW > BBW)
6715263508Sdim    B = B.sext(ABW);
6716263508Sdim  else if (ABW < BBW)
6717263508Sdim    A = A.sext(BBW);
6718263508Sdim
6719263508Sdim  return APIntOps::sdiv(A, B);
6720263508Sdim}
6721263508Sdim
6722263508Sdimnamespace {
6723263508Sdimstruct SCEVGCD : public SCEVVisitor<SCEVGCD, const SCEV *> {
6724263508Sdimpublic:
6725263508Sdim  // Pattern match Step into Start. When Step is a multiply expression, find
6726263508Sdim  // the largest subexpression of Step that appears in Start. When Start is an
6727263508Sdim  // add expression, try to match Step in the subexpressions of Start, non
6728263508Sdim  // matching subexpressions are returned under Remainder.
6729263508Sdim  static const SCEV *findGCD(ScalarEvolution &SE, const SCEV *Start,
6730263508Sdim                             const SCEV *Step, const SCEV **Remainder) {
6731263508Sdim    assert(Remainder && "Remainder should not be NULL");
6732263508Sdim    SCEVGCD R(SE, Step, SE.getConstant(Step->getType(), 0));
6733263508Sdim    const SCEV *Res = R.visit(Start);
6734263508Sdim    *Remainder = R.Remainder;
6735263508Sdim    return Res;
6736263508Sdim  }
6737263508Sdim
6738263508Sdim  SCEVGCD(ScalarEvolution &S, const SCEV *G, const SCEV *R)
6739263508Sdim      : SE(S), GCD(G), Remainder(R) {
6740263508Sdim    Zero = SE.getConstant(GCD->getType(), 0);
6741263508Sdim    One = SE.getConstant(GCD->getType(), 1);
6742263508Sdim  }
6743263508Sdim
6744263508Sdim  const SCEV *visitConstant(const SCEVConstant *Constant) {
6745263508Sdim    if (GCD == Constant || Constant == Zero)
6746263508Sdim      return GCD;
6747263508Sdim
6748263508Sdim    if (const SCEVConstant *CGCD = dyn_cast<SCEVConstant>(GCD)) {
6749263508Sdim      const SCEV *Res = SE.getConstant(gcd(Constant, CGCD));
6750263508Sdim      if (Res != One)
6751263508Sdim        return Res;
6752263508Sdim
6753263508Sdim      Remainder = SE.getConstant(srem(Constant, CGCD));
6754263508Sdim      Constant = cast<SCEVConstant>(SE.getMinusSCEV(Constant, Remainder));
6755263508Sdim      Res = SE.getConstant(gcd(Constant, CGCD));
6756263508Sdim      return Res;
6757263508Sdim    }
6758263508Sdim
6759263508Sdim    // When GCD is not a constant, it could be that the GCD is an Add, Mul,
6760263508Sdim    // AddRec, etc., in which case we want to find out how many times the
6761263508Sdim    // Constant divides the GCD: we then return that as the new GCD.
6762263508Sdim    const SCEV *Rem = Zero;
6763263508Sdim    const SCEV *Res = findGCD(SE, GCD, Constant, &Rem);
6764263508Sdim
6765263508Sdim    if (Res == One || Rem != Zero) {
6766263508Sdim      Remainder = Constant;
6767263508Sdim      return One;
6768263508Sdim    }
6769263508Sdim
6770263508Sdim    assert(isa<SCEVConstant>(Res) && "Res should be a constant");
6771263508Sdim    Remainder = SE.getConstant(srem(Constant, cast<SCEVConstant>(Res)));
6772263508Sdim    return Res;
6773263508Sdim  }
6774263508Sdim
6775263508Sdim  const SCEV *visitTruncateExpr(const SCEVTruncateExpr *Expr) {
6776263508Sdim    if (GCD != Expr)
6777263508Sdim      Remainder = Expr;
6778263508Sdim    return GCD;
6779263508Sdim  }
6780263508Sdim
6781263508Sdim  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
6782263508Sdim    if (GCD != Expr)
6783263508Sdim      Remainder = Expr;
6784263508Sdim    return GCD;
6785263508Sdim  }
6786263508Sdim
6787263508Sdim  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
6788263508Sdim    if (GCD != Expr)
6789263508Sdim      Remainder = Expr;
6790263508Sdim    return GCD;
6791263508Sdim  }
6792263508Sdim
6793263508Sdim  const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
6794263508Sdim    if (GCD == Expr)
6795263508Sdim      return GCD;
6796263508Sdim
6797263508Sdim    for (int i = 0, e = Expr->getNumOperands(); i < e; ++i) {
6798263508Sdim      const SCEV *Rem = Zero;
6799263508Sdim      const SCEV *Res = findGCD(SE, Expr->getOperand(e - 1 - i), GCD, &Rem);
6800263508Sdim
6801263508Sdim      // FIXME: There may be ambiguous situations: for instance,
6802263508Sdim      // GCD(-4 + (3 * %m), 2 * %m) where 2 divides -4 and %m divides (3 * %m).
6803263508Sdim      // The order in which the AddExpr is traversed computes a different GCD
6804263508Sdim      // and Remainder.
6805263508Sdim      if (Res != One)
6806263508Sdim        GCD = Res;
6807263508Sdim      if (Rem != Zero)
6808263508Sdim        Remainder = SE.getAddExpr(Remainder, Rem);
6809263508Sdim    }
6810263508Sdim
6811263508Sdim    return GCD;
6812263508Sdim  }
6813263508Sdim
6814263508Sdim  const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
6815263508Sdim    if (GCD == Expr)
6816263508Sdim      return GCD;
6817263508Sdim
6818263508Sdim    for (int i = 0, e = Expr->getNumOperands(); i < e; ++i) {
6819263508Sdim      if (Expr->getOperand(i) == GCD)
6820263508Sdim        return GCD;
6821263508Sdim    }
6822263508Sdim
6823263508Sdim    // If we have not returned yet, it means that GCD is not part of Expr.
6824263508Sdim    const SCEV *PartialGCD = One;
6825263508Sdim    for (int i = 0, e = Expr->getNumOperands(); i < e; ++i) {
6826263508Sdim      const SCEV *Rem = Zero;
6827263508Sdim      const SCEV *Res = findGCD(SE, Expr->getOperand(i), GCD, &Rem);
6828263508Sdim      if (Rem != Zero)
6829263508Sdim        // GCD does not divide Expr->getOperand(i).
6830263508Sdim        continue;
6831263508Sdim
6832263508Sdim      if (Res == GCD)
6833263508Sdim        return GCD;
6834263508Sdim      PartialGCD = SE.getMulExpr(PartialGCD, Res);
6835263508Sdim      if (PartialGCD == GCD)
6836263508Sdim        return GCD;
6837263508Sdim    }
6838263508Sdim
6839263508Sdim    if (PartialGCD != One)
6840263508Sdim      return PartialGCD;
6841263508Sdim
6842263508Sdim    Remainder = Expr;
6843263508Sdim    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(GCD);
6844263508Sdim    if (!Mul)
6845263508Sdim      return PartialGCD;
6846263508Sdim
6847263508Sdim    // When the GCD is a multiply expression, try to decompose it:
6848263508Sdim    // this occurs when Step does not divide the Start expression
6849263508Sdim    // as in: {(-4 + (3 * %m)),+,(2 * %m)}
6850263508Sdim    for (int i = 0, e = Mul->getNumOperands(); i < e; ++i) {
6851263508Sdim      const SCEV *Rem = Zero;
6852263508Sdim      const SCEV *Res = findGCD(SE, Expr, Mul->getOperand(i), &Rem);
6853263508Sdim      if (Rem == Zero) {
6854263508Sdim        Remainder = Rem;
6855263508Sdim        return Res;
6856263508Sdim      }
6857263508Sdim    }
6858263508Sdim
6859263508Sdim    return PartialGCD;
6860263508Sdim  }
6861263508Sdim
6862263508Sdim  const SCEV *visitUDivExpr(const SCEVUDivExpr *Expr) {
6863263508Sdim    if (GCD != Expr)
6864263508Sdim      Remainder = Expr;
6865263508Sdim    return GCD;
6866263508Sdim  }
6867263508Sdim
6868263508Sdim  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
6869263508Sdim    if (GCD == Expr)
6870263508Sdim      return GCD;
6871263508Sdim
6872263508Sdim    if (!Expr->isAffine()) {
6873263508Sdim      Remainder = Expr;
6874263508Sdim      return GCD;
6875263508Sdim    }
6876263508Sdim
6877263508Sdim    const SCEV *Rem = Zero;
6878263508Sdim    const SCEV *Res = findGCD(SE, Expr->getOperand(0), GCD, &Rem);
6879263508Sdim    if (Rem != Zero)
6880263508Sdim      Remainder = SE.getAddExpr(Remainder, Rem);
6881263508Sdim
6882263508Sdim    Rem = Zero;
6883263508Sdim    Res = findGCD(SE, Expr->getOperand(1), Res, &Rem);
6884263508Sdim    if (Rem != Zero) {
6885263508Sdim      Remainder = Expr;
6886263508Sdim      return GCD;
6887263508Sdim    }
6888263508Sdim
6889263508Sdim    return Res;
6890263508Sdim  }
6891263508Sdim
6892263508Sdim  const SCEV *visitSMaxExpr(const SCEVSMaxExpr *Expr) {
6893263508Sdim    if (GCD != Expr)
6894263508Sdim      Remainder = Expr;
6895263508Sdim    return GCD;
6896263508Sdim  }
6897263508Sdim
6898263508Sdim  const SCEV *visitUMaxExpr(const SCEVUMaxExpr *Expr) {
6899263508Sdim    if (GCD != Expr)
6900263508Sdim      Remainder = Expr;
6901263508Sdim    return GCD;
6902263508Sdim  }
6903263508Sdim
6904263508Sdim  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
6905263508Sdim    if (GCD != Expr)
6906263508Sdim      Remainder = Expr;
6907263508Sdim    return GCD;
6908263508Sdim  }
6909263508Sdim
6910263508Sdim  const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
6911263508Sdim    return One;
6912263508Sdim  }
6913263508Sdim
6914263508Sdimprivate:
6915263508Sdim  ScalarEvolution &SE;
6916263508Sdim  const SCEV *GCD, *Remainder, *Zero, *One;
6917263508Sdim};
6918263508Sdim
6919263508Sdimstruct SCEVDivision : public SCEVVisitor<SCEVDivision, const SCEV *> {
6920263508Sdimpublic:
6921263508Sdim  // Remove from Start all multiples of Step.
6922263508Sdim  static const SCEV *divide(ScalarEvolution &SE, const SCEV *Start,
6923263508Sdim                            const SCEV *Step) {
6924263508Sdim    SCEVDivision D(SE, Step);
6925263508Sdim    const SCEV *Rem = D.Zero;
6926263508Sdim    (void)Rem;
6927263508Sdim    // The division is guaranteed to succeed: Step should divide Start with no
6928263508Sdim    // remainder.
6929263508Sdim    assert(Step == SCEVGCD::findGCD(SE, Start, Step, &Rem) && Rem == D.Zero &&
6930263508Sdim           "Step should divide Start with no remainder.");
6931263508Sdim    return D.visit(Start);
6932263508Sdim  }
6933263508Sdim
6934263508Sdim  SCEVDivision(ScalarEvolution &S, const SCEV *G) : SE(S), GCD(G) {
6935263508Sdim    Zero = SE.getConstant(GCD->getType(), 0);
6936263508Sdim    One = SE.getConstant(GCD->getType(), 1);
6937263508Sdim  }
6938263508Sdim
6939263508Sdim  const SCEV *visitConstant(const SCEVConstant *Constant) {
6940263508Sdim    if (GCD == Constant)
6941263508Sdim      return One;
6942263508Sdim
6943263508Sdim    if (const SCEVConstant *CGCD = dyn_cast<SCEVConstant>(GCD))
6944263508Sdim      return SE.getConstant(sdiv(Constant, CGCD));
6945263508Sdim    return Constant;
6946263508Sdim  }
6947263508Sdim
6948263508Sdim  const SCEV *visitTruncateExpr(const SCEVTruncateExpr *Expr) {
6949263508Sdim    if (GCD == Expr)
6950263508Sdim      return One;
6951263508Sdim    return Expr;
6952263508Sdim  }
6953263508Sdim
6954263508Sdim  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
6955263508Sdim    if (GCD == Expr)
6956263508Sdim      return One;
6957263508Sdim    return Expr;
6958263508Sdim  }
6959263508Sdim
6960263508Sdim  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
6961263508Sdim    if (GCD == Expr)
6962263508Sdim      return One;
6963263508Sdim    return Expr;
6964263508Sdim  }
6965263508Sdim
6966263508Sdim  const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
6967263508Sdim    if (GCD == Expr)
6968263508Sdim      return One;
6969263508Sdim
6970263508Sdim    SmallVector<const SCEV *, 2> Operands;
6971263508Sdim    for (int i = 0, e = Expr->getNumOperands(); i < e; ++i)
6972263508Sdim      Operands.push_back(divide(SE, Expr->getOperand(i), GCD));
6973263508Sdim
6974263508Sdim    if (Operands.size() == 1)
6975263508Sdim      return Operands[0];
6976263508Sdim    return SE.getAddExpr(Operands);
6977263508Sdim  }
6978263508Sdim
6979263508Sdim  const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
6980263508Sdim    if (GCD == Expr)
6981263508Sdim      return One;
6982263508Sdim
6983263508Sdim    bool FoundGCDTerm = false;
6984263508Sdim    for (int i = 0, e = Expr->getNumOperands(); i < e; ++i)
6985263508Sdim      if (Expr->getOperand(i) == GCD)
6986263508Sdim        FoundGCDTerm = true;
6987263508Sdim
6988263508Sdim    SmallVector<const SCEV *, 2> Operands;
6989263508Sdim    if (FoundGCDTerm) {
6990263508Sdim      FoundGCDTerm = false;
6991263508Sdim      for (int i = 0, e = Expr->getNumOperands(); i < e; ++i) {
6992263508Sdim        if (FoundGCDTerm)
6993263508Sdim          Operands.push_back(Expr->getOperand(i));
6994263508Sdim        else if (Expr->getOperand(i) == GCD)
6995263508Sdim          FoundGCDTerm = true;
6996263508Sdim        else
6997263508Sdim          Operands.push_back(Expr->getOperand(i));
6998263508Sdim      }
6999263508Sdim    } else {
7000263508Sdim      FoundGCDTerm = false;
7001263508Sdim      const SCEV *PartialGCD = One;
7002263508Sdim      for (int i = 0, e = Expr->getNumOperands(); i < e; ++i) {
7003263508Sdim        if (PartialGCD == GCD) {
7004263508Sdim          Operands.push_back(Expr->getOperand(i));
7005263508Sdim          continue;
7006263508Sdim        }
7007263508Sdim
7008263508Sdim        const SCEV *Rem = Zero;
7009263508Sdim        const SCEV *Res = SCEVGCD::findGCD(SE, Expr->getOperand(i), GCD, &Rem);
7010263508Sdim        if (Rem == Zero) {
7011263508Sdim          PartialGCD = SE.getMulExpr(PartialGCD, Res);
7012263508Sdim          Operands.push_back(divide(SE, Expr->getOperand(i), GCD));
7013263508Sdim        } else {
7014263508Sdim          Operands.push_back(Expr->getOperand(i));
7015263508Sdim        }
7016263508Sdim      }
7017263508Sdim    }
7018263508Sdim
7019263508Sdim    if (Operands.size() == 1)
7020263508Sdim      return Operands[0];
7021263508Sdim    return SE.getMulExpr(Operands);
7022263508Sdim  }
7023263508Sdim
7024263508Sdim  const SCEV *visitUDivExpr(const SCEVUDivExpr *Expr) {
7025263508Sdim    if (GCD == Expr)
7026263508Sdim      return One;
7027263508Sdim    return Expr;
7028263508Sdim  }
7029263508Sdim
7030263508Sdim  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
7031263508Sdim    if (GCD == Expr)
7032263508Sdim      return One;
7033263508Sdim
7034263508Sdim    assert(Expr->isAffine() && "Expr should be affine");
7035263508Sdim
7036263508Sdim    const SCEV *Start = divide(SE, Expr->getStart(), GCD);
7037263508Sdim    const SCEV *Step = divide(SE, Expr->getStepRecurrence(SE), GCD);
7038263508Sdim
7039263508Sdim    return SE.getAddRecExpr(Start, Step, Expr->getLoop(),
7040263508Sdim                            Expr->getNoWrapFlags());
7041263508Sdim  }
7042263508Sdim
7043263508Sdim  const SCEV *visitSMaxExpr(const SCEVSMaxExpr *Expr) {
7044263508Sdim    if (GCD == Expr)
7045263508Sdim      return One;
7046263508Sdim    return Expr;
7047263508Sdim  }
7048263508Sdim
7049263508Sdim  const SCEV *visitUMaxExpr(const SCEVUMaxExpr *Expr) {
7050263508Sdim    if (GCD == Expr)
7051263508Sdim      return One;
7052263508Sdim    return Expr;
7053263508Sdim  }
7054263508Sdim
7055263508Sdim  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
7056263508Sdim    if (GCD == Expr)
7057263508Sdim      return One;
7058263508Sdim    return Expr;
7059263508Sdim  }
7060263508Sdim
  // Division of a CouldNotCompute is not defined; pass it through unchanged.
  const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
    return Expr;
  }
7064263508Sdim
7065263508Sdimprivate:
7066263508Sdim  ScalarEvolution &SE;
7067263508Sdim  const SCEV *GCD, *Zero, *One;
7068263508Sdim};
7069263508Sdim}
7070263508Sdim
7071263508Sdim/// Splits the SCEV into two vectors of SCEVs representing the subscripts and
7072263508Sdim/// sizes of an array access. Returns the remainder of the delinearization that
7073263508Sdim/// is the offset start of the array.  The SCEV->delinearize algorithm computes
7074263508Sdim/// the multiples of SCEV coefficients: that is a pattern matching of sub
7075263508Sdim/// expressions in the stride and base of a SCEV corresponding to the
7076263508Sdim/// computation of a GCD (greatest common divisor) of base and stride.  When
7077263508Sdim/// SCEV->delinearize fails, it returns the SCEV unchanged.
7078263508Sdim///
7079263508Sdim/// For example: when analyzing the memory access A[i][j][k] in this loop nest
7080263508Sdim///
7081263508Sdim///  void foo(long n, long m, long o, double A[n][m][o]) {
7082263508Sdim///
7083263508Sdim///    for (long i = 0; i < n; i++)
7084263508Sdim///      for (long j = 0; j < m; j++)
7085263508Sdim///        for (long k = 0; k < o; k++)
7086263508Sdim///          A[i][j][k] = 1.0;
7087263508Sdim///  }
7088263508Sdim///
7089263508Sdim/// the delinearization input is the following AddRec SCEV:
7090263508Sdim///
7091263508Sdim///  AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
7092263508Sdim///
7093263508Sdim/// From this SCEV, we are able to say that the base offset of the access is %A
7094263508Sdim/// because it appears as an offset that does not divide any of the strides in
7095263508Sdim/// the loops:
7096263508Sdim///
7097263508Sdim///  CHECK: Base offset: %A
7098263508Sdim///
7099263508Sdim/// and then SCEV->delinearize determines the size of some of the dimensions of
7100263508Sdim/// the array as these are the multiples by which the strides are happening:
7101263508Sdim///
7102263508Sdim///  CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes.
7103263508Sdim///
7104263508Sdim/// Note that the outermost dimension remains of UnknownSize because there are
7105263508Sdim/// no strides that would help identifying the size of the last dimension: when
7106263508Sdim/// the array has been statically allocated, one could compute the size of that
7107263508Sdim/// dimension by dividing the overall size of the array by the size of the known
7108263508Sdim/// dimensions: %m * %o * 8.
7109263508Sdim///
/// Finally, delinearize provides the access functions for the array reference
/// corresponding to A[i][j][k] in the above C testcase:
7112263508Sdim///
7113263508Sdim///  CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>]
7114263508Sdim///
7115263508Sdim/// The testcases are checking the output of a function pass:
7116263508Sdim/// DelinearizationPass that walks through all loads and stores of a function
7117263508Sdim/// asking for the SCEV of the memory access with respect to all enclosing
7118263508Sdim/// loops, calling SCEV->delinearize on that and printing the results.
7119263508Sdim
const SCEV *
SCEVAddRecExpr::delinearize(ScalarEvolution &SE,
                            SmallVectorImpl<const SCEV *> &Subscripts,
                            SmallVectorImpl<const SCEV *> &Sizes) const {
  // Early exit in case this SCEV is not an affine multivariate function.
  if (!this->isAffine())
    return this;

  const SCEV *Start = this->getStart();
  const SCEV *Step = this->getStepRecurrence(SE);

  // Build the SCEV representation of the canonical induction variable
  // {0,+,1} in the loop of this SCEV.
  const SCEV *Zero = SE.getConstant(this->getType(), 0);
  const SCEV *One = SE.getConstant(this->getType(), 1);
  const SCEV *IV =
      SE.getAddRecExpr(Zero, One, this->getLoop(), this->getNoWrapFlags());

  DEBUG(dbgs() << "(delinearize: " << *this << "\n");

  // Currently we fail to delinearize when the stride of this SCEV is 1. We
  // could decide to not fail in this case: we could just return 1 for the size
  // of the subscript, and this same SCEV for the access function.
  if (Step == One) {
    DEBUG(dbgs() << "failed to delinearize " << *this << "\n)\n");
    return this;
  }

  // Find the GCD and Remainder of the Start and Step coefficients of this SCEV.
  const SCEV *Remainder = NULL;
  const SCEV *GCD = SCEVGCD::findGCD(SE, Start, Step, &Remainder);

  DEBUG(dbgs() << "GCD: " << *GCD << "\n");
  DEBUG(dbgs() << "Remainder: " << *Remainder << "\n");

  // Same remark as above: we currently fail the delinearization, although we
  // can very well handle this special case.
  if (GCD == One) {
    DEBUG(dbgs() << "failed to delinearize " << *this << "\n)\n");
    return this;
  }

  // As findGCD computed Remainder, GCD divides "Start - Remainder." The
  // Quotient is then this SCEV without Remainder, scaled down by the GCD.  The
  // Quotient is what will be used in the next subscript delinearization.
  const SCEV *Quotient =
      SCEVDivision::divide(SE, SE.getMinusSCEV(Start, Remainder), GCD);
  DEBUG(dbgs() << "Quotient: " << *Quotient << "\n");

  const SCEV *Rem;
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Quotient))
    // Recursively call delinearize on the Quotient until there are no more
    // multiples that can be recognized.
    Rem = AR->delinearize(SE, Subscripts, Sizes);
  else
    Rem = Quotient;

  // Scale up the canonical induction variable IV by whatever remains from the
  // Step after division by the GCD: the GCD is the size of all the sub-array.
  if (Step != GCD) {
    Step = SCEVDivision::divide(SE, Step, GCD);
    IV = SE.getMulExpr(IV, Step);
  }
  // The access function in the current subscript is computed as the canonical
  // induction variable IV (potentially scaled up by the step) and offset by
  // Rem, the offset of delinearization in the sub-array.
  const SCEV *Index = SE.getAddExpr(IV, Rem);

  // Record the access function and the size of the current subscript.
  Subscripts.push_back(Index);
  Sizes.push_back(GCD);

#ifndef NDEBUG
  int Size = Sizes.size();
  DEBUG(dbgs() << "succeeded to delinearize " << *this << "\n");
  DEBUG(dbgs() << "ArrayDecl[UnknownSize]");
  for (int i = 0; i < Size - 1; i++)
    DEBUG(dbgs() << "[" << *Sizes[i] << "]");
  DEBUG(dbgs() << " with elements of " << *Sizes[Size - 1] << " bytes.\n");

  DEBUG(dbgs() << "ArrayRef");
  for (int i = 0; i < Size; i++)
    DEBUG(dbgs() << "[" << *Subscripts[i] << "]");
  DEBUG(dbgs() << "\n)\n");
#endif

  return Remainder;
}
7208263508Sdim
7209193323Sed//===----------------------------------------------------------------------===//
7210193323Sed//                   SCEVCallbackVH Class Implementation
7211193323Sed//===----------------------------------------------------------------------===//
7212193323Sed
7213193323Sedvoid ScalarEvolution::SCEVCallbackVH::deleted() {
7214198090Srdivacky  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
7215193323Sed  if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
7216193323Sed    SE->ConstantEvolutionLoopExitValue.erase(PN);
7217212904Sdim  SE->ValueExprMap.erase(getValPtr());
7218193323Sed  // this now dangles!
7219193323Sed}
7220193323Sed
7221212904Sdimvoid ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
7222198090Srdivacky  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
7223193323Sed
7224193323Sed  // Forget all the expressions associated with users of the old value,
7225193323Sed  // so that future queries will recompute the expressions using the new
7226193323Sed  // value.
7227212904Sdim  Value *Old = getValPtr();
7228193323Sed  SmallVector<User *, 16> Worklist;
7229198090Srdivacky  SmallPtrSet<User *, 8> Visited;
7230193323Sed  for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
7231193323Sed       UI != UE; ++UI)
7232193323Sed    Worklist.push_back(*UI);
7233193323Sed  while (!Worklist.empty()) {
7234193323Sed    User *U = Worklist.pop_back_val();
7235193323Sed    // Deleting the Old value will cause this to dangle. Postpone
7236193323Sed    // that until everything else is done.
7237212904Sdim    if (U == Old)
7238193323Sed      continue;
7239198090Srdivacky    if (!Visited.insert(U))
7240198090Srdivacky      continue;
7241193323Sed    if (PHINode *PN = dyn_cast<PHINode>(U))
7242193323Sed      SE->ConstantEvolutionLoopExitValue.erase(PN);
7243212904Sdim    SE->ValueExprMap.erase(U);
7244198090Srdivacky    for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
7245198090Srdivacky         UI != UE; ++UI)
7246198090Srdivacky      Worklist.push_back(*UI);
7247193323Sed  }
7248212904Sdim  // Delete the Old value.
7249212904Sdim  if (PHINode *PN = dyn_cast<PHINode>(Old))
7250212904Sdim    SE->ConstantEvolutionLoopExitValue.erase(PN);
7251212904Sdim  SE->ValueExprMap.erase(Old);
7252212904Sdim  // this now dangles!
7253193323Sed}
7254193323Sed
// Keep a back-pointer to the owning ScalarEvolution so the deleted() and
// allUsesReplacedWith() callbacks above can invalidate its caches.
ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
  : CallbackVH(V), SE(se) {}
7257193323Sed
7258193323Sed//===----------------------------------------------------------------------===//
7259193323Sed//                   ScalarEvolution Class Implementation
7260193323Sed//===----------------------------------------------------------------------===//
7261193323Sed
7262193323SedScalarEvolution::ScalarEvolution()
7263263508Sdim  : FunctionPass(ID), ValuesAtScopes(64), LoopDispositions(64), BlockDispositions(64), FirstUnknown(0) {
7264218893Sdim  initializeScalarEvolutionPass(*PassRegistry::getPassRegistry());
7265193323Sed}
7266193323Sed
7267193323Sedbool ScalarEvolution::runOnFunction(Function &F) {
7268193323Sed  this->F = &F;
7269193323Sed  LI = &getAnalysis<LoopInfo>();
7270243830Sdim  TD = getAnalysisIfAvailable<DataLayout>();
7271234353Sdim  TLI = &getAnalysis<TargetLibraryInfo>();
7272202878Srdivacky  DT = &getAnalysis<DominatorTree>();
7273193323Sed  return false;
7274193323Sed}
7275193323Sed
7276193323Sedvoid ScalarEvolution::releaseMemory() {
7277212904Sdim  // Iterate through all the SCEVUnknown instances and call their
7278212904Sdim  // destructors, so that they release their references to their values.
7279212904Sdim  for (SCEVUnknown *U = FirstUnknown; U; U = U->Next)
7280212904Sdim    U->~SCEVUnknown();
7281212904Sdim  FirstUnknown = 0;
7282212904Sdim
7283212904Sdim  ValueExprMap.clear();
7284226633Sdim
7285226633Sdim  // Free any extra memory created for ExitNotTakenInfo in the unlikely event
7286226633Sdim  // that a loop had multiple computable exits.
7287226633Sdim  for (DenseMap<const Loop*, BackedgeTakenInfo>::iterator I =
7288226633Sdim         BackedgeTakenCounts.begin(), E = BackedgeTakenCounts.end();
7289226633Sdim       I != E; ++I) {
7290226633Sdim    I->second.clear();
7291226633Sdim  }
7292226633Sdim
7293239462Sdim  assert(PendingLoopPredicates.empty() && "isImpliedCond garbage");
7294239462Sdim
7295193323Sed  BackedgeTakenCounts.clear();
7296193323Sed  ConstantEvolutionLoopExitValue.clear();
7297193323Sed  ValuesAtScopes.clear();
7298218893Sdim  LoopDispositions.clear();
7299218893Sdim  BlockDispositions.clear();
7300218893Sdim  UnsignedRanges.clear();
7301218893Sdim  SignedRanges.clear();
7302195340Sed  UniqueSCEVs.clear();
7303195340Sed  SCEVAllocator.Reset();
7304193323Sed}
7305193323Sed
void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
  // ScalarEvolution never mutates the IR; it needs LoopInfo and DominatorTree
  // to stay alive as long as the analysis itself (addRequiredTransitive).
  AU.setPreservesAll();
  AU.addRequiredTransitive<LoopInfo>();
  AU.addRequiredTransitive<DominatorTree>();
  AU.addRequired<TargetLibraryInfo>();
}
7312193323Sed
7313193323Sedbool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
7314193323Sed  return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
7315193323Sed}
7316193323Sed
7317193323Sedstatic void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
7318193323Sed                          const Loop *L) {
7319193323Sed  // Print all inner loops first
7320193323Sed  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
7321193323Sed    PrintLoopInfo(OS, SE, *I);
7322193323Sed
7323202375Srdivacky  OS << "Loop ";
7324202375Srdivacky  WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
7325202375Srdivacky  OS << ": ";
7326193323Sed
7327201360Srdivacky  SmallVector<BasicBlock *, 8> ExitBlocks;
7328193323Sed  L->getExitBlocks(ExitBlocks);
7329193323Sed  if (ExitBlocks.size() != 1)
7330193323Sed    OS << "<multiple exits> ";
7331193323Sed
7332193323Sed  if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
7333193323Sed    OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
7334193323Sed  } else {
7335193323Sed    OS << "Unpredictable backedge-taken count. ";
7336193323Sed  }
7337193323Sed
7338202375Srdivacky  OS << "\n"
7339202375Srdivacky        "Loop ";
7340202375Srdivacky  WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
7341202375Srdivacky  OS << ": ";
7342195098Sed
7343195098Sed  if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
7344195098Sed    OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
7345195098Sed  } else {
7346195098Sed    OS << "Unpredictable max backedge-taken count. ";
7347195098Sed  }
7348195098Sed
7349195098Sed  OS << "\n";
7350193323Sed}
7351193323Sed
7352201360Srdivackyvoid ScalarEvolution::print(raw_ostream &OS, const Module *) const {
7353204642Srdivacky  // ScalarEvolution's implementation of the print method is to print
7354193323Sed  // out SCEV values of all instructions that are interesting. Doing
7355193323Sed  // this potentially causes it to create new SCEV objects though,
7356193323Sed  // which technically conflicts with the const qualifier. This isn't
7357198090Srdivacky  // observable from outside the class though, so casting away the
7358198090Srdivacky  // const isn't dangerous.
7359201360Srdivacky  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
7360193323Sed
7361202375Srdivacky  OS << "Classifying expressions for: ";
7362202375Srdivacky  WriteAsOperand(OS, F, /*PrintType=*/false);
7363202375Srdivacky  OS << "\n";
7364193323Sed  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
7365207618Srdivacky    if (isSCEVable(I->getType()) && !isa<CmpInst>(*I)) {
7366198090Srdivacky      OS << *I << '\n';
7367193323Sed      OS << "  -->  ";
7368198090Srdivacky      const SCEV *SV = SE.getSCEV(&*I);
7369193323Sed      SV->print(OS);
7370193323Sed
7371194612Sed      const Loop *L = LI->getLoopFor((*I).getParent());
7372194612Sed
7373198090Srdivacky      const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
7374194612Sed      if (AtUse != SV) {
7375194612Sed        OS << "  -->  ";
7376194612Sed        AtUse->print(OS);
7377194612Sed      }
7378194612Sed
7379194612Sed      if (L) {
7380194612Sed        OS << "\t\t" "Exits: ";
7381198090Srdivacky        const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
7382218893Sdim        if (!SE.isLoopInvariant(ExitValue, L)) {
7383193323Sed          OS << "<<Unknown>>";
7384193323Sed        } else {
7385193323Sed          OS << *ExitValue;
7386193323Sed        }
7387193323Sed      }
7388193323Sed
7389193323Sed      OS << "\n";
7390193323Sed    }
7391193323Sed
7392202375Srdivacky  OS << "Determining loop execution counts for: ";
7393202375Srdivacky  WriteAsOperand(OS, F, /*PrintType=*/false);
7394202375Srdivacky  OS << "\n";
7395193323Sed  for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
7396193323Sed    PrintLoopInfo(OS, &SE, *I);
7397193323Sed}
7398193323Sed
/// getLoopDisposition - Return how S relates to loop L, consulting and
/// updating the LoopDispositions cache.
ScalarEvolution::LoopDisposition
ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) {
  // Check the per-SCEV list of (loop, disposition) pairs first.
  SmallVector<std::pair<const Loop *, LoopDisposition>, 2> &Values = LoopDispositions[S];
  for (unsigned u = 0; u < Values.size(); u++) {
    if (Values[u].first == L)
      return Values[u].second;
  }
  // Seed the cache with a conservative answer so that a recursive query on
  // the same (S, L) pair terminates instead of recursing forever.
  Values.push_back(std::make_pair(L, LoopVariant));
  LoopDisposition D = computeLoopDisposition(S, L);
  // computeLoopDisposition may have inserted into LoopDispositions and
  // invalidated the reference above, so re-fetch the entry before updating.
  // Search backwards: our seed is the most recently pushed match.
  SmallVector<std::pair<const Loop *, LoopDisposition>, 2> &Values2 = LoopDispositions[S];
  for (unsigned u = Values2.size(); u > 0; u--) {
    if (Values2[u - 1].first == L) {
      Values2[u - 1].second = D;
      break;
    }
  }
  return D;
}
7417218893Sdim
/// computeLoopDisposition - Compute how S relates to loop L: invariant in L,
/// variant in L, or computable (evolving predictably) within L.  Uncached
/// counterpart of getLoopDisposition.
ScalarEvolution::LoopDisposition
ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
  switch (S->getSCEVType()) {
  case scConstant:
    return LoopInvariant;
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    // A cast has the disposition of its operand.
    return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L);
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);

    // If L is the addrec's loop, it's computable.
    if (AR->getLoop() == L)
      return LoopComputable;

    // Add recurrences are never invariant in the function-body (null loop).
    if (!L)
      return LoopVariant;

    // This recurrence is variant w.r.t. L if L contains AR's loop.
    if (L->contains(AR->getLoop()))
      return LoopVariant;

    // This recurrence is invariant w.r.t. L if AR's loop contains L.
    if (AR->getLoop()->contains(L))
      return LoopInvariant;

    // This recurrence is variant w.r.t. L if any of its operands
    // are variant.
    for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
         I != E; ++I)
      if (!isLoopInvariant(*I, L))
        return LoopVariant;

    // Otherwise it's loop-invariant.
    return LoopInvariant;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    // An n-ary expression is computable if any operand is computable and no
    // operand is variant; invariant if all operands are invariant.
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
    bool HasVarying = false;
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      LoopDisposition D = getLoopDisposition(*I, L);
      if (D == LoopVariant)
        return LoopVariant;
      if (D == LoopComputable)
        HasVarying = true;
    }
    return HasVarying ? LoopComputable : LoopInvariant;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
    LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L);
    if (LD == LoopVariant)
      return LoopVariant;
    LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L);
    if (RD == LoopVariant)
      return LoopVariant;
    return (LD == LoopInvariant && RD == LoopInvariant) ?
           LoopInvariant : LoopComputable;
  }
  case scUnknown:
    // All non-instruction values are loop invariant.  All instructions are loop
    // invariant if they are not contained in the specified loop.
    // Instructions are never considered invariant in the function body
    // (null loop) because they are defined within the "loop".
    if (Instruction *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue()))
      return (L && !L->contains(I)) ? LoopInvariant : LoopVariant;
    return LoopInvariant;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  default: llvm_unreachable("Unknown SCEV kind!");
  }
}
7496218893Sdim
7497218893Sdimbool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) {
7498218893Sdim  return getLoopDisposition(S, L) == LoopInvariant;
7499218893Sdim}
7500218893Sdim
7501218893Sdimbool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) {
7502218893Sdim  return getLoopDisposition(S, L) == LoopComputable;
7503218893Sdim}
7504218893Sdim
/// getBlockDisposition - Return how S relates to basic block BB, consulting
/// and updating the BlockDispositions cache.
ScalarEvolution::BlockDisposition
ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  // Check the per-SCEV list of (block, disposition) pairs first.
  SmallVector<std::pair<const BasicBlock *, BlockDisposition>, 2> &Values = BlockDispositions[S];
  for (unsigned u = 0; u < Values.size(); u++) {
    if (Values[u].first == BB)
      return Values[u].second;
  }
  // Seed the cache with a conservative answer so that a recursive query on
  // the same (S, BB) pair terminates.
  Values.push_back(std::make_pair(BB, DoesNotDominateBlock));
  BlockDisposition D = computeBlockDisposition(S, BB);
  // computeBlockDisposition may have inserted into BlockDispositions and
  // invalidated the reference above, so re-fetch the entry before updating.
  // Search backwards: our seed is the most recently pushed match.
  SmallVector<std::pair<const BasicBlock *, BlockDisposition>, 2> &Values2 = BlockDispositions[S];
  for (unsigned u = Values2.size(); u > 0; u--) {
    if (Values2[u - 1].first == BB) {
      Values2[u - 1].second = D;
      break;
    }
  }
  return D;
}
7523218893Sdim
/// computeBlockDisposition - Compute whether the value of S is available in,
/// dominates, or properly dominates block BB.  Uncached counterpart of
/// getBlockDisposition.
ScalarEvolution::BlockDisposition
ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  switch (S->getSCEVType()) {
  case scConstant:
    return ProperlyDominatesBlock;
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    // A cast has the disposition of its operand.
    return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB);
  case scAddRecExpr: {
    // This uses a "dominates" query instead of "properly dominates" query
    // to test for proper dominance too, because the instruction which
    // produces the addrec's value is a PHI, and a PHI effectively properly
    // dominates its entire containing block.
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
    if (!DT->dominates(AR->getLoop()->getHeader(), BB))
      return DoesNotDominateBlock;
  }
  // FALL THROUGH into SCEVNAryExpr handling.
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    // An n-ary expression properly dominates BB only if every operand does;
    // it does not dominate BB if any operand fails to.
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
    bool Proper = true;
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      BlockDisposition D = getBlockDisposition(*I, BB);
      if (D == DoesNotDominateBlock)
        return DoesNotDominateBlock;
      if (D == DominatesBlock)
        Proper = false;
    }
    return Proper ? ProperlyDominatesBlock : DominatesBlock;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
    const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
    BlockDisposition LD = getBlockDisposition(LHS, BB);
    if (LD == DoesNotDominateBlock)
      return DoesNotDominateBlock;
    BlockDisposition RD = getBlockDisposition(RHS, BB);
    if (RD == DoesNotDominateBlock)
      return DoesNotDominateBlock;
    return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ?
      ProperlyDominatesBlock : DominatesBlock;
  }
  case scUnknown:
    // An unknown's disposition is that of the defining instruction; values
    // that are not instructions dominate everything.
    if (Instruction *I =
          dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
      if (I->getParent() == BB)
        return DominatesBlock;
      if (DT->properlyDominates(I->getParent(), BB))
        return ProperlyDominatesBlock;
      return DoesNotDominateBlock;
    }
    return ProperlyDominatesBlock;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  default:
    llvm_unreachable("Unknown SCEV kind!");
  }
}
7587218893Sdim
7588218893Sdimbool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
7589218893Sdim  return getBlockDisposition(S, BB) >= DominatesBlock;
7590218893Sdim}
7591218893Sdim
7592218893Sdimbool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
7593218893Sdim  return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
7594218893Sdim}
7595218893Sdim
7596239462Sdimnamespace {
7597239462Sdim// Search for a SCEV expression node within an expression tree.
7598239462Sdim// Implements SCEVTraversal::Visitor.
7599239462Sdimstruct SCEVSearch {
7600239462Sdim  const SCEV *Node;
7601239462Sdim  bool IsFound;
7602239462Sdim
7603239462Sdim  SCEVSearch(const SCEV *N): Node(N), IsFound(false) {}
7604239462Sdim
7605239462Sdim  bool follow(const SCEV *S) {
7606239462Sdim    IsFound |= (S == Node);
7607239462Sdim    return !IsFound;
7608218893Sdim  }
7609239462Sdim  bool isDone() const { return IsFound; }
7610239462Sdim};
7611218893Sdim}
7612218893Sdim
7613239462Sdimbool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
7614239462Sdim  SCEVSearch Search(Op);
7615239462Sdim  visitAll(S, Search);
7616239462Sdim  return Search.IsFound;
7617239462Sdim}
7618239462Sdim
7619218893Sdimvoid ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
7620218893Sdim  ValuesAtScopes.erase(S);
7621218893Sdim  LoopDispositions.erase(S);
7622218893Sdim  BlockDispositions.erase(S);
7623218893Sdim  UnsignedRanges.erase(S);
7624218893Sdim  SignedRanges.erase(S);
7625249423Sdim
7626249423Sdim  for (DenseMap<const Loop*, BackedgeTakenInfo>::iterator I =
7627249423Sdim         BackedgeTakenCounts.begin(), E = BackedgeTakenCounts.end(); I != E; ) {
7628249423Sdim    BackedgeTakenInfo &BEInfo = I->second;
7629249423Sdim    if (BEInfo.hasOperand(S, this)) {
7630249423Sdim      BEInfo.clear();
7631249423Sdim      BackedgeTakenCounts.erase(I++);
7632249423Sdim    }
7633249423Sdim    else
7634249423Sdim      ++I;
7635249423Sdim  }
7636218893Sdim}
7637243830Sdim
// Maps each loop to its stringified backedge-taken count, for verifyAnalysis.
typedef DenseMap<const Loop *, std::string> VerifyMap;
7639243830Sdim
7640243830Sdim/// replaceSubString - Replaces all occurences of From in Str with To.
7641243830Sdimstatic void replaceSubString(std::string &Str, StringRef From, StringRef To) {
7642243830Sdim  size_t Pos = 0;
7643243830Sdim  while ((Pos = Str.find(From, Pos)) != std::string::npos) {
7644243830Sdim    Str.replace(Pos, From.size(), To.data(), To.size());
7645243830Sdim    Pos += To.size();
7646243830Sdim  }
7647243830Sdim}
7648243830Sdim
7649243830Sdim/// getLoopBackedgeTakenCounts - Helper method for verifyAnalysis.
7650243830Sdimstatic void
7651243830SdimgetLoopBackedgeTakenCounts(Loop *L, VerifyMap &Map, ScalarEvolution &SE) {
7652243830Sdim  for (Loop::reverse_iterator I = L->rbegin(), E = L->rend(); I != E; ++I) {
7653243830Sdim    getLoopBackedgeTakenCounts(*I, Map, SE); // recurse.
7654243830Sdim
7655243830Sdim    std::string &S = Map[L];
7656243830Sdim    if (S.empty()) {
7657243830Sdim      raw_string_ostream OS(S);
7658243830Sdim      SE.getBackedgeTakenCount(L)->print(OS);
7659243830Sdim
7660243830Sdim      // false and 0 are semantically equivalent. This can happen in dead loops.
7661243830Sdim      replaceSubString(OS.str(), "false", "0");
7662243830Sdim      // Remove wrap flags, their use in SCEV is highly fragile.
7663243830Sdim      // FIXME: Remove this when SCEV gets smarter about them.
7664243830Sdim      replaceSubString(OS.str(), "<nw>", "");
7665243830Sdim      replaceSubString(OS.str(), "<nsw>", "");
7666243830Sdim      replaceSubString(OS.str(), "<nuw>", "");
7667243830Sdim    }
7668243830Sdim  }
7669243830Sdim}
7670243830Sdim
/// verifyAnalysis - Check (under -verify-scev) that no pass has perturbed the
/// cached backedge-taken counts, by recomputing them from scratch and
/// comparing their printed forms.
void ScalarEvolution::verifyAnalysis() const {
  if (!VerifySCEV)
    return;

  // Recomputing counts below creates SCEVs; as in print(), casting away
  // const is not observable from outside the class.
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);

  // Gather stringified backedge taken counts for all loops using SCEV's caches.
  // FIXME: It would be much better to store actual values instead of strings,
  //        but SCEV pointers will change if we drop the caches.
  VerifyMap BackedgeDumpsOld, BackedgeDumpsNew;
  for (LoopInfo::reverse_iterator I = LI->rbegin(), E = LI->rend(); I != E; ++I)
    getLoopBackedgeTakenCounts(*I, BackedgeDumpsOld, SE);

  // Gather stringified backedge taken counts for all loops without using
  // SCEV's caches.
  SE.releaseMemory();
  for (LoopInfo::reverse_iterator I = LI->rbegin(), E = LI->rend(); I != E; ++I)
    getLoopBackedgeTakenCounts(*I, BackedgeDumpsNew, SE);

  // Now compare whether they're the same with and without caches. This allows
  // verifying that no pass changed the cache.
  assert(BackedgeDumpsOld.size() == BackedgeDumpsNew.size() &&
         "New loops suddenly appeared!");

  // NOTE(review): the parallel iteration below assumes both DenseMaps iterate
  // in the same order; this appears to hold because they are keyed by the
  // same Loop pointers inserted in the same order -- confirm if the DenseMap
  // implementation changes.
  for (VerifyMap::iterator OldI = BackedgeDumpsOld.begin(),
                           OldE = BackedgeDumpsOld.end(),
                           NewI = BackedgeDumpsNew.begin();
       OldI != OldE; ++OldI, ++NewI) {
    assert(OldI->first == NewI->first && "Loop order changed!");

    // Compare the stringified SCEVs. We don't care if undef backedgetaken count
    // changes.
    // FIXME: We currently ignore SCEV changes from/to CouldNotCompute. This
    // means that a pass is buggy or SCEV has to learn a new pattern but is
    // usually not harmful.
    if (OldI->second != NewI->second &&
        OldI->second.find("undef") == std::string::npos &&
        NewI->second.find("undef") == std::string::npos &&
        OldI->second != "***COULDNOTCOMPUTE***" &&
        NewI->second != "***COULDNOTCOMPUTE***") {
      dbgs() << "SCEVValidator: SCEV for loop '"
             << OldI->first->getHeader()->getName()
             << "' changed from '" << OldI->second
             << "' to '" << NewI->second << "'!\n";
      std::abort();
    }
  }

  // TODO: Verify more things.
}
7721