ScalarEvolution.cpp revision 205407
//===- ScalarEvolution.cpp - Scalar Evolution Analysis ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node).  If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
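//
// For example, the canonical induction variable of a loop %L -- a PHI node
// that starts at 0 and is incremented by 1 on each iteration -- is
// represented directly as the recurrence {0,+,1}<%L>, in the notation
// produced by SCEVAddRecExpr::print below.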
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "scalar-evolution"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/GlobalAlias.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

static RegisterPass<ScalarEvolution>
R("scalar-evolution", "Scalar Evolution Analysis", false, true);
char ScalarEvolution::ID = 0;

//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

SCEV::~SCEV() {}

void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isAllOnesValue();
  return false;
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}

bool SCEVCouldNotCompute::isLoopInvariant(const Loop *L) const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

const Type *SCEVCouldNotCompute::getType() const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return 0;
}

bool SCEVCouldNotCompute::hasComputableLoopEvolution(const Loop *L) const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

bool SCEVCouldNotCompute::hasOperand(const SCEV *) const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

void SCEVCouldNotCompute::print(raw_ostream &OS) const {
  OS << "***COULDNOTCOMPUTE***";
}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}
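
// Note: the FoldingSetNodeID / FindNodeOrInsertPos / InsertNode sequence
// above is the uniquing idiom used for every SCEV kind in this file: the
// node kind and operands are hashed into an ID, an existing node is reused
// when one matches, and otherwise a fresh node is allocated from
// SCEVAllocator and registered. This uniquing is what makes the pointer
// comparisons mentioned in the file header a valid equality test.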

const SCEV *ScalarEvolution::getConstant(const APInt& Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) {
  return getConstant(
    ConstantInt::get(cast<IntegerType>(Ty), V, isSigned));
}

const Type *SCEVConstant::getType() const { return V->getType(); }

void SCEVConstant::print(raw_ostream &OS) const {
  WriteAsOperand(OS, V, false);
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
                           unsigned SCEVTy, const SCEV *op, const Type *ty)
  : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}

bool SCEVCastExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return Op->dominates(BB, DT);
}

bool SCEVCastExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
  return Op->properlyDominates(BB, DT);
}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                                   const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate non-integer value!");
}

void SCEVTruncateExpr::print(raw_ostream &OS) const {
  OS << "(trunc " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot zero extend non-integer value!");
}

void SCEVZeroExtendExpr::print(raw_ostream &OS) const {
  OS << "(zext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot sign extend non-integer value!");
}

void SCEVSignExtendExpr::print(raw_ostream &OS) const {
  OS << "(sext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

void SCEVCommutativeExpr::print(raw_ostream &OS) const {
  assert(NumOperands > 1 && "This plus expr shouldn't exist!");
  const char *OpStr = getOperationStr();
  OS << "(" << *Operands[0];
  for (unsigned i = 1, e = NumOperands; i != e; ++i)
    OS << OpStr << *Operands[i];
  OS << ")";
}

bool SCEVNAryExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    if (!getOperand(i)->dominates(BB, DT))
      return false;
  }
  return true;
}

bool SCEVNAryExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    if (!getOperand(i)->properlyDominates(BB, DT))
      return false;
  }
  return true;
}

bool SCEVUDivExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return LHS->dominates(BB, DT) && RHS->dominates(BB, DT);
}

bool SCEVUDivExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
  return LHS->properlyDominates(BB, DT) && RHS->properlyDominates(BB, DT);
}

void SCEVUDivExpr::print(raw_ostream &OS) const {
  OS << "(" << *LHS << " /u " << *RHS << ")";
}

const Type *SCEVUDivExpr::getType() const {
  // In most cases the types of LHS and RHS will be the same, but in some
  // crazy cases one or the other may be a pointer. ScalarEvolution doesn't
  // depend on the type for correctness, but handling types carefully can
  // avoid extra casts in the SCEVExpander. The LHS is more likely to be
  // a pointer type than the RHS, so use the RHS' type here.
  return RHS->getType();
}

bool SCEVAddRecExpr::isLoopInvariant(const Loop *QueryLoop) const {
  // Add recurrences are never invariant in the function-body (null loop).
  if (!QueryLoop)
    return false;

  // This recurrence is variant w.r.t. QueryLoop if QueryLoop contains L.
  if (QueryLoop->contains(L))
    return false;

  // This recurrence is variant w.r.t. QueryLoop if any of its operands
  // are variant.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (!getOperand(i)->isLoopInvariant(QueryLoop))
      return false;

  // Otherwise it's loop-invariant.
  return true;
}

bool
SCEVAddRecExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return DT->dominates(L->getHeader(), BB) &&
         SCEVNAryExpr::dominates(BB, DT);
}

bool
SCEVAddRecExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
  // This uses a "dominates" query instead of a "properly dominates" query
  // because the instruction which produces the addrec's value is a PHI, and
  // a PHI effectively properly dominates its entire containing block.
  return DT->dominates(L->getHeader(), BB) &&
         SCEVNAryExpr::properlyDominates(BB, DT);
}

void SCEVAddRecExpr::print(raw_ostream &OS) const {
  OS << "{" << *Operands[0];
  for (unsigned i = 1, e = NumOperands; i != e; ++i)
    OS << ",+," << *Operands[i];
  OS << "}<";
  WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
  OS << ">";
}

bool SCEVUnknown::isLoopInvariant(const Loop *L) const {
  // All non-instruction values are loop invariant.  All instructions are loop
  // invariant if they are not contained in the specified loop.
  // Instructions are never considered invariant in the function body
  // (null loop) because they are defined within the "loop".
  if (Instruction *I = dyn_cast<Instruction>(V))
    return L && !L->contains(I);
  return true;
}

bool SCEVUnknown::dominates(BasicBlock *BB, DominatorTree *DT) const {
  if (Instruction *I = dyn_cast<Instruction>(getValue()))
    return DT->dominates(I->getParent(), BB);
  return true;
}

bool SCEVUnknown::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
  if (Instruction *I = dyn_cast<Instruction>(getValue()))
    return DT->properlyDominates(I->getParent(), BB);
  return true;
}

const Type *SCEVUnknown::getType() const {
  return V->getType();
}

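// isSizeOf - Recognize the target-independent "sizeof" idiom: a ptrtoint
// of a getelementptr that indexes one element past a null pointer, roughly
//   ptrtoint (Ty* getelementptr (Ty* null, 1) to iN)
// whose value is sizeof(Ty) without hard-coding any target-specific size.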
bool SCEVUnknown::isSizeOf(const Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(V))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                                 ->getElementType();
              return true;
            }

  return false;
}

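// isAlignOf - Recognize the corresponding "alignof" idiom: the offset of
// field 1 within the struct { i1, Ty }, roughly
//   ptrtoint ({ i1, Ty }* getelementptr ({ i1, Ty }* null, 0, 1) to iN)
// which equals the alignment of Ty.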
bool SCEVUnknown::isAlignOf(const Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(V))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          const Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (const StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

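// isOffsetOf - Recognize the "offsetof" idiom, roughly
//   ptrtoint (CTy* getelementptr (CTy* null, 0, FieldNo) to iN)
// which evaluates to the offset of the given field within CTy.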
bool SCEVUnknown::isOffsetOf(const Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(V))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          const Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

void SCEVUnknown::print(raw_ostream &OS) const {
  const Type *AllocTy;
  if (isSizeOf(AllocTy)) {
    OS << "sizeof(" << *AllocTy << ")";
    return;
  }
  if (isAlignOf(AllocTy)) {
    OS << "alignof(" << *AllocTy << ")";
    return;
  }

  const Type *CTy;
  Constant *FieldNo;
  if (isOffsetOf(CTy, FieldNo)) {
    OS << "offsetof(" << *CTy << ", ";
    WriteAsOperand(OS, FieldNo, false);
    OS << ")";
    return;
  }

  // Otherwise just print it normally.
  WriteAsOperand(OS, V, false);
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

static bool CompareTypes(const Type *A, const Type *B) {
  if (A->getTypeID() != B->getTypeID())
    return A->getTypeID() < B->getTypeID();
  if (const IntegerType *AI = dyn_cast<IntegerType>(A)) {
    const IntegerType *BI = cast<IntegerType>(B);
    return AI->getBitWidth() < BI->getBitWidth();
  }
  if (const PointerType *AI = dyn_cast<PointerType>(A)) {
    const PointerType *BI = cast<PointerType>(B);
    return CompareTypes(AI->getElementType(), BI->getElementType());
  }
  if (const ArrayType *AI = dyn_cast<ArrayType>(A)) {
    const ArrayType *BI = cast<ArrayType>(B);
    if (AI->getNumElements() != BI->getNumElements())
      return AI->getNumElements() < BI->getNumElements();
    return CompareTypes(AI->getElementType(), BI->getElementType());
  }
  if (const VectorType *AI = dyn_cast<VectorType>(A)) {
    const VectorType *BI = cast<VectorType>(B);
    if (AI->getNumElements() != BI->getNumElements())
      return AI->getNumElements() < BI->getNumElements();
    return CompareTypes(AI->getElementType(), BI->getElementType());
  }
  if (const StructType *AI = dyn_cast<StructType>(A)) {
    const StructType *BI = cast<StructType>(B);
    if (AI->getNumElements() != BI->getNumElements())
      return AI->getNumElements() < BI->getNumElements();
    for (unsigned i = 0, e = AI->getNumElements(); i != e; ++i)
      if (CompareTypes(AI->getElementType(i), BI->getElementType(i)) ||
          CompareTypes(BI->getElementType(i), AI->getElementType(i)))
        return CompareTypes(AI->getElementType(i), BI->getElementType(i));
  }
  return false;
}

namespace {
  /// SCEVComplexityCompare - Return true if the complexity of the LHS is less
  /// than the complexity of the RHS.  This comparator is used to canonicalize
  /// expressions.
  class SCEVComplexityCompare {
    LoopInfo *LI;
  public:
    explicit SCEVComplexityCompare(LoopInfo *li) : LI(li) {}

    bool operator()(const SCEV *LHS, const SCEV *RHS) const {
      // Fast-path: SCEVs are uniqued so we can do a quick equality check.
      if (LHS == RHS)
        return false;

      // Primarily, sort the SCEVs by their getSCEVType().
      if (LHS->getSCEVType() != RHS->getSCEVType())
        return LHS->getSCEVType() < RHS->getSCEVType();

      // Aside from the getSCEVType() ordering, the particular ordering
      // isn't very important except that it's beneficial to be consistent,
      // so that (a + b) and (b + a) don't end up as different expressions.

      // Sort SCEVUnknown values with some loose heuristics. TODO: This is
      // not as complete as it could be.
      if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS)) {
        const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

        // Order pointer values after integer values. This helps SCEVExpander
        // form GEPs.
        if (LU->getType()->isPointerTy() && !RU->getType()->isPointerTy())
          return false;
        if (RU->getType()->isPointerTy() && !LU->getType()->isPointerTy())
          return true;

        // Compare getValueID values.
        if (LU->getValue()->getValueID() != RU->getValue()->getValueID())
          return LU->getValue()->getValueID() < RU->getValue()->getValueID();

        // Sort arguments by their position.
        if (const Argument *LA = dyn_cast<Argument>(LU->getValue())) {
          const Argument *RA = cast<Argument>(RU->getValue());
          return LA->getArgNo() < RA->getArgNo();
        }

        // For instructions, compare their loop depth, and their opcode.
        // This is pretty loose.
        if (Instruction *LV = dyn_cast<Instruction>(LU->getValue())) {
          Instruction *RV = cast<Instruction>(RU->getValue());

          // Compare loop depths.
          if (LI->getLoopDepth(LV->getParent()) !=
              LI->getLoopDepth(RV->getParent()))
            return LI->getLoopDepth(LV->getParent()) <
                   LI->getLoopDepth(RV->getParent());

          // Compare opcodes.
          if (LV->getOpcode() != RV->getOpcode())
            return LV->getOpcode() < RV->getOpcode();

          // Compare the number of operands.
          if (LV->getNumOperands() != RV->getNumOperands())
            return LV->getNumOperands() < RV->getNumOperands();
        }

        return false;
      }

      // Compare constant values.
      if (const SCEVConstant *LC = dyn_cast<SCEVConstant>(LHS)) {
        const SCEVConstant *RC = cast<SCEVConstant>(RHS);
        if (LC->getValue()->getBitWidth() != RC->getValue()->getBitWidth())
          return LC->getValue()->getBitWidth() < RC->getValue()->getBitWidth();
        return LC->getValue()->getValue().ult(RC->getValue()->getValue());
      }

      // Compare addrec loop depths.
      if (const SCEVAddRecExpr *LA = dyn_cast<SCEVAddRecExpr>(LHS)) {
        const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);
        if (LA->getLoop()->getLoopDepth() != RA->getLoop()->getLoopDepth())
          return LA->getLoop()->getLoopDepth() < RA->getLoop()->getLoopDepth();
      }

      // Lexicographically compare n-ary expressions.
      if (const SCEVNAryExpr *LC = dyn_cast<SCEVNAryExpr>(LHS)) {
        const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);
        for (unsigned i = 0, e = LC->getNumOperands(); i != e; ++i) {
          if (i >= RC->getNumOperands())
            return false;
          if (operator()(LC->getOperand(i), RC->getOperand(i)))
            return true;
          if (operator()(RC->getOperand(i), LC->getOperand(i)))
            return false;
        }
        return LC->getNumOperands() < RC->getNumOperands();
      }

      // Lexicographically compare udiv expressions.
      if (const SCEVUDivExpr *LC = dyn_cast<SCEVUDivExpr>(LHS)) {
        const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);
        if (operator()(LC->getLHS(), RC->getLHS()))
          return true;
        if (operator()(RC->getLHS(), LC->getLHS()))
          return false;
        if (operator()(LC->getRHS(), RC->getRHS()))
          return true;
        if (operator()(RC->getRHS(), LC->getRHS()))
          return false;
        return false;
      }

      // Compare cast expressions by operand.
      if (const SCEVCastExpr *LC = dyn_cast<SCEVCastExpr>(LHS)) {
        const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);
        return operator()(LC->getOperand(), RC->getOperand());
      }

      llvm_unreachable("Unknown SCEV kind!");
      return false;
    }
  };
}

/// GroupByComplexity - Given a list of SCEV objects, order them by their
/// complexity, and group objects of the same complexity together by value.
/// When this routine is finished, we know that any duplicates in the vector are
/// consecutive and that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
///
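/// For example, given the operand list (%x, 2, %y, %x), sorting moves the
/// constant to the front and leaves the duplicate %x operands adjacent,
/// e.g. (2, %x, %x, %y), which lets getAddExpr spot the repeated value and
/// fold it into %x * 2.
///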
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI) {
  if (Ops.size() < 2) return;  // Noop
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    if (SCEVComplexityCompare(LI)(Ops[1], Ops[0]))
      std::swap(Ops[0], Ops[1]);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));

  // Now that we are sorted by complexity, group elements of the same
  // complexity.  Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice.  Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}



//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// BinomialCoefficient - Compute BC(It, K).  The result has width W.
/// Assumes K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       const Type* ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula requires less than W + K bits. Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)
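  //
  // Worked example (illustrative): with W = 32 and K = 3, K! = 6 = 2^1 * 3,
  // so T = 1 and K!/2^T = 3. The product It*(It-1)*(It-2) is computed at
  // W+T = 33 bits, shifted right by T = 1, truncated to 32 bits, and
  // multiplied by the multiplicative inverse of 3 mod 2^32, which is
  // 0xAAAAAAAB (3 * 0xAAAAAAAB = 2^33 + 1 == 1 (mod 2^32)).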

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying to avoid overflow.  Other overflow doesn't matter because
  // we only care about the bottom W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult = Mult.lshr(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step.
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt(CalculationBits, 1).shl(T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W.
  const IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                      CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getIntegerSCEV(i, It->getType()));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T.
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// evaluateAtIteration - Return the value of this chain of recurrences at
/// the specified iteration number.  We can evaluate this recurrence by
/// multiplying each element in the chain by the binomial coefficient
/// corresponding to it.  In other words, we can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
///
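/// For example, the affine recurrence {5,+,3} evaluates to 5 + 3*It, since
/// BC(It, 1) == It, and the quadratic recurrence {0,+,1,+,1} evaluates to
/// It + It*(It-1)/2.
///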
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
                                             const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
      Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
    return getAddRecExpr(Operands, AddRec->getLoop());
  }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
                                               const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
    const Type *IntTy = getEffectiveSCEVType(Ty);
    Constant *C = ConstantExpr::getZExt(SC->getValue(), IntTy);
    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
    return getConstant(cast<ConstantInt>(C));
  }

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoUnsignedWrap())
        return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                             getZeroExtendExpr(Step, Ty),
                             L);

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
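          // (If zero-extending the narrow sum yields the same wide value as
          // the sum computed with each operand zero-extended to twice the
          // width, the narrow computation cannot have wrapped. The same
          // trick is used for the signed case in getSignExtendExpr below.)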
923          const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step);
924          const SCEV *Add = getAddExpr(Start, ZMul);
925          const SCEV *OperandExtendedAdd =
926            getAddExpr(getZeroExtendExpr(Start, WideTy),
927                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
928                                  getZeroExtendExpr(Step, WideTy)));
929          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
930            // Return the expression with the addrec on the outside.
931            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
932                                 getZeroExtendExpr(Step, Ty),
933                                 L);
934
935          // Similar to above, only this time treat the step value as signed.
936          // This covers loops that count down.
937          const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
938          Add = getAddExpr(Start, SMul);
939          OperandExtendedAdd =
940            getAddExpr(getZeroExtendExpr(Start, WideTy),
941                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
942                                  getSignExtendExpr(Step, WideTy)));
943          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
944            // Return the expression with the addrec on the outside.
945            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
946                                 getSignExtendExpr(Step, Ty),
947                                 L);
948        }
949
950        // If the backedge is guarded by a comparison with the pre-inc value
951        // the addrec is safe. Also, if the entry is guarded by a comparison
952        // with the start value and the backedge is guarded by a comparison
953        // with the post-inc value, the addrec is safe.
954        if (isKnownPositive(Step)) {
955          const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
956                                      getUnsignedRange(Step).getUnsignedMax());
957          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
958              (isLoopGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
959               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT,
960                                           AR->getPostIncExpr(*this), N)))
961            // Return the expression with the addrec on the outside.
962            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
963                                 getZeroExtendExpr(Step, Ty),
964                                 L);
965        } else if (isKnownNegative(Step)) {
966          const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
967                                      getSignedRange(Step).getSignedMin());
968          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) &&
969              (isLoopGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) ||
970               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT,
971                                           AR->getPostIncExpr(*this), N)))
972            // Return the expression with the addrec on the outside.
973            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
974                                 getSignExtendExpr(Step, Ty),
975                                 L);
976        }
977      }
978    }
979
980  // The cast wasn't folded; create an explicit cast node.
981  // Recompute the insert position, as it may have been invalidated.
982  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
983  SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
984                                                   Op, Ty);
985  UniqueSCEVs.InsertNode(S, IP);
986  return S;
987}
988
989const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
990                                               const Type *Ty) {
991  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
992         "This is not an extending conversion!");
993  assert(isSCEVable(Ty) &&
994         "This is not a conversion to a SCEVable type!");
995  Ty = getEffectiveSCEVType(Ty);
996
997  // Fold if the operand is constant.
998  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
999    const Type *IntTy = getEffectiveSCEVType(Ty);
1000    Constant *C = ConstantExpr::getSExt(SC->getValue(), IntTy);
1001    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
1002    return getConstant(cast<ConstantInt>(C));
1003  }
1004
1005  // sext(sext(x)) --> sext(x)
1006  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
1007    return getSignExtendExpr(SS->getOperand(), Ty);
1008
1009  // Before doing any expensive analysis, check to see if we've already
1010  // computed a SCEV for this Op and Ty.
1011  FoldingSetNodeID ID;
1012  ID.AddInteger(scSignExtend);
1013  ID.AddPointer(Op);
1014  ID.AddPointer(Ty);
1015  void *IP = 0;
1016  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1017
1018  // If the input value is a chrec scev, and we can prove that the value
1019  // did not overflow the old, smaller, value, we can sign extend all of the
1020  // operands (often constants).  This allows analysis of something like
1021  // this:  for (signed char X = 0; X < 100; ++X) { int Y = X; }
1022  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
1023    if (AR->isAffine()) {
1024      const SCEV *Start = AR->getStart();
1025      const SCEV *Step = AR->getStepRecurrence(*this);
1026      unsigned BitWidth = getTypeSizeInBits(AR->getType());
1027      const Loop *L = AR->getLoop();
1028
1029      // If we have special knowledge that this addrec won't overflow,
1030      // we don't need to do any further analysis.
1031      if (AR->hasNoSignedWrap())
1032        return getAddRecExpr(getSignExtendExpr(Start, Ty),
1033                             getSignExtendExpr(Step, Ty),
1034                             L);
1035
1036      // Check whether the backedge-taken count is SCEVCouldNotCompute.
1037      // Note that this serves two purposes: It filters out loops that are
1038      // simply not analyzable, and it covers the case where this code is
1039      // being called from within backedge-taken count analysis, such that
1040      // attempting to ask for the backedge-taken count would likely result
1041      // in infinite recursion. In the later case, the analysis code will
1042      // cope with a conservative value, and it will take care to purge
1043      // that value once it has finished.
1044      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
1045      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1046        // Manually compute the final value for AR, checking for
1047        // overflow.
1048
1049        // Check whether the backedge-taken count can be losslessly casted to
1050        // the addrec's type. The count is always unsigned.
1051        const SCEV *CastedMaxBECount =
1052          getTruncateOrZeroExtend(MaxBECount, Start->getType());
1053        const SCEV *RecastedMaxBECount =
1054          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
1055        if (MaxBECount == RecastedMaxBECount) {
1056          const Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
1057          // Check whether Start+Step*MaxBECount has no signed overflow.
1058          const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
1059          const SCEV *Add = getAddExpr(Start, SMul);
1060          const SCEV *OperandExtendedAdd =
1061            getAddExpr(getSignExtendExpr(Start, WideTy),
1062                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
1063                                  getSignExtendExpr(Step, WideTy)));
1064          if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd)
1065            // Return the expression with the addrec on the outside.
1066            return getAddRecExpr(getSignExtendExpr(Start, Ty),
1067                                 getSignExtendExpr(Step, Ty),
1068                                 L);
1069
1070          // Similar to above, only this time treat the step value as unsigned.
1071          // This covers loops that count up with an unsigned step.
1072          const SCEV *UMul = getMulExpr(CastedMaxBECount, Step);
1073          Add = getAddExpr(Start, UMul);
1074          OperandExtendedAdd =
1075            getAddExpr(getSignExtendExpr(Start, WideTy),
1076                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
1077                                  getZeroExtendExpr(Step, WideTy)));
1078          if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd)
1079            // Return the expression with the addrec on the outside.
1080            return getAddRecExpr(getSignExtendExpr(Start, Ty),
1081                                 getZeroExtendExpr(Step, Ty),
1082                                 L);
1083        }
1084
1085        // If the backedge is guarded by a comparison with the pre-inc value
1086        // the addrec is safe. Also, if the entry is guarded by a comparison
1087        // with the start value and the backedge is guarded by a comparison
1088        // with the post-inc value, the addrec is safe.
1089        if (isKnownPositive(Step)) {
1090          const SCEV *N = getConstant(APInt::getSignedMinValue(BitWidth) -
1091                                      getSignedRange(Step).getSignedMax());
1092          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT, AR, N) ||
1093              (isLoopGuardedByCond(L, ICmpInst::ICMP_SLT, Start, N) &&
1094               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT,
1095                                           AR->getPostIncExpr(*this), N)))
1096            // Return the expression with the addrec on the outside.
1097            return getAddRecExpr(getSignExtendExpr(Start, Ty),
1098                                 getSignExtendExpr(Step, Ty),
1099                                 L);
1100        } else if (isKnownNegative(Step)) {
1101          const SCEV *N = getConstant(APInt::getSignedMaxValue(BitWidth) -
1102                                      getSignedRange(Step).getSignedMin());
1103          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT, AR, N) ||
1104              (isLoopGuardedByCond(L, ICmpInst::ICMP_SGT, Start, N) &&
1105               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT,
1106                                           AR->getPostIncExpr(*this), N)))
1107            // Return the expression with the addrec on the outside.
1108            return getAddRecExpr(getSignExtendExpr(Start, Ty),
1109                                 getSignExtendExpr(Step, Ty),
1110                                 L);
1111        }
1112      }
1113    }
1114
1115  // The cast wasn't folded; create an explicit cast node.
1116  // Recompute the insert position, as it may have been invalidated.
1117  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1118  SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
1119                                                   Op, Ty);
1120  UniqueSCEVs.InsertNode(S, IP);
1121  return S;
1122}
1123
1124/// getAnyExtendExpr - Return a SCEV for the given operand extended with
1125/// unspecified bits out to the given type.
1126///
1127const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
1128                                              const Type *Ty) {
1129  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1130         "This is not an extending conversion!");
1131  assert(isSCEVable(Ty) &&
1132         "This is not a conversion to a SCEVable type!");
1133  Ty = getEffectiveSCEVType(Ty);
1134
1135  // Sign-extend negative constants.
1136  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1137    if (SC->getValue()->getValue().isNegative())
1138      return getSignExtendExpr(Op, Ty);
1139
1140  // Peel off a truncate cast.
1141  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
1142    const SCEV *NewOp = T->getOperand();
1143    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
1144      return getAnyExtendExpr(NewOp, Ty);
1145    return getTruncateOrNoop(NewOp, Ty);
1146  }
1147
1148  // Next try a zext cast. If the cast is folded, use it.
1149  const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
1150  if (!isa<SCEVZeroExtendExpr>(ZExt))
1151    return ZExt;
1152
1153  // Next try a sext cast. If the cast is folded, use it.
1154  const SCEV *SExt = getSignExtendExpr(Op, Ty);
1155  if (!isa<SCEVSignExtendExpr>(SExt))
1156    return SExt;
1157
1158  // Force the cast to be folded into the operands of an addrec.
1159  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
1160    SmallVector<const SCEV *, 4> Ops;
1161    for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
1162         I != E; ++I)
1163      Ops.push_back(getAnyExtendExpr(*I, Ty));
1164    return getAddRecExpr(Ops, AR->getLoop());
1165  }
1166
1167  // If the expression is obviously signed, use the sext cast value.
1168  if (isa<SCEVSMaxExpr>(Op))
1169    return SExt;
1170
1171  // Absent any other information, use the zext cast value.
1172  return ZExt;
1173}
1174
1175/// CollectAddOperandsWithScales - Process the given Ops list, which is
1176/// a list of operands to be added under the given scale, update the given
1177/// map. This is a helper function for getAddRecExpr. As an example of
1178/// what it does, given a sequence of operands that would form an add
1179/// expression like this:
1180///
1181///    m + n + 13 + (A * (o + p + (B * q + m + 29))) + r + (-1 * r)
1182///
1183/// where A and B are constants, update the map with these values:
1184///
1185///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
1186///
1187/// and add 13 + A*B*29 to AccumulatedConstant.
1188/// This will allow getAddRecExpr to produce this:
1189///
1190///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
1191///
1192/// This form often exposes folding opportunities that are hidden in
1193/// the original operand list.
1194///
1195/// Return true iff it appears that any interesting folding opportunities
1196/// may be exposed. This helps getAddRecExpr short-circuit extra work in
1197/// the common case where no interesting opportunities are present, and
1198/// is also used as a check to avoid infinite recursion.
1199///
1200static bool
1201CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
1202                             SmallVector<const SCEV *, 8> &NewOps,
1203                             APInt &AccumulatedConstant,
1204                             const SCEV *const *Ops, size_t NumOperands,
1205                             const APInt &Scale,
1206                             ScalarEvolution &SE) {
1207  bool Interesting = false;
1208
1209  // Iterate over the add operands.
1210  for (unsigned i = 0, e = NumOperands; i != e; ++i) {
1211    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
1212    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
1213      APInt NewScale =
1214        Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
1215      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
1216        // A multiplication of a constant with another add; recurse.
1217        const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
1218        Interesting |=
1219          CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
1220                                       Add->op_begin(), Add->getNumOperands(),
1221                                       NewScale, SE);
1222      } else {
1223        // A multiplication of a constant with some other value. Update
1224        // the map.
1225        SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
1226        const SCEV *Key = SE.getMulExpr(MulOps);
1227        std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
1228          M.insert(std::make_pair(Key, NewScale));
1229        if (Pair.second) {
1230          NewOps.push_back(Pair.first->first);
1231        } else {
1232          Pair.first->second += NewScale;
1233          // The map already had an entry for this value, which may indicate
1234          // a folding opportunity.
1235          Interesting = true;
1236        }
1237      }
1238    } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
1239      // Pull a buried constant out to the outside.
1240      if (Scale != 1 || AccumulatedConstant != 0 || C->isZero())
1241        Interesting = true;
1242      AccumulatedConstant += Scale * C->getValue()->getValue();
1243    } else {
1244      // An ordinary operand. Update the map.
1245      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
1246        M.insert(std::make_pair(Ops[i], Scale));
1247      if (Pair.second) {
1248        NewOps.push_back(Pair.first->first);
1249      } else {
1250        Pair.first->second += Scale;
1251        // The map already had an entry for this value, which may indicate
1252        // a folding opportunity.
1253        Interesting = true;
1254      }
1255    }
1256  }
1257
1258  return Interesting;
1259}
1260
1261namespace {
1262  struct APIntCompare {
1263    bool operator()(const APInt &LHS, const APInt &RHS) const {
1264      return LHS.ult(RHS);
1265    }
1266  };
1267}
1268
1269/// getAddExpr - Get a canonical add expression, or something simpler if
1270/// possible.
1271const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
1272                                        bool HasNUW, bool HasNSW) {
1273  assert(!Ops.empty() && "Cannot get empty add!");
1274  if (Ops.size() == 1) return Ops[0];
1275#ifndef NDEBUG
1276  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1277    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
1278           getEffectiveSCEVType(Ops[0]->getType()) &&
1279           "SCEVAddExpr operand types don't match!");
1280#endif
1281
1282  // If HasNSW is true and all the operands are non-negative, infer HasNUW.
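  // (With non-negative operands and no signed wrap, every partial sum stays
  // below 2^(bits-1), so the addition cannot wrap unsigned either.)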
1283  if (!HasNUW && HasNSW) {
1284    bool All = true;
1285    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1286      if (!isKnownNonNegative(Ops[i])) {
1287        All = false;
1288        break;
1289      }
1290    if (All) HasNUW = true;
1291  }
1292
1293  // Sort by complexity, this groups all similar expression types together.
1294  GroupByComplexity(Ops, LI);
1295
1296  // If there are any constants, fold them together.
1297  unsigned Idx = 0;
1298  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1299    ++Idx;
1300    assert(Idx < Ops.size());
1301    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1302      // We found two constants, fold them together!
1303      Ops[0] = getConstant(LHSC->getValue()->getValue() +
1304                           RHSC->getValue()->getValue());
1305      if (Ops.size() == 2) return Ops[0];
1306      Ops.erase(Ops.begin()+1);  // Erase the folded element
1307      LHSC = cast<SCEVConstant>(Ops[0]);
1308    }
1309
1310    // If we are left with a constant zero being added, strip it off.
1311    if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
1312      Ops.erase(Ops.begin());
1313      --Idx;
1314    }
1315  }
1316
1317  if (Ops.size() == 1) return Ops[0];
1318
1319  // Okay, check to see if the same value occurs in the operand list twice.  If
1320  // so, merge them together into a multiply expression.  Since we sorted the
1321  // list, these values are required to be adjacent.
1322  const Type *Ty = Ops[0]->getType();
1323  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
1324    if (Ops[i] == Ops[i+1]) {      //  X + Y + Y  -->  X + Y*2
1325      // Found a match, merge the two values into a multiply, and add any
1326      // remaining values to the result.
1327      const SCEV *Two = getIntegerSCEV(2, Ty);
1328      const SCEV *Mul = getMulExpr(Ops[i], Two);
1329      if (Ops.size() == 2)
1330        return Mul;
1331      Ops.erase(Ops.begin()+i, Ops.begin()+i+2);
1332      Ops.push_back(Mul);
1333      return getAddExpr(Ops, HasNUW, HasNSW);
1334    }
1335
1336  // Check for truncates. If all the operands are truncated from the same
1337  // type, see if factoring out the truncate would permit the result to be
1338  // folded. e.g., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
1339  // if the contents of the resulting outer trunc fold to something simple.
1340  for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
1341    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
1342    const Type *DstType = Trunc->getType();
1343    const Type *SrcType = Trunc->getOperand()->getType();
1344    SmallVector<const SCEV *, 8> LargeOps;
1345    bool Ok = true;
1346    // Check all the operands to see if they can be represented in the
1347    // source type of the truncate.
1348    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1349      if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
1350        if (T->getOperand()->getType() != SrcType) {
1351          Ok = false;
1352          break;
1353        }
1354        LargeOps.push_back(T->getOperand());
1355      } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
1356        // This could be either sign or zero extension, but sign extension
1357        // is much more likely to be foldable here.
1358        LargeOps.push_back(getSignExtendExpr(C, SrcType));
1359      } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
1360        SmallVector<const SCEV *, 8> LargeMulOps;
1361        for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
1362          if (const SCEVTruncateExpr *T =
1363                dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
1364            if (T->getOperand()->getType() != SrcType) {
1365              Ok = false;
1366              break;
1367            }
1368            LargeMulOps.push_back(T->getOperand());
1369          } else if (const SCEVConstant *C =
1370                       dyn_cast<SCEVConstant>(M->getOperand(j))) {
1371            // This could be either sign or zero extension, but sign extension
1372            // is much more likely to be foldable here.
1373            LargeMulOps.push_back(getSignExtendExpr(C, SrcType));
1374          } else {
1375            Ok = false;
1376            break;
1377          }
1378        }
1379        if (Ok)
1380          LargeOps.push_back(getMulExpr(LargeMulOps));
1381      } else {
1382        Ok = false;
1383        break;
1384      }
1385    }
1386    if (Ok) {
1387      // Evaluate the expression in the larger type.
1388      const SCEV *Fold = getAddExpr(LargeOps, HasNUW, HasNSW);
1389      // If it folds to something simple, use it. Otherwise, don't.
1390      if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
1391        return getTruncateExpr(Fold, DstType);
1392    }
1393  }
1394
1395  // Skip past any other cast SCEVs.
1396  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
1397    ++Idx;
1398
1399  // If there are add operands they would be next.
1400  if (Idx < Ops.size()) {
1401    bool DeletedAdd = false;
1402    while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
1403      // If we have an add, expand the add operands onto the end of the operands
1404      // list.
1405      Ops.insert(Ops.end(), Add->op_begin(), Add->op_end());
1406      Ops.erase(Ops.begin()+Idx);
1407      DeletedAdd = true;
1408    }
1409
1410    // If we deleted at least one add, we added operands to the end of the list,
1411  // and they are not necessarily sorted.  Recurse to re-sort and re-simplify
1412    // any operands we just acquired.
1413    if (DeletedAdd)
1414      return getAddExpr(Ops);
1415  }
1416
1417  // Skip over the add expression until we get to a multiply.
1418  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
1419    ++Idx;
1420
1421  // Check to see if there are any folding opportunities present with
1422  // operands multiplied by constant values.
1423  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
1424    uint64_t BitWidth = getTypeSizeInBits(Ty);
1425    DenseMap<const SCEV *, APInt> M;
1426    SmallVector<const SCEV *, 8> NewOps;
1427    APInt AccumulatedConstant(BitWidth, 0);
1428    if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
1429                                     Ops.data(), Ops.size(),
1430                                     APInt(BitWidth, 1), *this)) {
1431      // Some interesting folding opportunity is present, so it's worthwhile to
1432      // re-generate the operands list. Group the operands by constant scale,
1433      // to avoid multiplying by the same constant scale multiple times.
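      // For example, 2*x + 4*y + 2*z regroups into 2*(x + z) + 4*y.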
1434      std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
1435      for (SmallVector<const SCEV *, 8>::iterator I = NewOps.begin(),
1436           E = NewOps.end(); I != E; ++I)
1437        MulOpLists[M.find(*I)->second].push_back(*I);
1438      // Re-generate the operands list.
1439      Ops.clear();
1440      if (AccumulatedConstant != 0)
1441        Ops.push_back(getConstant(AccumulatedConstant));
1442      for (std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare>::iterator
1443           I = MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I)
1444        if (I->first != 0)
1445          Ops.push_back(getMulExpr(getConstant(I->first),
1446                                   getAddExpr(I->second)));
1447      if (Ops.empty())
1448        return getIntegerSCEV(0, Ty);
1449      if (Ops.size() == 1)
1450        return Ops[0];
1451      return getAddExpr(Ops);
1452    }
1453  }
1454
1455  // If we are adding something to a multiply expression, make sure the
1456  // something is not already an operand of the multiply.  If so, merge it into
1457  // the multiply.
1458  for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
1459    const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
1460    for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
1461      const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
1462      for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
1463        if (MulOpSCEV == Ops[AddOp] && !isa<SCEVConstant>(Ops[AddOp])) {
1464          // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
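          // Note that getOperand(MulOp == 0) selects the operand other than
          // MulOp when the multiply has exactly two operands; the multi-operand
          // case is handled separately below.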
1465          const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
1466          if (Mul->getNumOperands() != 2) {
1467            // If the multiply has more than two operands, we must get the
1468            // Y*Z term.
1469            SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), Mul->op_end());
1470            MulOps.erase(MulOps.begin()+MulOp);
1471            InnerMul = getMulExpr(MulOps);
1472          }
1473          const SCEV *One = getIntegerSCEV(1, Ty);
1474          const SCEV *AddOne = getAddExpr(InnerMul, One);
1475          const SCEV *OuterMul = getMulExpr(AddOne, Ops[AddOp]);
1476          if (Ops.size() == 2) return OuterMul;
1477          if (AddOp < Idx) {
1478            Ops.erase(Ops.begin()+AddOp);
1479            Ops.erase(Ops.begin()+Idx-1);
1480          } else {
1481            Ops.erase(Ops.begin()+Idx);
1482            Ops.erase(Ops.begin()+AddOp-1);
1483          }
1484          Ops.push_back(OuterMul);
1485          return getAddExpr(Ops);
1486        }
1487
1488      // Check this multiply against other multiplies being added together.
1489      for (unsigned OtherMulIdx = Idx+1;
1490           OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
1491           ++OtherMulIdx) {
1492        const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
1493        // If MulOp occurs in OtherMul, we can fold the two multiplies
1494        // together.
1495        for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
1496             OMulOp != e; ++OMulOp)
1497          if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
1498            // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
1499            const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
1500            if (Mul->getNumOperands() != 2) {
1501              SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
1502                                                  Mul->op_end());
1503              MulOps.erase(MulOps.begin()+MulOp);
1504              InnerMul1 = getMulExpr(MulOps);
1505            }
1506            const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
1507            if (OtherMul->getNumOperands() != 2) {
1508              SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
1509                                                  OtherMul->op_end());
1510              MulOps.erase(MulOps.begin()+OMulOp);
1511              InnerMul2 = getMulExpr(MulOps);
1512            }
1513            const SCEV *InnerMulSum = getAddExpr(InnerMul1,InnerMul2);
1514            const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
1515            if (Ops.size() == 2) return OuterMul;
1516            Ops.erase(Ops.begin()+Idx);
1517            Ops.erase(Ops.begin()+OtherMulIdx-1);
1518            Ops.push_back(OuterMul);
1519            return getAddExpr(Ops);
1520          }
1521      }
1522    }
1523  }
1524
1525  // If there are any add recurrences in the operands list, see if any other
1526  // added values are loop invariant.  If so, we can fold them into the
1527  // recurrence.
1528  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
1529    ++Idx;
1530
1531  // Scan over all recurrences, trying to fold loop invariants into them.
1532  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
1533    // Scan all of the other operands to this add and add them to the vector if
1534    // they are loop invariant w.r.t. the recurrence.
1535    SmallVector<const SCEV *, 8> LIOps;
1536    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
1537    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1538      if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
1539        LIOps.push_back(Ops[i]);
1540        Ops.erase(Ops.begin()+i);
1541        --i; --e;
1542      }
1543
1544    // If we found some loop invariants, fold them into the recurrence.
1545    if (!LIOps.empty()) {
1546      //  NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
1547      LIOps.push_back(AddRec->getStart());
1548
1549      SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
1550                                             AddRec->op_end());
1551      AddRecOps[0] = getAddExpr(LIOps);
1552
1553      // It's tempting to propagate NUW/NSW flags here, but nuw/nsw addition
1554      // is not associative so this isn't necessarily safe.
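      // (Reassociating terms into the AddRec's start can create intermediate
      // sums that wrap even when the original expression did not.)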
1555      const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRec->getLoop());
1556
1557      // If all of the other operands were loop invariant, we are done.
1558      if (Ops.size() == 1) return NewRec;
1559
1560      // Otherwise, add the folded AddRec to the non-loop-invariant parts.
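      // (AddRec is not invariant w.r.t. its own loop, so it is still present
      // in Ops; the scan below is guaranteed to find it.)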
1561      for (unsigned i = 0;; ++i)
1562        if (Ops[i] == AddRec) {
1563          Ops[i] = NewRec;
1564          break;
1565        }
1566      return getAddExpr(Ops);
1567    }
1568
1569    // Okay, if there weren't any loop invariants to be folded, check to see if
1570    // there are multiple AddRec's with the same loop induction variable being
1571    // added together.  If so, we can fold them.
1572    for (unsigned OtherIdx = Idx+1;
1573         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
1574      if (OtherIdx != Idx) {
1575        const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
1576        if (AddRec->getLoop() == OtherAddRec->getLoop()) {
1577          // Other + {A,+,B} + {C,+,D}  -->  Other + {A+C,+,B+D}
1578          SmallVector<const SCEV *, 4> NewOps(AddRec->op_begin(),
1579                                              AddRec->op_end());
1580          for (unsigned i = 0, e = OtherAddRec->getNumOperands(); i != e; ++i) {
1581            if (i >= NewOps.size()) {
1582              NewOps.insert(NewOps.end(), OtherAddRec->op_begin()+i,
1583                            OtherAddRec->op_end());
1584              break;
1585            }
1586            NewOps[i] = getAddExpr(NewOps[i], OtherAddRec->getOperand(i));
1587          }
1588          const SCEV *NewAddRec = getAddRecExpr(NewOps, AddRec->getLoop());
1589
1590          if (Ops.size() == 2) return NewAddRec;
1591
1592          Ops.erase(Ops.begin()+Idx);
1593          Ops.erase(Ops.begin()+OtherIdx-1);
1594          Ops.push_back(NewAddRec);
1595          return getAddExpr(Ops);
1596        }
1597      }
1598
1599    // Otherwise couldn't fold anything into this recurrence.  Move onto the
1600    // next one.
1601  }
1602
1603  // Okay, it looks like we really DO need an add expr.  Check to see if we
1604  // already have one, otherwise create a new one.
1605  FoldingSetNodeID ID;
1606  ID.AddInteger(scAddExpr);
1607  ID.AddInteger(Ops.size());
1608  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1609    ID.AddPointer(Ops[i]);
1610  void *IP = 0;
1611  SCEVAddExpr *S =
1612    static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
1613  if (!S) {
1614    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
1615    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
1616    S = new (SCEVAllocator) SCEVAddExpr(ID.Intern(SCEVAllocator),
1617                                        O, Ops.size());
1618    UniqueSCEVs.InsertNode(S, IP);
1619  }
1620  if (HasNUW) S->setHasNoUnsignedWrap(true);
1621  if (HasNSW) S->setHasNoSignedWrap(true);
1622  return S;
1623}
1624
1625/// getMulExpr - Get a canonical multiply expression, or something simpler if
1626/// possible.
1627const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
1628                                        bool HasNUW, bool HasNSW) {
1629  assert(!Ops.empty() && "Cannot get empty mul!");
1630  if (Ops.size() == 1) return Ops[0];
1631#ifndef NDEBUG
1632  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1633    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
1634           getEffectiveSCEVType(Ops[0]->getType()) &&
1635           "SCEVMulExpr operand types don't match!");
1636#endif
1637
1638  // If HasNSW is true and all the operands are non-negative, infer HasNUW.
1639  if (!HasNUW && HasNSW) {
1640    bool All = true;
1641    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1642      if (!isKnownNonNegative(Ops[i])) {
1643        All = false;
1644        break;
1645      }
1646    if (All) HasNUW = true;
1647  }
1648
1649  // Sort by complexity, this groups all similar expression types together.
1650  GroupByComplexity(Ops, LI);
1651
1652  // If there are any constants, fold them together.
1653  unsigned Idx = 0;
1654  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1655
1656    // C1*(C2+V) -> C1*C2 + C1*V
1657    if (Ops.size() == 2)
1658      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
1659        if (Add->getNumOperands() == 2 &&
1660            isa<SCEVConstant>(Add->getOperand(0)))
1661          return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
1662                            getMulExpr(LHSC, Add->getOperand(1)));
1663
1664    ++Idx;
1665    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1666      // We found two constants, fold them together!
1667      ConstantInt *Fold = ConstantInt::get(getContext(),
1668                                           LHSC->getValue()->getValue() *
1669                                           RHSC->getValue()->getValue());
1670      Ops[0] = getConstant(Fold);
1671      Ops.erase(Ops.begin()+1);  // Erase the folded element
1672      if (Ops.size() == 1) return Ops[0];
1673      LHSC = cast<SCEVConstant>(Ops[0]);
1674    }
1675
1676    // If we are left with a constant one being multiplied, strip it off.
1677    if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) {
1678      Ops.erase(Ops.begin());
1679      --Idx;
1680    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
1681      // If we have a multiply of zero, it will always be zero.
1682      return Ops[0];
1683    } else if (Ops[0]->isAllOnesValue()) {
1684      // If we have a mul by -1 of an add, try distributing the -1 among the
1685      // add operands.
1686      if (Ops.size() == 2)
1687        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
1688          SmallVector<const SCEV *, 4> NewOps;
1689          bool AnyFolded = false;
1690          for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
1691               I != E; ++I) {
1692            const SCEV *Mul = getMulExpr(Ops[0], *I);
1693            if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
1694            NewOps.push_back(Mul);
1695          }
1696          if (AnyFolded)
1697            return getAddExpr(NewOps);
1698        }
1699    }
1700  }
1701
1702  // Skip over the add expression until we get to a multiply.
1703  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
1704    ++Idx;
1705
1706  if (Ops.size() == 1)
1707    return Ops[0];
1708
1709  // If there are mul operands inline them all into this expression.
1710  if (Idx < Ops.size()) {
1711    bool DeletedMul = false;
1712    while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
1713      // If we have a mul, expand the mul operands onto the end of the operands
1714      // list.
1715      Ops.insert(Ops.end(), Mul->op_begin(), Mul->op_end());
1716      Ops.erase(Ops.begin()+Idx);
1717      DeletedMul = true;
1718    }
1719
1720    // If we deleted at least one mul, we added operands to the end of the list,
1721  // and they are not necessarily sorted.  Recurse to re-sort and re-simplify
1722    // any operands we just acquired.
1723    if (DeletedMul)
1724      return getMulExpr(Ops);
1725  }
1726
1727  // If there are any add recurrences in the operands list, see if any other
1728  // multiplied values are loop invariant.  If so, we can fold them into the
1729  // recurrence.
1730  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
1731    ++Idx;
1732
1733  // Scan over all recurrences, trying to fold loop invariants into them.
1734  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
1735    // Scan all of the other operands to this mul and add them to the vector if
1736    // they are loop invariant w.r.t. the recurrence.
1737    SmallVector<const SCEV *, 8> LIOps;
1738    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
1739    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1740      if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
1741        LIOps.push_back(Ops[i]);
1742        Ops.erase(Ops.begin()+i);
1743        --i; --e;
1744      }
1745
1746    // If we found some loop invariants, fold them into the recurrence.
1747    if (!LIOps.empty()) {
1748      //  NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
1749      SmallVector<const SCEV *, 4> NewOps;
1750      NewOps.reserve(AddRec->getNumOperands());
1751      if (LIOps.size() == 1) {
1752        const SCEV *Scale = LIOps[0];
1753        for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
1754          NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
1755      } else {
1756        for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
1757          SmallVector<const SCEV *, 4> MulOps(LIOps.begin(), LIOps.end());
1758          MulOps.push_back(AddRec->getOperand(i));
1759          NewOps.push_back(getMulExpr(MulOps));
1760        }
1761      }
1762
1763      // It's tempting to propagate the NSW flag here, but nsw multiplication
1764      // is not associative so this isn't necessarily safe.
1765      const SCEV *NewRec = getAddRecExpr(NewOps, AddRec->getLoop(),
1766                                         HasNUW && AddRec->hasNoUnsignedWrap(),
1767                                         /*HasNSW=*/false);
1768
1769      // If all of the other operands were loop invariant, we are done.
1770      if (Ops.size() == 1) return NewRec;
1771
1772      // Otherwise, multiply the folded AddRec by the non-loop-invariant parts.
1773      for (unsigned i = 0;; ++i)
1774        if (Ops[i] == AddRec) {
1775          Ops[i] = NewRec;
1776          break;
1777        }
1778      return getMulExpr(Ops);
1779    }
1780
1781    // Okay, if there weren't any loop invariants to be folded, check to see if
1782    // there are multiple AddRec's with the same loop induction variable being
1783    // multiplied together.  If so, we can fold them.
1784    for (unsigned OtherIdx = Idx+1;
1785         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
1786      if (OtherIdx != Idx) {
1787        const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
1788        if (AddRec->getLoop() == OtherAddRec->getLoop()) {
1789          // F * G  -->  {A,+,B} * {C,+,D}  -->  {A*C,+,F*D + G*B + B*D}
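          // This follows from (F+B)*(G+D) = F*G + F*D + G*B + B*D: the last
          // three terms give the per-iteration increase of the product.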
1790          const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec;
1791          const SCEV *NewStart = getMulExpr(F->getStart(),
1792                                            G->getStart());
1793          const SCEV *B = F->getStepRecurrence(*this);
1794          const SCEV *D = G->getStepRecurrence(*this);
1795          const SCEV *NewStep = getAddExpr(getMulExpr(F, D),
1796                                           getMulExpr(G, B),
1797                                           getMulExpr(B, D));
1798          const SCEV *NewAddRec = getAddRecExpr(NewStart, NewStep,
1799                                                F->getLoop());
1800          if (Ops.size() == 2) return NewAddRec;
1801
1802          Ops.erase(Ops.begin()+Idx);
1803          Ops.erase(Ops.begin()+OtherIdx-1);
1804          Ops.push_back(NewAddRec);
1805          return getMulExpr(Ops);
1806        }
1807      }
1808
1809    // Otherwise couldn't fold anything into this recurrence.  Move onto the
1810    // next one.
1811  }
1812
1813  // Okay, it looks like we really DO need an mul expr.  Check to see if we
1814  // already have one, otherwise create a new one.
1815  FoldingSetNodeID ID;
1816  ID.AddInteger(scMulExpr);
1817  ID.AddInteger(Ops.size());
1818  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1819    ID.AddPointer(Ops[i]);
1820  void *IP = 0;
1821  SCEVMulExpr *S =
1822    static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
1823  if (!S) {
1824    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
1825    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
1826    S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
1827                                        O, Ops.size());
1828    UniqueSCEVs.InsertNode(S, IP);
1829  }
1830  if (HasNUW) S->setHasNoUnsignedWrap(true);
1831  if (HasNSW) S->setHasNoSignedWrap(true);
1832  return S;
1833}
1834
1835/// getUDivExpr - Get a canonical unsigned division expression, or something
1836/// simpler if possible.
1837const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
1838                                         const SCEV *RHS) {
1839  assert(getEffectiveSCEVType(LHS->getType()) ==
1840         getEffectiveSCEVType(RHS->getType()) &&
1841         "SCEVUDivExpr operand types don't match!");
1842
1843  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
1844    if (RHSC->getValue()->equalsInt(1))
1845      return LHS;                               // X udiv 1 --> X
1846    if (RHSC->isZero())
1847      return getIntegerSCEV(0, LHS->getType()); // value is undefined
1848
1849    // Determine whether the division can be folded into the operands
1850    // of the dividend.
1851    // TODO: Generalize this to non-constants by using known-bits information.
1852    const Type *Ty = LHS->getType();
1853    unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
1854    unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ;
1855    // For non-power-of-two values, effectively round the value up to the
1856    // nearest power of two.
1857    if (!RHSC->getValue()->getValue().isPowerOf2())
1858      ++MaxShiftAmt;
1859    const IntegerType *ExtTy =
1860      IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
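    // In the folds below, dividing through is treated as safe only when
    // rebuilding the expression in ExtTy from zero-extended operands yields
    // the same SCEV as zero-extending the whole expression, which shows that
    // no intermediate value wraps.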
1861    // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
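    // For example, {0,+,8}/4 --> {0,+,2} (8 is divisible by 4), provided the
    // no-wrap check below succeeds.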
1862    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
1863      if (const SCEVConstant *Step =
1864            dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)))
1865        if (!Step->getValue()->getValue()
1866              .urem(RHSC->getValue()->getValue()) &&
1867            getZeroExtendExpr(AR, ExtTy) ==
1868            getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
1869                          getZeroExtendExpr(Step, ExtTy),
1870                          AR->getLoop())) {
1871          SmallVector<const SCEV *, 4> Operands;
1872          for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
1873            Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
1874          return getAddRecExpr(Operands, AR->getLoop());
1875        }
1876    // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
1877    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
1878      SmallVector<const SCEV *, 4> Operands;
1879      for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
1880        Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
1881      if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
1882        // Find an operand that's safely divisible.
1883        for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
1884          const SCEV *Op = M->getOperand(i);
1885          const SCEV *Div = getUDivExpr(Op, RHSC);
1886          if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
1887            Operands = SmallVector<const SCEV *, 4>(M->op_begin(), M->op_end());
1888            Operands[i] = Div;
1889            return getMulExpr(Operands);
1890          }
1891        }
1892    }
1893    // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
1894    if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(LHS)) {
1895      SmallVector<const SCEV *, 4> Operands;
1896      for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
1897        Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
1898      if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
1899        Operands.clear();
1900        for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
1901          const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
1902          if (isa<SCEVUDivExpr>(Op) || getMulExpr(Op, RHS) != A->getOperand(i))
1903            break;
1904          Operands.push_back(Op);
1905        }
1906        if (Operands.size() == A->getNumOperands())
1907          return getAddExpr(Operands);
1908      }
1909    }
1910
1911    // Fold if both operands are constant.
1912    if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
1913      Constant *LHSCV = LHSC->getValue();
1914      Constant *RHSCV = RHSC->getValue();
1915      return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
1916                                                                 RHSCV)));
1917    }
1918  }
1919
1920  FoldingSetNodeID ID;
1921  ID.AddInteger(scUDivExpr);
1922  ID.AddPointer(LHS);
1923  ID.AddPointer(RHS);
1924  void *IP = 0;
1925  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1926  SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
1927                                             LHS, RHS);
1928  UniqueSCEVs.InsertNode(S, IP);
1929  return S;
1930}
1931
1933/// getAddRecExpr - Get an add recurrence expression for the specified loop.
1934/// Simplify the expression as much as possible.
1935const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start,
1936                                           const SCEV *Step, const Loop *L,
1937                                           bool HasNUW, bool HasNSW) {
1938  SmallVector<const SCEV *, 4> Operands;
1939  Operands.push_back(Start);
1940  if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
1941    if (StepChrec->getLoop() == L) {
1942      Operands.insert(Operands.end(), StepChrec->op_begin(),
1943                      StepChrec->op_end());
1944      return getAddRecExpr(Operands, L);
1945    }
1946
1947  Operands.push_back(Step);
1948  return getAddRecExpr(Operands, L, HasNUW, HasNSW);
1949}
1950
1951/// getAddRecExpr - Get an add recurrence expression for the specified loop.
1952/// Simplify the expression as much as possible.
1953const SCEV *
1954ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
1955                               const Loop *L,
1956                               bool HasNUW, bool HasNSW) {
1957  if (Operands.size() == 1) return Operands[0];
1958#ifndef NDEBUG
1959  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
1960    assert(getEffectiveSCEVType(Operands[i]->getType()) ==
1961           getEffectiveSCEVType(Operands[0]->getType()) &&
1962           "SCEVAddRecExpr operand types don't match!");
1963#endif
1964
1965  if (Operands.back()->isZero()) {
1966    Operands.pop_back();
1967    return getAddRecExpr(Operands, L, HasNUW, HasNSW); // {X,+,0}  -->  X
1968  }
1969
1970  // It's tempting to call getMaxBackedgeTakenCount here and
1971  // use that information to infer NUW and NSW flags. However, computing a
1972  // BE count requires calling getAddRecExpr, so we may not yet have a
1973  // meaningful BE count at this point (and if we don't, we'd be stuck
1974  // with a SCEVCouldNotCompute as the cached BE count).
1975
1976  // If HasNSW is true and all the operands are non-negative, infer HasNUW.
1977  if (!HasNUW && HasNSW) {
1978    bool All = true;
1979    for (unsigned i = 0, e = Operands.size(); i != e; ++i)
1980      if (!isKnownNonNegative(Operands[i])) {
1981        All = false;
1982        break;
1983      }
1984    if (All) HasNUW = true;
1985  }
1986
1987  // Canonicalize nested AddRecs by nesting them in order of loop depth.
1988  if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
1989    const Loop *NestedLoop = NestedAR->getLoop();
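    // The rewrite below turns {{X,+,Y}<NestedLoop>,+,Z}<L> into
    // {{X,+,Z}<L>,+,Y}<NestedLoop> when the loop structure permits it.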
1990    if (L->contains(NestedLoop->getHeader()) ?
1991        (L->getLoopDepth() < NestedLoop->getLoopDepth()) :
1992        (!NestedLoop->contains(L->getHeader()) &&
1993         DT->dominates(L->getHeader(), NestedLoop->getHeader()))) {
1994      SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
1995                                                  NestedAR->op_end());
1996      Operands[0] = NestedAR->getStart();
1997      // AddRecs require their operands be loop-invariant with respect to their
1998      // loops. Don't perform this transformation if it would break this
1999      // requirement.
2000      bool AllInvariant = true;
2001      for (unsigned i = 0, e = Operands.size(); i != e; ++i)
2002        if (!Operands[i]->isLoopInvariant(L)) {
2003          AllInvariant = false;
2004          break;
2005        }
2006      if (AllInvariant) {
2007        NestedOperands[0] = getAddRecExpr(Operands, L);
2008        AllInvariant = true;
2009        for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i)
2010          if (!NestedOperands[i]->isLoopInvariant(NestedLoop)) {
2011            AllInvariant = false;
2012            break;
2013          }
2014        if (AllInvariant)
2015          // Ok, both add recurrences are valid after the transformation.
2016          return getAddRecExpr(NestedOperands, NestedLoop, HasNUW, HasNSW);
2017      }
2018      // Restore Operands[0] to its original value.
2019      Operands[0] = NestedAR;
2020    }
2021  }
2022
2023  // Okay, it looks like we really DO need an addrec expr.  Check to see if we
2024  // already have one, otherwise create a new one.
2025  FoldingSetNodeID ID;
2026  ID.AddInteger(scAddRecExpr);
2027  ID.AddInteger(Operands.size());
2028  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
2029    ID.AddPointer(Operands[i]);
2030  ID.AddPointer(L);
2031  void *IP = 0;
2032  SCEVAddRecExpr *S =
2033    static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2034  if (!S) {
2035    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size());
2036    std::uninitialized_copy(Operands.begin(), Operands.end(), O);
2037    S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator),
2038                                           O, Operands.size(), L);
2039    UniqueSCEVs.InsertNode(S, IP);
2040  }
2041  if (HasNUW) S->setHasNoUnsignedWrap(true);
2042  if (HasNSW) S->setHasNoSignedWrap(true);
2043  return S;
2044}
2045
2046const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
2047                                         const SCEV *RHS) {
2048  SmallVector<const SCEV *, 2> Ops;
2049  Ops.push_back(LHS);
2050  Ops.push_back(RHS);
2051  return getSMaxExpr(Ops);
2052}
2053
2054const SCEV *
2055ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
2056  assert(!Ops.empty() && "Cannot get empty smax!");
2057  if (Ops.size() == 1) return Ops[0];
2058#ifndef NDEBUG
2059  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2060    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
2061           getEffectiveSCEVType(Ops[0]->getType()) &&
2062           "SCEVSMaxExpr operand types don't match!");
2063#endif
2064
2065  // Sort by complexity, this groups all similar expression types together.
2066  GroupByComplexity(Ops, LI);
2067
2068  // If there are any constants, fold them together.
2069  unsigned Idx = 0;
2070  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2071    ++Idx;
2072    assert(Idx < Ops.size());
2073    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2074      // We found two constants, fold them together!
2075      ConstantInt *Fold = ConstantInt::get(getContext(),
2076                              APIntOps::smax(LHSC->getValue()->getValue(),
2077                                             RHSC->getValue()->getValue()));
2078      Ops[0] = getConstant(Fold);
2079      Ops.erase(Ops.begin()+1);  // Erase the folded element
2080      if (Ops.size() == 1) return Ops[0];
2081      LHSC = cast<SCEVConstant>(Ops[0]);
2082    }
2083
2084    // If we are left with a constant minimum-int, strip it off.
2085    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
2086      Ops.erase(Ops.begin());
2087      --Idx;
2088    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
2089      // If we have an smax with a constant maximum-int, it will always be
2090      // maximum-int.
2091      return Ops[0];
2092    }
2093  }
2094
2095  if (Ops.size() == 1) return Ops[0];
2096
2097  // Find the first SMax
2098  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
2099    ++Idx;
2100
2101  // Check to see if one of the operands is an SMax. If so, expand its operands
2102  // onto our operand list, and recurse to simplify.
2103  if (Idx < Ops.size()) {
2104    bool DeletedSMax = false;
2105    while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
2106      Ops.insert(Ops.end(), SMax->op_begin(), SMax->op_end());
2107      Ops.erase(Ops.begin()+Idx);
2108      DeletedSMax = true;
2109    }
2110
2111    if (DeletedSMax)
2112      return getSMaxExpr(Ops);
2113  }
2114
2115  // Okay, check to see if the same value occurs in the operand list twice.  If
2116  // so, delete one.  Since we sorted the list, these values are required to
2117  // be adjacent.
2118  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
2119    if (Ops[i] == Ops[i+1]) {      //  X smax Y smax Y  -->  X smax Y
2120      Ops.erase(Ops.begin()+i);
2121      --i; --e;
2122    }
2123
2124  if (Ops.size() == 1) return Ops[0];
2125
2126  assert(!Ops.empty() && "Reduced smax down to nothing!");
2127
2128  // Okay, it looks like we really DO need an smax expr.  Check to see if we
2129  // already have one, otherwise create a new one.
2130  FoldingSetNodeID ID;
2131  ID.AddInteger(scSMaxExpr);
2132  ID.AddInteger(Ops.size());
2133  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2134    ID.AddPointer(Ops[i]);
2135  void *IP = 0;
2136  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2137  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2138  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2139  SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator),
2140                                             O, Ops.size());
2141  UniqueSCEVs.InsertNode(S, IP);
2142  return S;
2143}
2144
2145const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS,
2146                                         const SCEV *RHS) {
2147  SmallVector<const SCEV *, 2> Ops;
2148  Ops.push_back(LHS);
2149  Ops.push_back(RHS);
2150  return getUMaxExpr(Ops);
2151}
2152
2153const SCEV *
2154ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
2155  assert(!Ops.empty() && "Cannot get empty umax!");
2156  if (Ops.size() == 1) return Ops[0];
2157#ifndef NDEBUG
2158  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2159    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
2160           getEffectiveSCEVType(Ops[0]->getType()) &&
2161           "SCEVUMaxExpr operand types don't match!");
2162#endif
2163
2164  // Sort by complexity, this groups all similar expression types together.
2165  GroupByComplexity(Ops, LI);
2166
2167  // If there are any constants, fold them together.
2168  unsigned Idx = 0;
2169  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2170    ++Idx;
2171    assert(Idx < Ops.size());
2172    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2173      // We found two constants, fold them together!
2174      ConstantInt *Fold = ConstantInt::get(getContext(),
2175                              APIntOps::umax(LHSC->getValue()->getValue(),
2176                                             RHSC->getValue()->getValue()));
2177      Ops[0] = getConstant(Fold);
2178      Ops.erase(Ops.begin()+1);  // Erase the folded element
2179      if (Ops.size() == 1) return Ops[0];
2180      LHSC = cast<SCEVConstant>(Ops[0]);
2181    }
2182
2183    // If we are left with a constant minimum-int, strip it off.
2184    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
2185      Ops.erase(Ops.begin());
2186      --Idx;
2187    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) {
2188      // If we have an umax with a constant maximum-int, it will always be
2189      // maximum-int.
2190      return Ops[0];
2191    }
2192  }
2193
2194  if (Ops.size() == 1) return Ops[0];
2195
2196  // Find the first UMax
2197  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
2198    ++Idx;
2199
2200  // Check to see if one of the operands is a UMax. If so, expand its operands
2201  // onto our operand list, and recurse to simplify.
2202  if (Idx < Ops.size()) {
2203    bool DeletedUMax = false;
2204    while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
2205      Ops.insert(Ops.end(), UMax->op_begin(), UMax->op_end());
2206      Ops.erase(Ops.begin()+Idx);
2207      DeletedUMax = true;
2208    }
2209
2210    if (DeletedUMax)
2211      return getUMaxExpr(Ops);
2212  }
2213
2214  // Okay, check to see if the same value occurs in the operand list twice.  If
2215  // so, delete one.  Since we sorted the list, these values are required to
2216  // be adjacent.
2217  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
2218    if (Ops[i] == Ops[i+1]) {      //  X umax Y umax Y  -->  X umax Y
2219      Ops.erase(Ops.begin()+i);
2220      --i; --e;
2221    }
2222
2223  if (Ops.size() == 1) return Ops[0];
2224
2225  assert(!Ops.empty() && "Reduced umax down to nothing!");
2226
2227  // Okay, it looks like we really DO need a umax expr.  Check to see if we
2228  // already have one, otherwise create a new one.
2229  FoldingSetNodeID ID;
2230  ID.AddInteger(scUMaxExpr);
2231  ID.AddInteger(Ops.size());
2232  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2233    ID.AddPointer(Ops[i]);
2234  void *IP = 0;
2235  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2236  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2237  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2238  SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator),
2239                                             O, Ops.size());
2240  UniqueSCEVs.InsertNode(S, IP);
2241  return S;
2242}
2243
2244const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
2245                                         const SCEV *RHS) {
2246  // ~smax(~x, ~y) == smin(x, y).
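  // (~x = -1 - x reverses the signed order, so taking smax over the
  // complements and complementing again yields the signed minimum.)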
2247  return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
2248}
2249
2250const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
2251                                         const SCEV *RHS) {
2252  // ~umax(~x, ~y) == umin(x, y)
2253  return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
2254}
2255
2256const SCEV *ScalarEvolution::getSizeOfExpr(const Type *AllocTy) {
2257  Constant *C = ConstantExpr::getSizeOf(AllocTy);
2258  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
2259    C = ConstantFoldConstantExpression(CE, TD);
2260  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
2261  return getTruncateOrZeroExtend(getSCEV(C), Ty);
2262}
2263
2264const SCEV *ScalarEvolution::getAlignOfExpr(const Type *AllocTy) {
2265  Constant *C = ConstantExpr::getAlignOf(AllocTy);
2266  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
2267    C = ConstantFoldConstantExpression(CE, TD);
2268  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
2269  return getTruncateOrZeroExtend(getSCEV(C), Ty);
2270}
2271
2272const SCEV *ScalarEvolution::getOffsetOfExpr(const StructType *STy,
2273                                             unsigned FieldNo) {
2274  Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);
2275  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
2276    C = ConstantFoldConstantExpression(CE, TD);
2277  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
2278  return getTruncateOrZeroExtend(getSCEV(C), Ty);
2279}
2280
2281const SCEV *ScalarEvolution::getOffsetOfExpr(const Type *CTy,
2282                                             Constant *FieldNo) {
2283  Constant *C = ConstantExpr::getOffsetOf(CTy, FieldNo);
2284  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
2285    C = ConstantFoldConstantExpression(CE, TD);
2286  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(CTy));
2287  return getTruncateOrZeroExtend(getSCEV(C), Ty);
2288}
2289
2290const SCEV *ScalarEvolution::getUnknown(Value *V) {
2291  // Don't attempt to do anything other than create a SCEVUnknown object
2292  // here.  createSCEV only calls getUnknown after checking for all other
2293  // interesting possibilities, and any other code that calls getUnknown
2294  // is doing so in order to hide a value from SCEV canonicalization.
2295
2296  FoldingSetNodeID ID;
2297  ID.AddInteger(scUnknown);
2298  ID.AddPointer(V);
2299  void *IP = 0;
2300  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2301  SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V);
2302  UniqueSCEVs.InsertNode(S, IP);
2303  return S;
2304}
2305
2306//===----------------------------------------------------------------------===//
2307//            Basic SCEV Analysis and PHI Idiom Recognition Code
2308//
2309
2310/// isSCEVable - Test if values of the given type are analyzable within
2311/// the SCEV framework. This primarily includes integer types, and it
2312/// can optionally include pointer types if the ScalarEvolution class
2313/// has access to target-specific information.
2314bool ScalarEvolution::isSCEVable(const Type *Ty) const {
2315  // Integers and pointers are always SCEVable.
2316  return Ty->isIntegerTy() || Ty->isPointerTy();
2317}
2318
2319/// getTypeSizeInBits - Return the size in bits of the specified type,
2320/// for which isSCEVable must return true.
2321uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const {
2322  assert(isSCEVable(Ty) && "Type is not SCEVable!");
2323
2324  // If we have a TargetData, use it!
2325  if (TD)
2326    return TD->getTypeSizeInBits(Ty);
2327
2328  // Integer types have fixed sizes.
2329  if (Ty->isIntegerTy())
2330    return Ty->getPrimitiveSizeInBits();
2331
2332  // The only other supported type is pointer. Without TargetData, conservatively
2333  // assume pointers are 64-bit.
2334  assert(Ty->isPointerTy() && "isSCEVable permitted a non-SCEVable type!");
2335  return 64;
2336}
2337
2338/// getEffectiveSCEVType - Return a type with the same bitwidth as
2339/// the given type and which represents how SCEV will treat the given
2340/// type, for which isSCEVable must return true. For pointer types,
2341/// this is the pointer-sized integer type.
2342const Type *ScalarEvolution::getEffectiveSCEVType(const Type *Ty) const {
2343  assert(isSCEVable(Ty) && "Type is not SCEVable!");
2344
2345  if (Ty->isIntegerTy())
2346    return Ty;
2347
2348  // The only other supported type is pointer.
2349  assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
2350  if (TD) return TD->getIntPtrType(getContext());
2351
2352  // Without TargetData, conservatively assume pointers are 64-bit.
2353  return Type::getInt64Ty(getContext());
2354}
2355
2356const SCEV *ScalarEvolution::getCouldNotCompute() {
2357  return &CouldNotCompute;
2358}
2359
2360/// getSCEV - Return an existing SCEV if it exists, otherwise analyze the
2361/// expression and create a new one.
2362const SCEV *ScalarEvolution::getSCEV(Value *V) {
2363  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
2364
2365  std::map<SCEVCallbackVH, const SCEV *>::iterator I = Scalars.find(V);
2366  if (I != Scalars.end()) return I->second;
2367  const SCEV *S = createSCEV(V);
2368  Scalars.insert(std::make_pair(SCEVCallbackVH(V, this), S));
2369  return S;
2370}
2371
2372/// getIntegerSCEV - Given a SCEVable type, create a constant for the
2373/// specified signed integer value and return a SCEV for the constant.
2374const SCEV *ScalarEvolution::getIntegerSCEV(int64_t Val, const Type *Ty) {
2375  const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
2376  return getConstant(ConstantInt::get(ITy, Val));
2377}
2378
2379/// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
2380///
2381const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
2382  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
2383    return getConstant(
2384               cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));
2385
2386  const Type *Ty = V->getType();
2387  Ty = getEffectiveSCEVType(Ty);
2388  return getMulExpr(V,
2389                  getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))));
2390}
2391
2392/// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
2393const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
2394  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
2395    return getConstant(
2396                cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));
2397
2398  const Type *Ty = V->getType();
2399  Ty = getEffectiveSCEVType(Ty);
2400  const SCEV *AllOnes =
2401                   getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)));
2402  return getMinusSCEV(AllOnes, V);
2403}
2404
2405/// getMinusSCEV - Return a SCEV corresponding to LHS - RHS.
2406///
2407const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS,
2408                                          const SCEV *RHS) {
2409  // X - Y --> X + -Y
2410  return getAddExpr(LHS, getNegativeSCEV(RHS));
2411}
2412
2413/// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the
2414/// input value to the specified type.  If the type must be extended, it is zero
2415/// extended.
2416const SCEV *
2417ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V,
2418                                         const Type *Ty) {
2419  const Type *SrcTy = V->getType();
2420  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2421         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2422         "Cannot truncate or zero extend with non-integer arguments!");
2423  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2424    return V;  // No conversion
2425  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2426    return getTruncateExpr(V, Ty);
2427  return getZeroExtendExpr(V, Ty);
2428}
2429
2430/// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of the
2431/// input value to the specified type.  If the type must be extended, it is sign
2432/// extended.
2433const SCEV *
2434ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
2435                                         const Type *Ty) {
2436  const Type *SrcTy = V->getType();
2437  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2438         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2439         "Cannot truncate or zero extend with non-integer arguments!");
2440  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2441    return V;  // No conversion
2442  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2443    return getTruncateExpr(V, Ty);
2444  return getSignExtendExpr(V, Ty);
2445}
2446
2447/// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the
2448/// input value to the specified type.  If the type must be extended, it is zero
2449/// extended.  The conversion must not be narrowing.
2450const SCEV *
2451ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, const Type *Ty) {
2452  const Type *SrcTy = V->getType();
2453  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2454         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2455         "Cannot noop or zero extend with non-integer arguments!");
2456  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2457         "getNoopOrZeroExtend cannot truncate!");
2458  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2459    return V;  // No conversion
2460  return getZeroExtendExpr(V, Ty);
2461}
2462
2463/// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the
2464/// input value to the specified type.  If the type must be extended, it is sign
2465/// extended.  The conversion must not be narrowing.
2466const SCEV *
2467ScalarEvolution::getNoopOrSignExtend(const SCEV *V, const Type *Ty) {
2468  const Type *SrcTy = V->getType();
2469  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2470         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2471         "Cannot noop or sign extend with non-integer arguments!");
2472  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2473         "getNoopOrSignExtend cannot truncate!");
2474  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2475    return V;  // No conversion
2476  return getSignExtendExpr(V, Ty);
2477}
2478
2479/// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of
2480/// the input value to the specified type. If the type must be extended,
2481/// it is extended with unspecified bits. The conversion must not be
2482/// narrowing.
2483const SCEV *
2484ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, const Type *Ty) {
2485  const Type *SrcTy = V->getType();
2486  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2487         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2488         "Cannot noop or any extend with non-integer arguments!");
2489  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2490         "getNoopOrAnyExtend cannot truncate!");
2491  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2492    return V;  // No conversion
2493  return getAnyExtendExpr(V, Ty);
2494}
2495
2496/// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
2497/// input value to the specified type.  The conversion must not be widening.
2498const SCEV *
2499ScalarEvolution::getTruncateOrNoop(const SCEV *V, const Type *Ty) {
2500  const Type *SrcTy = V->getType();
2501  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2502         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2503         "Cannot truncate or noop with non-integer arguments!");
2504  assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
2505         "getTruncateOrNoop cannot extend!");
2506  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2507    return V;  // No conversion
2508  return getTruncateExpr(V, Ty);
2509}
2510
2511/// getUMaxFromMismatchedTypes - Promote the operands to the wider of
2512/// the types using zero-extension, and then perform a umax operation
2513/// with them.
2514const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
2515                                                        const SCEV *RHS) {
2516  const SCEV *PromotedLHS = LHS;
2517  const SCEV *PromotedRHS = RHS;
2518
2519  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2520    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2521  else
2522    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2523
2524  return getUMaxExpr(PromotedLHS, PromotedRHS);
2525}
2526
2527/// getUMinFromMismatchedTypes - Promote the operands to the wider of
2528/// the types using zero-extension, and then perform a umin operation
2529/// with them.
2530const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
2531                                                        const SCEV *RHS) {
2532  const SCEV *PromotedLHS = LHS;
2533  const SCEV *PromotedRHS = RHS;
2534
2535  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2536    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2537  else
2538    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2539
2540  return getUMinExpr(PromotedLHS, PromotedRHS);
2541}
2542
2543/// PushDefUseChildren - Push users of the given Instruction
2544/// onto the given Worklist.
2545static void
2546PushDefUseChildren(Instruction *I,
2547                   SmallVectorImpl<Instruction *> &Worklist) {
2548  // Push the def-use children onto the Worklist stack.
2549  for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
2550       UI != UE; ++UI)
2551    Worklist.push_back(cast<Instruction>(UI));
2552}
2553
2554/// ForgetSymbolicName - This looks up computed SCEV values for all
2555/// instructions that depend on the given instruction and removes them from
2556/// the Scalars map if they reference SymName. This is used during PHI
2557/// resolution.
2558void
2559ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) {
2560  SmallVector<Instruction *, 16> Worklist;
2561  PushDefUseChildren(PN, Worklist);
2562
2563  SmallPtrSet<Instruction *, 8> Visited;
2564  Visited.insert(PN);
2565  while (!Worklist.empty()) {
2566    Instruction *I = Worklist.pop_back_val();
2567    if (!Visited.insert(I)) continue;
2568
2569    std::map<SCEVCallbackVH, const SCEV *>::iterator It =
2570      Scalars.find(static_cast<Value *>(I));
2571    if (It != Scalars.end()) {
2572      // Short-circuit the def-use traversal if the symbolic name
2573      // ceases to appear in expressions.
2574      if (It->second != SymName && !It->second->hasOperand(SymName))
2575        continue;
2576
2577      // SCEVUnknown for a PHI either means that it has an unrecognized
2578      // structure, it's a PHI that's in the process of being computed
2579      // by createNodeForPHI, or it's a single-value PHI. In the first case,
2580      // additional loop trip count information isn't going to change anything.
2581      // In the second case, createNodeForPHI will perform the necessary
2582      // updates on its own when it gets to that point. In the third, we do
2583      // want to forget the SCEVUnknown.
2584      if (!isa<PHINode>(I) ||
2585          !isa<SCEVUnknown>(It->second) ||
2586          (I != PN && It->second == SymName)) {
2587        ValuesAtScopes.erase(It->second);
2588        Scalars.erase(It);
2589      }
2590    }
2591
2592    PushDefUseChildren(I, Worklist);
2593  }
2594}
2595
2596/// createNodeForPHI - PHI nodes have two cases.  Either the PHI node exists in
2597/// a loop header, making it a potential recurrence, or it doesn't.
2598///
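/// For example, the induction variable of
///     for (i = 0; i != n; ++i) ...
/// is a header PHI whose SCEV is the recurrence {0,+,1}<L>.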
2599const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
2600  if (PN->getNumIncomingValues() == 2)  // The loops have been canonicalized.
2601    if (const Loop *L = LI->getLoopFor(PN->getParent()))
2602      if (L->getHeader() == PN->getParent()) {
2603        // If it lives in the loop header, it has two incoming values, one
2604        // from outside the loop, and one from inside.
2605        unsigned IncomingEdge = L->contains(PN->getIncomingBlock(0));
2606        unsigned BackEdge     = IncomingEdge^1;
2607
2608        // While we are analyzing this PHI node, handle its value symbolically.
2609        const SCEV *SymbolicName = getUnknown(PN);
2610        assert(Scalars.find(PN) == Scalars.end() &&
2611               "PHI node already processed?");
2612        Scalars.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));
2613
2614        // Using this symbolic name for the PHI, analyze the value coming around
2615        // the back-edge.
2616        Value *BEValueV = PN->getIncomingValue(BackEdge);
2617        const SCEV *BEValue = getSCEV(BEValueV);
2618
2619        // NOTE: If BEValue is loop invariant, we know that the PHI node just
2620        // has a special value for the first iteration of the loop.
2621
2622        // If the value coming around the backedge is an add with the symbolic
2623        // value we just inserted, then we found a simple induction variable!
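        // For example, if BEValue is (%pn + %step), where %pn is the
        // symbolic name for this PHI, then Accum is %step and the PHI
        // becomes the recurrence {StartVal,+,%step}<L>.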
2624        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
2625          // If there is a single occurrence of the symbolic value, replace it
2626          // with a recurrence.
2627          unsigned FoundIndex = Add->getNumOperands();
2628          for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
2629            if (Add->getOperand(i) == SymbolicName)
2630              if (FoundIndex == e) {
2631                FoundIndex = i;
2632                break;
2633              }
2634
2635          if (FoundIndex != Add->getNumOperands()) {
2636            // Create an add with everything but the specified operand.
2637            SmallVector<const SCEV *, 8> Ops;
2638            for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
2639              if (i != FoundIndex)
2640                Ops.push_back(Add->getOperand(i));
2641            const SCEV *Accum = getAddExpr(Ops);
2642
2643            // This is not a valid addrec if the step amount varies with
2644            // each loop iteration, but is not itself an addrec in this loop.
2645            if (Accum->isLoopInvariant(L) ||
2646                (isa<SCEVAddRecExpr>(Accum) &&
2647                 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
2648              bool HasNUW = false;
2649              bool HasNSW = false;
2650
2651              // If the increment doesn't overflow, then neither the addrec nor
2652              // the post-increment will overflow.
2653              if (const AddOperator *OBO = dyn_cast<AddOperator>(BEValueV)) {
2654                if (OBO->hasNoUnsignedWrap())
2655                  HasNUW = true;
2656                if (OBO->hasNoSignedWrap())
2657                  HasNSW = true;
2658              }
2659
2660              const SCEV *StartVal =
2661                getSCEV(PN->getIncomingValue(IncomingEdge));
2662              const SCEV *PHISCEV =
2663                getAddRecExpr(StartVal, Accum, L, HasNUW, HasNSW);
2664
2665              // Since the no-wrap flags are on the increment, they apply to the
2666              // post-incremented value as well.
2667              if (Accum->isLoopInvariant(L))
2668                (void)getAddRecExpr(getAddExpr(StartVal, Accum),
2669                                    Accum, L, HasNUW, HasNSW);
2670
2671              // Okay, for the entire analysis of this edge we assumed the PHI
2672              // to be symbolic.  We now need to go back and purge all of the
2673              // entries for the scalars that use the symbolic expression.
2674              ForgetSymbolicName(PN, SymbolicName);
2675              Scalars[SCEVCallbackVH(PN, this)] = PHISCEV;
2676              return PHISCEV;
2677            }
2678          }
2679        } else if (const SCEVAddRecExpr *AddRec =
2680                     dyn_cast<SCEVAddRecExpr>(BEValue)) {
2681          // Otherwise, this could be a loop like this:
2682          //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
2683          // In this case, j = {1,+,1}  and BEValue is j.
2684          // Because the other in-value of i (0) fits the evolution of BEValue,
2685          // i really is an addrec evolution.
2686          if (AddRec->getLoop() == L && AddRec->isAffine()) {
2687            const SCEV *StartVal = getSCEV(PN->getIncomingValue(IncomingEdge));
2688
2689            // If StartVal = j.start - j.stride, we can use StartVal as the
2690            // start of the addrec evolution.
2691            if (StartVal == getMinusSCEV(AddRec->getOperand(0),
2692                                            AddRec->getOperand(1))) {
2693              const SCEV *PHISCEV =
2694                 getAddRecExpr(StartVal, AddRec->getOperand(1), L);
2695
2696              // Okay, for the entire analysis of this edge we assumed the PHI
2697              // to be symbolic.  We now need to go back and purge all of the
2698              // entries for the scalars that use the symbolic expression.
2699              ForgetSymbolicName(PN, SymbolicName);
2700              Scalars[SCEVCallbackVH(PN, this)] = PHISCEV;
2701              return PHISCEV;
2702            }
2703          }
2704        }
2705
2706        return SymbolicName;
2707      }
2708
2709  // If the PHI has a single incoming value, follow that value, unless the
2710  // PHI's incoming blocks are in a different loop, in which case doing so
2711  // risks breaking LCSSA form. Instcombine would normally zap these, but
2712  // it doesn't have DominatorTree information, so it may miss cases.
2713  if (Value *V = PN->hasConstantValue(DT)) {
2714    bool AllSameLoop = true;
2715    Loop *PNLoop = LI->getLoopFor(PN->getParent());
2716    for (size_t i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
2717      if (LI->getLoopFor(PN->getIncomingBlock(i)) != PNLoop) {
2718        AllSameLoop = false;
2719        break;
2720      }
2721    if (AllSameLoop)
2722      return getSCEV(V);
2723  }
2724
2725  // If it's not a loop phi, we can't handle it yet.
2726  return getUnknown(PN);
2727}
2728
2729/// createNodeForGEP - Expand GEP instructions into add and multiply
2730/// operations. This allows them to be analyzed by regular SCEV code.
2731///
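/// For example, "getelementptr [10 x i32]* %a, i64 0, i64 %i" becomes
/// (%a + 4 * %i) on a target where i32 occupies 4 bytes.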
2732const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
2733
2734  bool InBounds = GEP->isInBounds();
2735  const Type *IntPtrTy = getEffectiveSCEVType(GEP->getType());
2736  Value *Base = GEP->getOperand(0);
2737  // Don't attempt to analyze GEPs over unsized objects.
2738  if (!cast<PointerType>(Base->getType())->getElementType()->isSized())
2739    return getUnknown(GEP);
2740  const SCEV *TotalOffset = getIntegerSCEV(0, IntPtrTy);
2741  gep_type_iterator GTI = gep_type_begin(GEP);
2742  for (GetElementPtrInst::op_iterator I = next(GEP->op_begin()),
2743                                      E = GEP->op_end();
2744       I != E; ++I) {
2745    Value *Index = *I;
2746    // Compute the (potentially symbolic) offset in bytes for this index.
2747    if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
2748      // For a struct, add the member offset.
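      // For example, indexing field #1 of the struct {i32, i32} adds an
      // offset of 4 bytes on a typical target.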
2749      unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
2750      TotalOffset = getAddExpr(TotalOffset,
2751                               getOffsetOfExpr(STy, FieldNo),
2752                               /*HasNUW=*/false, /*HasNSW=*/InBounds);
2753    } else {
2754      // For an array, add the element offset, explicitly scaled.
2755      const SCEV *LocalOffset = getSCEV(Index);
2756      // Getelementptr indices are signed.
2757      LocalOffset = getTruncateOrSignExtend(LocalOffset, IntPtrTy);
2758      // Lower "inbounds" GEPs to NSW arithmetic.
2759      LocalOffset = getMulExpr(LocalOffset, getSizeOfExpr(*GTI),
2760                               /*HasNUW=*/false, /*HasNSW=*/InBounds);
2761      TotalOffset = getAddExpr(TotalOffset, LocalOffset,
2762                               /*HasNUW=*/false, /*HasNSW=*/InBounds);
2763    }
2764  }
2765  return getAddExpr(getSCEV(Base), TotalOffset,
2766                    /*HasNUW=*/false, /*HasNSW=*/InBounds);
2767}
2768
2769/// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
2770/// guaranteed to end in (at every loop iteration).  It is, at the same time,
2771/// the minimum number of times S is divisible by 2.  For example, given {4,+,8}
2772/// it returns 2.  If S is guaranteed to be 0, it returns the bitwidth of S.
2773uint32_t
2774ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
2775  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2776    return C->getValue()->getValue().countTrailingZeros();
2777
2778  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
2779    return std::min(GetMinTrailingZeros(T->getOperand()),
2780                    (uint32_t)getTypeSizeInBits(T->getType()));
2781
2782  if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
2783    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
2784    return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
2785             getTypeSizeInBits(E->getType()) : OpRes;
2786  }
2787
2788  if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
2789    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
2790    return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
2791             getTypeSizeInBits(E->getType()) : OpRes;
2792  }
2793
2794  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
2795    // The result is the min of all operands' results.
2796    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
2797    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
2798      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
2799    return MinOpRes;
2800  }
2801
2802  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
2803    // The result is the sum of all operands' results.
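    // For example, if one factor is divisible by 4 (two trailing zeros) and
    // another by 2 (one trailing zero), the product is divisible by 8
    // (three trailing zeros), capped at the bitwidth.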
2804    uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
2805    uint32_t BitWidth = getTypeSizeInBits(M->getType());
2806    for (unsigned i = 1, e = M->getNumOperands();
2807         SumOpRes != BitWidth && i != e; ++i)
2808      SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)),
2809                          BitWidth);
2810    return SumOpRes;
2811  }
2812
2813  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
2814    // The result is the min of all operands' results.
2815    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
2816    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
2817      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
2818    return MinOpRes;
2819  }
2820
2821  if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
2822    // The result is the min of all operands' results.
2823    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
2824    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
2825      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
2826    return MinOpRes;
2827  }
2828
2829  if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
2830    // The result is the min of all operands' results.
2831    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
2832    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
2833      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
2834    return MinOpRes;
2835  }
2836
2837  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2838    // For a SCEVUnknown, ask ValueTracking.
2839    unsigned BitWidth = getTypeSizeInBits(U->getType());
2840    APInt Mask = APInt::getAllOnesValue(BitWidth);
2841    APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
2842    ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones);
2843    return Zeros.countTrailingOnes();
2844  }
2845
2846  // SCEVUDivExpr
2847  return 0;
2848}
2849
2850/// getUnsignedRange - Determine the unsigned range for a particular SCEV.
2851///
2852ConstantRange
2853ScalarEvolution::getUnsignedRange(const SCEV *S) {
2854
2855  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2856    return ConstantRange(C->getValue()->getValue());
2857
2858  unsigned BitWidth = getTypeSizeInBits(S->getType());
2859  ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
2860
2861  // If the value has known zeros, the maximum unsigned value will have those
2862  // known zeros as well.
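  // For example, with a bitwidth of 8 and TZ == 2 this gives [0, 253),
  // since 252 is the largest i8 multiple of 4.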
2863  uint32_t TZ = GetMinTrailingZeros(S);
2864  if (TZ != 0)
2865    ConservativeResult =
2866      ConstantRange(APInt::getMinValue(BitWidth),
2867                    APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1);
2868
2869  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
2870    ConstantRange X = getUnsignedRange(Add->getOperand(0));
2871    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
2872      X = X.add(getUnsignedRange(Add->getOperand(i)));
2873    return ConservativeResult.intersectWith(X);
2874  }
2875
2876  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
2877    ConstantRange X = getUnsignedRange(Mul->getOperand(0));
2878    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
2879      X = X.multiply(getUnsignedRange(Mul->getOperand(i)));
2880    return ConservativeResult.intersectWith(X);
2881  }
2882
2883  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
2884    ConstantRange X = getUnsignedRange(SMax->getOperand(0));
2885    for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
2886      X = X.smax(getUnsignedRange(SMax->getOperand(i)));
2887    return ConservativeResult.intersectWith(X);
2888  }
2889
2890  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
2891    ConstantRange X = getUnsignedRange(UMax->getOperand(0));
2892    for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
2893      X = X.umax(getUnsignedRange(UMax->getOperand(i)));
2894    return ConservativeResult.intersectWith(X);
2895  }
2896
2897  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
2898    ConstantRange X = getUnsignedRange(UDiv->getLHS());
2899    ConstantRange Y = getUnsignedRange(UDiv->getRHS());
2900    return ConservativeResult.intersectWith(X.udiv(Y));
2901  }
2902
2903  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
2904    ConstantRange X = getUnsignedRange(ZExt->getOperand());
2905    return ConservativeResult.intersectWith(X.zeroExtend(BitWidth));
2906  }
2907
2908  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
2909    ConstantRange X = getUnsignedRange(SExt->getOperand());
2910    return ConservativeResult.intersectWith(X.signExtend(BitWidth));
2911  }
2912
2913  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
2914    ConstantRange X = getUnsignedRange(Trunc->getOperand());
2915    return ConservativeResult.intersectWith(X.truncate(BitWidth));
2916  }
2917
2918  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
2919    // If there's no unsigned wrap, the value will never be less than its
2920    // initial value.
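    // For example, {4,+,1}<nuw> is known to lie in the wrapped range [4, 0),
    // i.e. it is always at least 4.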
2921    if (AddRec->hasNoUnsignedWrap())
2922      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart()))
2923        ConservativeResult =
2924          ConstantRange(C->getValue()->getValue(),
2925                        APInt(getTypeSizeInBits(C->getType()), 0));
2926
2927    // TODO: non-affine addrec
2928    if (AddRec->isAffine()) {
2929      const Type *Ty = AddRec->getType();
2930      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
2931      if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
2932          getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
2933        MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
2934
2935        const SCEV *Start = AddRec->getStart();
2936        const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);
2937
2938        // Check for overflow.
2939        if (!AddRec->hasNoUnsignedWrap())
2940          return ConservativeResult;
2941
2942        ConstantRange StartRange = getUnsignedRange(Start);
2943        ConstantRange EndRange = getUnsignedRange(End);
2944        APInt Min = APIntOps::umin(StartRange.getUnsignedMin(),
2945                                   EndRange.getUnsignedMin());
2946        APInt Max = APIntOps::umax(StartRange.getUnsignedMax(),
2947                                   EndRange.getUnsignedMax());
2948        if (Min.isMinValue() && Max.isMaxValue())
2949          return ConservativeResult;
2950        return ConservativeResult.intersectWith(ConstantRange(Min, Max+1));
2951      }
2952    }
2953
2954    return ConservativeResult;
2955  }
2956
2957  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2958    // For a SCEVUnknown, ask ValueTracking.
2959    APInt Mask = APInt::getAllOnesValue(BitWidth);
2960    APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
2961    ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD);
2962    if (Ones == ~Zeros + 1)
2963      return ConservativeResult;
2964    return ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1));
2965  }
2966
2967  return ConservativeResult;
2968}
2969
2970/// getSignedRange - Determine the signed range for a particular SCEV.
2971///
2972ConstantRange
2973ScalarEvolution::getSignedRange(const SCEV *S) {
2974
2975  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2976    return ConstantRange(C->getValue()->getValue());
2977
2978  unsigned BitWidth = getTypeSizeInBits(S->getType());
2979  ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
2980
2981  // If the value has known zeros, the maximum signed value will have those
2982  // known zeros as well.
2983  uint32_t TZ = GetMinTrailingZeros(S);
2984  if (TZ != 0)
2985    ConservativeResult =
2986      ConstantRange(APInt::getSignedMinValue(BitWidth),
2987                    APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1);
2988
2989  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
2990    ConstantRange X = getSignedRange(Add->getOperand(0));
2991    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
2992      X = X.add(getSignedRange(Add->getOperand(i)));
2993    return ConservativeResult.intersectWith(X);
2994  }
2995
2996  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
2997    ConstantRange X = getSignedRange(Mul->getOperand(0));
2998    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
2999      X = X.multiply(getSignedRange(Mul->getOperand(i)));
3000    return ConservativeResult.intersectWith(X);
3001  }
3002
3003  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
3004    ConstantRange X = getSignedRange(SMax->getOperand(0));
3005    for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
3006      X = X.smax(getSignedRange(SMax->getOperand(i)));
3007    return ConservativeResult.intersectWith(X);
3008  }
3009
3010  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
3011    ConstantRange X = getSignedRange(UMax->getOperand(0));
3012    for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
3013      X = X.umax(getSignedRange(UMax->getOperand(i)));
3014    return ConservativeResult.intersectWith(X);
3015  }
3016
3017  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
3018    ConstantRange X = getSignedRange(UDiv->getLHS());
3019    ConstantRange Y = getSignedRange(UDiv->getRHS());
3020    return ConservativeResult.intersectWith(X.udiv(Y));
3021  }
3022
3023  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
3024    ConstantRange X = getSignedRange(ZExt->getOperand());
3025    return ConservativeResult.intersectWith(X.zeroExtend(BitWidth));
3026  }
3027
3028  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
3029    ConstantRange X = getSignedRange(SExt->getOperand());
3030    return ConservativeResult.intersectWith(X.signExtend(BitWidth));
3031  }
3032
3033  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
3034    ConstantRange X = getSignedRange(Trunc->getOperand());
3035    return ConservativeResult.intersectWith(X.truncate(BitWidth));
3036  }
3037
3038  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
3039    // If there's no signed wrap, and all the operands have the same sign or
3040    // zero, the value won't ever change sign.
3041    if (AddRec->hasNoSignedWrap()) {
3042      bool AllNonNeg = true;
3043      bool AllNonPos = true;
3044      for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
3045        if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false;
3046        if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false;
3047      }
3048      if (AllNonNeg)
3049        ConservativeResult = ConservativeResult.intersectWith(
3050          ConstantRange(APInt(BitWidth, 0),
3051                        APInt::getSignedMinValue(BitWidth)));
3052      else if (AllNonPos)
3053        ConservativeResult = ConservativeResult.intersectWith(
3054          ConstantRange(APInt::getSignedMinValue(BitWidth),
3055                        APInt(BitWidth, 1)));
3056    }
3057
3058    // TODO: non-affine addrec
3059    if (AddRec->isAffine()) {
3060      const Type *Ty = AddRec->getType();
3061      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
3062      if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
3063          getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
3064        MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
3065
3066        const SCEV *Start = AddRec->getStart();
3067        const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);
3068
3069        // Check for overflow.
3070        if (!AddRec->hasNoSignedWrap())
3071          return ConservativeResult;
3072
3073        ConstantRange StartRange = getSignedRange(Start);
3074        ConstantRange EndRange = getSignedRange(End);
3075        APInt Min = APIntOps::smin(StartRange.getSignedMin(),
3076                                   EndRange.getSignedMin());
3077        APInt Max = APIntOps::smax(StartRange.getSignedMax(),
3078                                   EndRange.getSignedMax());
3079        if (Min.isMinSignedValue() && Max.isMaxSignedValue())
3080          return ConservativeResult;
3081        return ConservativeResult.intersectWith(ConstantRange(Min, Max+1));
3082      }
3083    }
3084
3085    return ConservativeResult;
3086  }
3087
3088  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
3089    // For a SCEVUnknown, ask ValueTracking.
3090    if (!U->getValue()->getType()->isIntegerTy() && !TD)
3091      return ConservativeResult;
3092    unsigned NS = ComputeNumSignBits(U->getValue(), TD);
3093    if (NS == 1)
3094      return ConservativeResult;
3095    return ConservativeResult.intersectWith(
3096      ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
3097                    APInt::getSignedMaxValue(BitWidth).ashr(NS - 1)+1));
3098  }
3099
3100  return ConservativeResult;
3101}
3102
3103/// createSCEV - We know that there is no SCEV for the specified value.
3104/// Analyze the expression.
3105///
3106const SCEV *ScalarEvolution::createSCEV(Value *V) {
3107  if (!isSCEVable(V->getType()))
3108    return getUnknown(V);
3109
3110  unsigned Opcode = Instruction::UserOp1;
3111  if (Instruction *I = dyn_cast<Instruction>(V)) {
3112    Opcode = I->getOpcode();
3113
3114    // Don't attempt to analyze instructions in blocks that aren't
3115    // reachable. Such instructions don't matter, and they aren't required
3116    // to obey basic rules for definitions dominating uses, which this
3117    // analysis depends on.
3118    if (!DT->isReachableFromEntry(I->getParent()))
3119      return getUnknown(V);
3120  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
3121    Opcode = CE->getOpcode();
3122  else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
3123    return getConstant(CI);
3124  else if (isa<ConstantPointerNull>(V))
3125    return getIntegerSCEV(0, V->getType());
3126  else if (isa<UndefValue>(V))
3127    return getIntegerSCEV(0, V->getType());
3128  else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
3129    return GA->mayBeOverridden() ? getUnknown(V) : getSCEV(GA->getAliasee());
3130  else
3131    return getUnknown(V);
3132
3133  Operator *U = cast<Operator>(V);
3134  switch (Opcode) {
3135  case Instruction::Add:
3136    // Don't transfer the NSW and NUW bits from the Add instruction to the
3137    // Add expression, because the Instruction may be guarded by control
3138    // flow and the no-overflow bits may not be valid for the expression in
3139    // any context.
3140    return getAddExpr(getSCEV(U->getOperand(0)),
3141                      getSCEV(U->getOperand(1)));
3142  case Instruction::Mul:
3143    // Don't transfer the NSW and NUW bits from the Mul instruction to the
3144    // Mul expression, as with Add.
3145    return getMulExpr(getSCEV(U->getOperand(0)),
3146                      getSCEV(U->getOperand(1)));
3147  case Instruction::UDiv:
3148    return getUDivExpr(getSCEV(U->getOperand(0)),
3149                       getSCEV(U->getOperand(1)));
3150  case Instruction::Sub:
3151    return getMinusSCEV(getSCEV(U->getOperand(0)),
3152                        getSCEV(U->getOperand(1)));
3153  case Instruction::And:
3154    // For an expression like x&255 that merely masks off the high bits,
3155    // use zext(trunc(x)) as the SCEV expression.
3156    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
3157      if (CI->isNullValue())
3158        return getSCEV(U->getOperand(1));
3159      if (CI->isAllOnesValue())
3160        return getSCEV(U->getOperand(0));
3161      const APInt &A = CI->getValue();
3162
3163      // Instcombine's ShrinkDemandedConstant may strip bits out of
3164      // constants, obscuring what would otherwise be a low-bits mask.
3165      // Use ComputeMaskedBits to compute what ShrinkDemandedConstant
3166      // knew about to reconstruct a low-bits mask value.
3167      unsigned LZ = A.countLeadingZeros();
3168      unsigned BitWidth = A.getBitWidth();
3169      APInt AllOnes = APInt::getAllOnesValue(BitWidth);
3170      APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
3171      ComputeMaskedBits(U->getOperand(0), AllOnes, KnownZero, KnownOne, TD);
3172
3173      APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ);
3174
3175      if (LZ != 0 && !((~A & ~KnownZero) & EffectiveMask))
3176        return
3177          getZeroExtendExpr(getTruncateExpr(getSCEV(U->getOperand(0)),
3178                                IntegerType::get(getContext(), BitWidth - LZ)),
3179                            U->getType());
3180    }
3181    break;
3182
3183  case Instruction::Or:
3184    // If the RHS of the Or is a constant, we may have something like:
3185    // X*4+1 which got turned into X*4|1.  Handle this as an Add so loop
3186    // optimizations will transparently handle this case.
3187    //
3188    // In order for this transformation to be safe, the LHS must be of the
3189    // form X*(2^n) and the Or constant must be less than 2^n.
3190    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
3191      const SCEV *LHS = getSCEV(U->getOperand(0));
3192      const APInt &CIVal = CI->getValue();
3193      if (GetMinTrailingZeros(LHS) >=
3194          (CIVal.getBitWidth() - CIVal.countLeadingZeros())) {
3195        // Build a plain add SCEV.
3196        const SCEV *S = getAddExpr(LHS, getSCEV(CI));
3197        // If the LHS of the add was an addrec and it has no-wrap flags,
3198        // transfer the no-wrap flags, since an or won't introduce a wrap.
3199        if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) {
3200          const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS);
3201          if (OldAR->hasNoUnsignedWrap())
3202            const_cast<SCEVAddRecExpr *>(NewAR)->setHasNoUnsignedWrap(true);
3203          if (OldAR->hasNoSignedWrap())
3204            const_cast<SCEVAddRecExpr *>(NewAR)->setHasNoSignedWrap(true);
3205        }
3206        return S;
3207      }
3208    }
3209    break;
3210  case Instruction::Xor:
3211    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
3212      // If the RHS of the xor is a signbit, then this is just an add.
3213      // Instcombine turns add of signbit into xor as a strength reduction step.
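      // For example, in i32, (x ^ 0x80000000) equals (x + 0x80000000),
      // because the carry out of the sign bit is discarded.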
3214      if (CI->getValue().isSignBit())
3215        return getAddExpr(getSCEV(U->getOperand(0)),
3216                          getSCEV(U->getOperand(1)));
3217
3218      // If the RHS of xor is -1, then this is a not operation.
3219      if (CI->isAllOnesValue())
3220        return getNotSCEV(getSCEV(U->getOperand(0)));
3221
3222      // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
3223      // This is a variant of the check for xor with -1, and it handles
3224      // the case where instcombine has trimmed non-demanded bits out
3225      // of an xor with -1.
3226      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0)))
3227        if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1)))
3228          if (BO->getOpcode() == Instruction::And &&
3229              LCI->getValue() == CI->getValue())
3230            if (const SCEVZeroExtendExpr *Z =
3231                  dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) {
3232              const Type *UTy = U->getType();
3233              const SCEV *Z0 = Z->getOperand();
3234              const Type *Z0Ty = Z0->getType();
3235              unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
3236
3237              // If C is a low-bits mask, the zero extend is serving to
3238              // mask off the high bits. Complement the operand and
3239              // re-apply the zext.
3240              if (APIntOps::isMask(Z0TySize, CI->getValue()))
3241                return getZeroExtendExpr(getNotSCEV(Z0), UTy);
3242
3243              // If C is a single bit, it may be in the sign-bit position
3244              // before the zero-extend. In this case, represent the xor
3245              // using an add, which is equivalent, and re-apply the zext.
3246              APInt Trunc = APInt(CI->getValue()).trunc(Z0TySize);
3247              if (APInt(Trunc).zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
3248                  Trunc.isSignBit())
3249                return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
3250                                         UTy);
3251            }
3252    }
3253    break;
3254
3255  case Instruction::Shl:
3256    // Turn shift left of a constant amount into a multiply.
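    // For example, (X << 3) becomes (X * 8).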
3257    if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
3258      uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
3259      Constant *X = ConstantInt::get(getContext(),
3260        APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
3261      return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
3262    }
3263    break;
3264
3265  case Instruction::LShr:
3266    // Turn logical shift right of a constant into an unsigned divide.
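    // For example, (X >>u 3) becomes (X /u 8).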
3267    if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
3268      uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
3269      Constant *X = ConstantInt::get(getContext(),
3270        APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
3271      return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
3272    }
3273    break;
3274
3275  case Instruction::AShr:
3276    // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
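    // For example, in i32, ((X << 24) >>s 24) becomes
    // sext(trunc X to i8) to i32.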
3277    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1)))
3278      if (Instruction *L = dyn_cast<Instruction>(U->getOperand(0)))
3279        if (L->getOpcode() == Instruction::Shl &&
3280            L->getOperand(1) == U->getOperand(1)) {
3281          unsigned BitWidth = getTypeSizeInBits(U->getType());
3282          uint64_t Amt = BitWidth - CI->getZExtValue();
3283          if (Amt == BitWidth)
3284            return getSCEV(L->getOperand(0));       // shift by zero --> noop
3285          if (Amt > BitWidth)
3286            return getIntegerSCEV(0, U->getType()); // value is undefined
3287          return
3288            getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
3289                                           IntegerType::get(getContext(), Amt)),
3290                                 U->getType());
3291        }
3292    break;
3293
3294  case Instruction::Trunc:
3295    return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
3296
3297  case Instruction::ZExt:
3298    return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
3299
3300  case Instruction::SExt:
3301    return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
3302
3303  case Instruction::BitCast:
3304    // BitCasts are no-op casts so we just eliminate the cast.
3305    if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
3306      return getSCEV(U->getOperand(0));
3307    break;
3308
3309  // It's tempting to handle inttoptr and ptrtoint as no-ops; however, this can
3310  // lead to pointer expressions which cannot safely be expanded to GEPs,
3311  // because ScalarEvolution doesn't respect the GEP aliasing rules when
3312  // simplifying integer expressions.
3313
3314  case Instruction::GetElementPtr:
3315    return createNodeForGEP(cast<GEPOperator>(U));
3316
3317  case Instruction::PHI:
3318    return createNodeForPHI(cast<PHINode>(U));
3319
3320  case Instruction::Select:
3321    // This could be a smax or umax that was lowered earlier.
3322    // Try to recover it.
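    // For example, "select (icmp sgt %a, %b), %a, %b" is smax(%a, %b),
    // and "select (icmp ult %a, %b), %a, %b" is umin(%a, %b).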
3323    if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) {
3324      Value *LHS = ICI->getOperand(0);
3325      Value *RHS = ICI->getOperand(1);
3326      switch (ICI->getPredicate()) {
3327      case ICmpInst::ICMP_SLT:
3328      case ICmpInst::ICMP_SLE:
3329        std::swap(LHS, RHS);
3330        // fall through
3331      case ICmpInst::ICMP_SGT:
3332      case ICmpInst::ICMP_SGE:
3333        if (LHS == U->getOperand(1) && RHS == U->getOperand(2))
3334          return getSMaxExpr(getSCEV(LHS), getSCEV(RHS));
3335        else if (LHS == U->getOperand(2) && RHS == U->getOperand(1))
3336          return getSMinExpr(getSCEV(LHS), getSCEV(RHS));
3337        break;
3338      case ICmpInst::ICMP_ULT:
3339      case ICmpInst::ICMP_ULE:
3340        std::swap(LHS, RHS);
3341        // fall through
3342      case ICmpInst::ICMP_UGT:
3343      case ICmpInst::ICMP_UGE:
3344        if (LHS == U->getOperand(1) && RHS == U->getOperand(2))
3345          return getUMaxExpr(getSCEV(LHS), getSCEV(RHS));
3346        else if (LHS == U->getOperand(2) && RHS == U->getOperand(1))
3347          return getUMinExpr(getSCEV(LHS), getSCEV(RHS));
3348        break;
3349      case ICmpInst::ICMP_NE:
3350        // n != 0 ? n : 1  ->  umax(n, 1)
3351        if (LHS == U->getOperand(1) &&
3352            isa<ConstantInt>(U->getOperand(2)) &&
3353            cast<ConstantInt>(U->getOperand(2))->isOne() &&
3354            isa<ConstantInt>(RHS) &&
3355            cast<ConstantInt>(RHS)->isZero())
3356          return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(2)));
3357        break;
3358      case ICmpInst::ICMP_EQ:
3359        // n == 0 ? 1 : n  ->  umax(n, 1)
3360        if (LHS == U->getOperand(2) &&
3361            isa<ConstantInt>(U->getOperand(1)) &&
3362            cast<ConstantInt>(U->getOperand(1))->isOne() &&
3363            isa<ConstantInt>(RHS) &&
3364            cast<ConstantInt>(RHS)->isZero())
3365          return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(1)));
3366        break;
3367      default:
3368        break;
3369      }
3370    }
3371
3372  default: // We cannot analyze this expression.
3373    break;
3374  }
3375
3376  return getUnknown(V);
3377}
3378
3379
3380
3381//===----------------------------------------------------------------------===//
3382//                   Iteration Count Computation Code
3383//
3384
3385/// getBackedgeTakenCount - If the specified loop has a predictable
3386/// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute
3387/// object. The backedge-taken count is the number of times the loop header
3388/// will be branched to from within the loop. This is one less than the
3389/// trip count of the loop, since it doesn't count the first iteration,
3390/// when the header is branched to from outside the loop.
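/// For example, a loop whose body executes 5 times has a trip count of 5
/// and a backedge-taken count of 4.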
3391///
3392/// Note that it is not valid to call this method on a loop without a
3393/// loop-invariant backedge-taken count (see
3394/// hasLoopInvariantBackedgeTakenCount).
3395///
3396const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
3397  return getBackedgeTakenInfo(L).Exact;
3398}
3399
3400/// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except
3401/// return the least SCEV value that is known never to be less than the
3402/// actual backedge taken count.
3403const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
3404  return getBackedgeTakenInfo(L).Max;
3405}
3406
3407/// PushLoopPHIs - Push PHI nodes in the header of the given loop
3408/// onto the given Worklist.
3409static void
3410PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
3411  BasicBlock *Header = L->getHeader();
3412
3413  // Push all Loop-header PHIs onto the Worklist stack.
3414  for (BasicBlock::iterator I = Header->begin();
3415       PHINode *PN = dyn_cast<PHINode>(I); ++I)
3416    Worklist.push_back(PN);
3417}
3418
3419const ScalarEvolution::BackedgeTakenInfo &
3420ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
3421  // Initially insert a CouldNotCompute for this loop. If the insertion
3422  // succeeds, proceed to actually compute a backedge-taken count and
3423  // update the value. The temporary CouldNotCompute value tells SCEV
3424  // code elsewhere that it shouldn't attempt to request a new
3425  // backedge-taken count, which could result in infinite recursion.
3426  std::pair<std::map<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
3427    BackedgeTakenCounts.insert(std::make_pair(L, getCouldNotCompute()));
3428  if (Pair.second) {
3429    BackedgeTakenInfo BECount = ComputeBackedgeTakenCount(L);
3430    if (BECount.Exact != getCouldNotCompute()) {
3431      assert(BECount.Exact->isLoopInvariant(L) &&
3432             BECount.Max->isLoopInvariant(L) &&
3433             "Computed backedge-taken count isn't loop invariant for loop!");
3434      ++NumTripCountsComputed;
3435
3436      // Update the value in the map.
3437      Pair.first->second = BECount;
3438    } else {
3439      if (BECount.Max != getCouldNotCompute())
3440        // Update the value in the map.
3441        Pair.first->second = BECount;
3442      if (isa<PHINode>(L->getHeader()->begin()))
3443        // Only count loops that have phi nodes as not being computable.
3444        ++NumTripCountsNotComputed;
3445    }
3446
3447    // Now that we know more about the trip count for this loop, forget any
3448    // existing SCEV values for PHI nodes in this loop since they are only
3449    // conservative estimates made without the benefit of trip count
3450    // information. This is similar to the code in forgetLoop, except that
3451    // it handles SCEVUnknown PHI nodes specially.
3452    if (BECount.hasAnyInfo()) {
3453      SmallVector<Instruction *, 16> Worklist;
3454      PushLoopPHIs(L, Worklist);
3455
3456      SmallPtrSet<Instruction *, 8> Visited;
3457      while (!Worklist.empty()) {
3458        Instruction *I = Worklist.pop_back_val();
3459        if (!Visited.insert(I)) continue;
3460
3461        std::map<SCEVCallbackVH, const SCEV *>::iterator It =
3462          Scalars.find(static_cast<Value *>(I));
3463        if (It != Scalars.end()) {
3464          // SCEVUnknown for a PHI either means that it has an unrecognized
3465          // structure, or it's a PHI that's in the progress of being computed
3466          // by createNodeForPHI.  In the former case, additional loop trip
3467          // count information isn't going to change anything. In the later
3468          // case, createNodeForPHI will perform the necessary updates on its
3469          // own when it gets to that point.
3470          if (!isa<PHINode>(I) || !isa<SCEVUnknown>(It->second)) {
3471            ValuesAtScopes.erase(It->second);
3472            Scalars.erase(It);
3473          }
3474          if (PHINode *PN = dyn_cast<PHINode>(I))
3475            ConstantEvolutionLoopExitValue.erase(PN);
3476        }
3477
3478        PushDefUseChildren(I, Worklist);
3479      }
3480    }
3481  }
3482  return Pair.first->second;
3483}
3484
3485/// forgetLoop - This method should be called by the client when it has
3486/// changed a loop in a way that may affect ScalarEvolution's ability to
3487/// compute a trip count, or if the loop is deleted.
3488void ScalarEvolution::forgetLoop(const Loop *L) {
3489  // Drop any stored trip count value.
3490  BackedgeTakenCounts.erase(L);
3491
3492  // Drop information about expressions based on loop-header PHIs.
3493  SmallVector<Instruction *, 16> Worklist;
3494  PushLoopPHIs(L, Worklist);
3495
3496  SmallPtrSet<Instruction *, 8> Visited;
3497  while (!Worklist.empty()) {
3498    Instruction *I = Worklist.pop_back_val();
3499    if (!Visited.insert(I)) continue;
3500
3501    std::map<SCEVCallbackVH, const SCEV *>::iterator It =
3502      Scalars.find(static_cast<Value *>(I));
3503    if (It != Scalars.end()) {
3504      ValuesAtScopes.erase(It->second);
3505      Scalars.erase(It);
3506      if (PHINode *PN = dyn_cast<PHINode>(I))
3507        ConstantEvolutionLoopExitValue.erase(PN);
3508    }
3509
3510    PushDefUseChildren(I, Worklist);
3511  }
3512}
3513
3514/// forgetValue - This method should be called by the client when it has
3515/// changed a value in a way that may affect its value, or which may
3516/// disconnect it from a def-use chain linking it to a loop.
3517void ScalarEvolution::forgetValue(Value *V) {
3518  Instruction *I = dyn_cast<Instruction>(V);
3519  if (!I) return;
3520
3521  // Drop information about expressions based on loop-header PHIs.
3522  SmallVector<Instruction *, 16> Worklist;
3523  Worklist.push_back(I);
3524
3525  SmallPtrSet<Instruction *, 8> Visited;
3526  while (!Worklist.empty()) {
3527    I = Worklist.pop_back_val();
3528    if (!Visited.insert(I)) continue;
3529
3530    std::map<SCEVCallbackVH, const SCEV *>::iterator It =
3531      Scalars.find(static_cast<Value *>(I));
3532    if (It != Scalars.end()) {
3533      ValuesAtScopes.erase(It->second);
3534      Scalars.erase(It);
3535      if (PHINode *PN = dyn_cast<PHINode>(I))
3536        ConstantEvolutionLoopExitValue.erase(PN);
3537    }
3538
3539    PushDefUseChildren(I, Worklist);
3540  }
3541}
3542
3543/// ComputeBackedgeTakenCount - Compute the number of times the backedge
3544/// of the specified loop will execute.
3545ScalarEvolution::BackedgeTakenInfo
3546ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
3547  SmallVector<BasicBlock *, 8> ExitingBlocks;
3548  L->getExitingBlocks(ExitingBlocks);
3549
3550  // Examine all exits and pick the most conservative values.
3551  const SCEV *BECount = getCouldNotCompute();
3552  const SCEV *MaxBECount = getCouldNotCompute();
3553  bool CouldNotComputeBECount = false;
3554  for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
3555    BackedgeTakenInfo NewBTI =
3556      ComputeBackedgeTakenCountFromExit(L, ExitingBlocks[i]);
3557
3558    if (NewBTI.Exact == getCouldNotCompute()) {
3559      // We couldn't compute an exact value for this exit, so
3560      // we won't be able to compute an exact value for the loop.
3561      CouldNotComputeBECount = true;
3562      BECount = getCouldNotCompute();
3563    } else if (!CouldNotComputeBECount) {
3564      if (BECount == getCouldNotCompute())
3565        BECount = NewBTI.Exact;
3566      else
3567        BECount = getUMinFromMismatchedTypes(BECount, NewBTI.Exact);
3568    }
3569    if (MaxBECount == getCouldNotCompute())
3570      MaxBECount = NewBTI.Max;
3571    else if (NewBTI.Max != getCouldNotCompute())
3572      MaxBECount = getUMinFromMismatchedTypes(MaxBECount, NewBTI.Max);
3573  }
3574
3575  return BackedgeTakenInfo(BECount, MaxBECount);
3576}
3577
3578/// ComputeBackedgeTakenCountFromExit - Compute the number of times the backedge
3579/// of the specified loop will execute if it exits via the specified block.
3580ScalarEvolution::BackedgeTakenInfo
3581ScalarEvolution::ComputeBackedgeTakenCountFromExit(const Loop *L,
3582                                                   BasicBlock *ExitingBlock) {
3583
3584  // Okay, we've chosen an exiting block.  See what condition causes us to
3585  // exit at this block.
3586  //
3587  // FIXME: we should be able to handle switch instructions (with a single exit)
3588  BranchInst *ExitBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
3589  if (ExitBr == 0) return getCouldNotCompute();
3590  assert(ExitBr->isConditional() && "If unconditional, it can't be in loop!");
3591
3592  // At this point, we know we have a conditional branch that determines whether
3593  // the loop is exited.  However, we don't know if the branch is executed each
3594  // time through the loop.  If not, then the execution count of the branch will
3595  // not be equal to the trip count of the loop.
3596  //
3597  // Currently we check for this by checking to see if the Exit branch goes to
3598  // the loop header.  If so, we know it will always execute the same number of
3599  // times as the loop.  We also handle the case where the exit block *is* the
3600  // loop header.  This is common for un-rotated loops.
3601  //
3602  // If both of those tests fail, walk up the unique predecessor chain to the
3603  // header, stopping if there is an edge that doesn't exit the loop. If the
3604  // header is reached, the execution count of the branch will be equal to the
3605  // trip count of the loop.
3606  //
3607  //  More extensive analysis could be done to handle more cases here.
3608  //
3609  if (ExitBr->getSuccessor(0) != L->getHeader() &&
3610      ExitBr->getSuccessor(1) != L->getHeader() &&
3611      ExitBr->getParent() != L->getHeader()) {
3612    // The simple checks failed, try climbing the unique predecessor chain
3613    // up to the header.
3614    bool Ok = false;
3615    for (BasicBlock *BB = ExitBr->getParent(); BB; ) {
3616      BasicBlock *Pred = BB->getUniquePredecessor();
3617      if (!Pred)
3618        return getCouldNotCompute();
3619      TerminatorInst *PredTerm = Pred->getTerminator();
3620      for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) {
3621        BasicBlock *PredSucc = PredTerm->getSuccessor(i);
3622        if (PredSucc == BB)
3623          continue;
3624        // If the predecessor has a successor that isn't BB and isn't
3625        // outside the loop, assume the worst.
3626        if (L->contains(PredSucc))
3627          return getCouldNotCompute();
3628      }
3629      if (Pred == L->getHeader()) {
3630        Ok = true;
3631        break;
3632      }
3633      BB = Pred;
3634    }
3635    if (!Ok)
3636      return getCouldNotCompute();
3637  }
3638
3639  // Proceed to the next level to examine the exit condition expression.
3640  return ComputeBackedgeTakenCountFromExitCond(L, ExitBr->getCondition(),
3641                                               ExitBr->getSuccessor(0),
3642                                               ExitBr->getSuccessor(1));
3643}
3644
3645/// ComputeBackedgeTakenCountFromExitCond - Compute the number of times the
3646/// backedge of the specified loop will execute if its exit condition
3647/// were a conditional branch of ExitCond, TBB, and FBB.
3648ScalarEvolution::BackedgeTakenInfo
3649ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L,
3650                                                       Value *ExitCond,
3651                                                       BasicBlock *TBB,
3652                                                       BasicBlock *FBB) {
3653  // Check if the controlling expression for this loop is an And or Or.
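  // For example, for "while (i != n && m[i])" the loop continues only while
  // both conditions hold, so the exact backedge-taken count is the umin of
  // the counts computed for each condition separately.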
3654  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
3655    if (BO->getOpcode() == Instruction::And) {
3656      // Recurse on the operands of the and.
3657      BackedgeTakenInfo BTI0 =
3658        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
3659      BackedgeTakenInfo BTI1 =
3660        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
3661      const SCEV *BECount = getCouldNotCompute();
3662      const SCEV *MaxBECount = getCouldNotCompute();
3663      if (L->contains(TBB)) {
3664        // Both conditions must be true for the loop to continue executing.
3665        // Choose the less conservative count.
3666        if (BTI0.Exact == getCouldNotCompute() ||
3667            BTI1.Exact == getCouldNotCompute())
3668          BECount = getCouldNotCompute();
3669        else
3670          BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3671        if (BTI0.Max == getCouldNotCompute())
3672          MaxBECount = BTI1.Max;
3673        else if (BTI1.Max == getCouldNotCompute())
3674          MaxBECount = BTI0.Max;
3675        else
3676          MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
3677      } else {
3678        // Both conditions must be true for the loop to exit.
3679        assert(L->contains(FBB) && "Loop block has no successor in loop!");
3680        if (BTI0.Exact != getCouldNotCompute() &&
3681            BTI1.Exact != getCouldNotCompute())
3682          BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3683        if (BTI0.Max != getCouldNotCompute() &&
3684            BTI1.Max != getCouldNotCompute())
3685          MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
3686      }
3687
3688      return BackedgeTakenInfo(BECount, MaxBECount);
3689    }
3690    if (BO->getOpcode() == Instruction::Or) {
3691      // Recurse on the operands of the or.
3692      BackedgeTakenInfo BTI0 =
3693        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
3694      BackedgeTakenInfo BTI1 =
3695        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
3696      const SCEV *BECount = getCouldNotCompute();
3697      const SCEV *MaxBECount = getCouldNotCompute();
3698      if (L->contains(FBB)) {
3699        // Both conditions must be false for the loop to continue executing.
3700        // Choose the less conservative count.
3701        if (BTI0.Exact == getCouldNotCompute() ||
3702            BTI1.Exact == getCouldNotCompute())
3703          BECount = getCouldNotCompute();
3704        else
3705          BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3706        if (BTI0.Max == getCouldNotCompute())
3707          MaxBECount = BTI1.Max;
3708        else if (BTI1.Max == getCouldNotCompute())
3709          MaxBECount = BTI0.Max;
3710        else
3711          MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
3712      } else {
3713        // Both conditions must be false for the loop to exit.
3714        assert(L->contains(TBB) && "Loop block has no successor in loop!");
3715        if (BTI0.Exact != getCouldNotCompute() &&
3716            BTI1.Exact != getCouldNotCompute())
3717          BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3718        if (BTI0.Max != getCouldNotCompute() &&
3719            BTI1.Max != getCouldNotCompute())
3720          MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
3721      }
3722
3723      return BackedgeTakenInfo(BECount, MaxBECount);
3724    }
3725  }
3726
3727  // With an icmp, it may be feasible to compute an exact backedge-taken count.
3728  // Proceed to the next level to examine the icmp.
3729  if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond))
3730    return ComputeBackedgeTakenCountFromExitCondICmp(L, ExitCondICmp, TBB, FBB);
3731
3732  // Check for a constant condition. These are normally stripped out by
3733  // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
3734  // preserve the CFG and is temporarily leaving constant conditions
3735  // in place.
3736  if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
3737    if (L->contains(FBB) == !CI->getZExtValue())
3738      // The backedge is always taken.
3739      return getCouldNotCompute();
3740    else
3741      // The backedge is never taken.
3742      return getIntegerSCEV(0, CI->getType());
3743  }
3744
3745  // If it's not an integer or pointer comparison, then compute it the hard way.
3746  return ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
3747}
3748
3749/// ComputeBackedgeTakenCountFromExitCondICmp - Compute the number of times the
3750/// backedge of the specified loop will execute if its exit condition
3751/// were a conditional branch of the ICmpInst ExitCond, TBB, and FBB.
3752ScalarEvolution::BackedgeTakenInfo
3753ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L,
3754                                                           ICmpInst *ExitCond,
3755                                                           BasicBlock *TBB,
3756                                                           BasicBlock *FBB) {
3757
3758  // If the condition was exit on true, convert the condition to exit on false.
3759  ICmpInst::Predicate Cond;
3760  if (!L->contains(FBB))
3761    Cond = ExitCond->getPredicate();
3762  else
3763    Cond = ExitCond->getInversePredicate();
3764
3765  // Handle common loops like: for (X = "string"; *X; ++X)
3766  if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
3767    if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
3768      BackedgeTakenInfo ItCnt =
3769        ComputeLoadConstantCompareBackedgeTakenCount(LI, RHS, L, Cond);
3770      if (ItCnt.hasAnyInfo())
3771        return ItCnt;
3772    }
3773
3774  const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
3775  const SCEV *RHS = getSCEV(ExitCond->getOperand(1));
3776
3777  // Try to evaluate any dependencies out of the loop.
3778  LHS = getSCEVAtScope(LHS, L);
3779  RHS = getSCEVAtScope(RHS, L);
3780
3781  // At this point, we would like to compute for how many iterations of the
3782  // loop the predicate will return true for these inputs.
3783  if (LHS->isLoopInvariant(L) && !RHS->isLoopInvariant(L)) {
3784    // If there is a loop-invariant operand, force it into the RHS.
3785    std::swap(LHS, RHS);
3786    Cond = ICmpInst::getSwappedPredicate(Cond);
3787  }
3788
3789  // If we have a comparison of a chrec against a constant, try to use value
3790  // ranges to answer this query.
3791  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
3792    if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
3793      if (AddRec->getLoop() == L) {
3794        // Form the constant range.
3795        ConstantRange CompRange(
3796            ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue()));
3797
3798        const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
3799        if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
3800      }
3801
3802  switch (Cond) {
3803  case ICmpInst::ICMP_NE: {                     // while (X != Y)
3804    // Convert to: while (X-Y != 0)
3805    BackedgeTakenInfo BTI = HowFarToZero(getMinusSCEV(LHS, RHS), L);
3806    if (BTI.hasAnyInfo()) return BTI;
3807    break;
3808  }
3809  case ICmpInst::ICMP_EQ: {                     // while (X == Y)
3810    // Convert to: while (X-Y == 0)
3811    BackedgeTakenInfo BTI = HowFarToNonZero(getMinusSCEV(LHS, RHS), L);
3812    if (BTI.hasAnyInfo()) return BTI;
3813    break;
3814  }
3815  case ICmpInst::ICMP_SLT: {
3816    BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, true);
3817    if (BTI.hasAnyInfo()) return BTI;
3818    break;
3819  }
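  // Note for the SGT/UGT cases below: x > y is rewritten as ~x < ~y.  Since
  // ~x = -x - 1, negation plus the constant offset reverses the ordering, so
  // both greater-than forms can reuse HowManyLessThans with the
  // corresponding signedness.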
3820  case ICmpInst::ICMP_SGT: {
3821    BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
3822                                             getNotSCEV(RHS), L, true);
3823    if (BTI.hasAnyInfo()) return BTI;
3824    break;
3825  }
3826  case ICmpInst::ICMP_ULT: {
3827    BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, false);
3828    if (BTI.hasAnyInfo()) return BTI;
3829    break;
3830  }
3831  case ICmpInst::ICMP_UGT: {
3832    BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
3833                                             getNotSCEV(RHS), L, false);
3834    if (BTI.hasAnyInfo()) return BTI;
3835    break;
3836  }
3837  default:
3838#if 0
3839    dbgs() << "ComputeBackedgeTakenCount ";
3840    if (ExitCond->getOperand(0)->getType()->isUnsigned())
3841      dbgs() << "[unsigned] ";
3842    dbgs() << *LHS << "   "
3843         << Instruction::getOpcodeName(Instruction::ICmp)
3844         << "   " << *RHS << "\n";
3845#endif
3846    break;
3847  }
3848  return
3849    ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
3850}
3851
3852static ConstantInt *
3853EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
3854                                ScalarEvolution &SE) {
3855  const SCEV *InVal = SE.getConstant(C);
3856  const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
3857  assert(isa<SCEVConstant>(Val) &&
3858         "Evaluation of SCEV at constant didn't fold correctly?");
3859  return cast<SCEVConstant>(Val)->getValue();
3860}
3861
3862/// GetAddressedElementFromGlobal - Given a global variable with an initializer
3863/// and a GEP expression (missing the pointer index) indexing into it, return
3864/// the addressed element of the initializer or null if the index expression is
3865/// invalid.
3866static Constant *
3867GetAddressedElementFromGlobal(GlobalVariable *GV,
3868                              const std::vector<ConstantInt*> &Indices) {
3869  Constant *Init = GV->getInitializer();
3870  for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
3871    uint64_t Idx = Indices[i]->getZExtValue();
3872    if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) {
3873      assert(Idx < CS->getNumOperands() && "Bad struct index!");
3874      Init = cast<Constant>(CS->getOperand(Idx));
3875    } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) {
3876      if (Idx >= CA->getNumOperands()) return 0;  // Bogus program
3877      Init = cast<Constant>(CA->getOperand(Idx));
3878    } else if (isa<ConstantAggregateZero>(Init)) {
3879      if (const StructType *STy = dyn_cast<StructType>(Init->getType())) {
3880        assert(Idx < STy->getNumElements() && "Bad struct index!");
3881        Init = Constant::getNullValue(STy->getElementType(Idx));
3882      } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) {
3883        if (Idx >= ATy->getNumElements()) return 0;  // Bogus program
3884        Init = Constant::getNullValue(ATy->getElementType());
3885      } else {
3886        llvm_unreachable("Unknown constant aggregate type!");
3887      }
      // Descend into the zero element just computed.
3889    } else {
3890      return 0; // Unknown initializer type
3891    }
3892  }
3893  return Init;
3894}
3895
3896/// ComputeLoadConstantCompareBackedgeTakenCount - Given an exit condition of
3897/// 'icmp op load X, cst', try to see if we can compute the backedge
3898/// execution count.
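/// For example (an illustrative case, not from the original comments), given
///   static const char S[] = "hello";
///   for (i = 0; S[i] != 0; ++i) ...
/// each load S[i] folds to a constant from the initializer, and iterating
/// the comparison shows it first fails at i == 5, a backedge-taken count
/// of 5.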
3899ScalarEvolution::BackedgeTakenInfo
3900ScalarEvolution::ComputeLoadConstantCompareBackedgeTakenCount(
3901                                                LoadInst *LI,
3902                                                Constant *RHS,
3903                                                const Loop *L,
3904                                                ICmpInst::Predicate predicate) {
3905  if (LI->isVolatile()) return getCouldNotCompute();
3906
3907  // Check to see if the loaded pointer is a getelementptr of a global.
3908  // TODO: Use SCEV instead of manually grubbing with GEPs.
3909  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
3910  if (!GEP) return getCouldNotCompute();
3911
3912  // Make sure that it is really a constant global we are gepping, with an
3913  // initializer, and make sure the first IDX is really 0.
3914  GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
3915  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
3916      GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
3917      !cast<Constant>(GEP->getOperand(1))->isNullValue())
3918    return getCouldNotCompute();
3919
3920  // Okay, we allow one non-constant index into the GEP instruction.
3921  Value *VarIdx = 0;
3922  std::vector<ConstantInt*> Indexes;
3923  unsigned VarIdxNum = 0;
3924  for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
3925    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
3926      Indexes.push_back(CI);
    } else {
3928      if (VarIdx) return getCouldNotCompute();  // Multiple non-constant idx's.
3929      VarIdx = GEP->getOperand(i);
3930      VarIdxNum = i-2;
3931      Indexes.push_back(0);
3932    }
3933
3934  // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
  // Now check to see if X is a loop-variant value.
3936  const SCEV *Idx = getSCEV(VarIdx);
3937  Idx = getSCEVAtScope(Idx, L);
3938
3939  // We can only recognize very limited forms of loop index expressions, in
3940  // particular, only affine AddRec's like {C1,+,C2}.
3941  const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
3942  if (!IdxExpr || !IdxExpr->isAffine() || IdxExpr->isLoopInvariant(L) ||
3943      !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
3944      !isa<SCEVConstant>(IdxExpr->getOperand(1)))
3945    return getCouldNotCompute();
3946
3947  unsigned MaxSteps = MaxBruteForceIterations;
3948  for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
3949    ConstantInt *ItCst = ConstantInt::get(
3950                           cast<IntegerType>(IdxExpr->getType()), IterationNum);
3951    ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
3952
3953    // Form the GEP offset.
3954    Indexes[VarIdxNum] = Val;
3955
3956    Constant *Result = GetAddressedElementFromGlobal(GV, Indexes);
3957    if (Result == 0) break;  // Cannot compute!
3958
3959    // Evaluate the condition for this iteration.
3960    Result = ConstantExpr::getICmp(predicate, Result, RHS);
3961    if (!isa<ConstantInt>(Result)) break;  // Couldn't decide for sure
3962    if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
3963#if 0
3964      dbgs() << "\n***\n*** Computed loop count " << *ItCst
3965             << "\n*** From global " << *GV << "*** BB: " << *L->getHeader()
3966             << "***\n";
3967#endif
3968      ++NumArrayLenItCounts;
3969      return getConstant(ItCst);   // Found terminating iteration!
3970    }
3971  }
3972  return getCouldNotCompute();
3973}
3974
3975
3976/// CanConstantFold - Return true if we can constant fold an instruction of the
3977/// specified type, assuming that all operands were constants.
3978static bool CanConstantFold(const Instruction *I) {
3979  if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
3980      isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I))
3981    return true;
3982
3983  if (const CallInst *CI = dyn_cast<CallInst>(I))
3984    if (const Function *F = CI->getCalledFunction())
3985      return canConstantFoldCallTo(F);
3986  return false;
3987}
3988
3989/// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
3990/// in the loop that V is derived from.  We allow arbitrary operations along the
3991/// way, but the operands of an operation must either be constants or a value
3992/// derived from a constant PHI.  If this expression does not fit with these
3993/// constraints, return null.
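/// For example (illustrative IR sketch):
///   %i = phi i32 [ 0, %preheader ], [ %i.next, %latch ]
///   %t = mul i32 %i, 3
///   %c = icmp eq i32 %t, 42
/// Here %c is built only from constants and the header PHI %i, so this
/// function returns %i for %c.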
3994static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
3995  // If this is not an instruction, or if this is an instruction outside of the
3996  // loop, it can't be derived from a loop PHI.
3997  Instruction *I = dyn_cast<Instruction>(V);
3998  if (I == 0 || !L->contains(I)) return 0;
3999
4000  if (PHINode *PN = dyn_cast<PHINode>(I)) {
4001    if (L->getHeader() == I->getParent())
4002      return PN;
4003    else
4004      // We don't currently keep track of the control flow needed to evaluate
4005      // PHIs, so we cannot handle PHIs inside of loops.
4006      return 0;
4007  }
4008
4009  // If we won't be able to constant fold this expression even if the operands
4010  // are constants, return early.
4011  if (!CanConstantFold(I)) return 0;
4012
4013  // Otherwise, we can evaluate this instruction if all of its operands are
4014  // constant or derived from a PHI node themselves.
4015  PHINode *PHI = 0;
4016  for (unsigned Op = 0, e = I->getNumOperands(); Op != e; ++Op)
4017    if (!(isa<Constant>(I->getOperand(Op)) ||
4018          isa<GlobalValue>(I->getOperand(Op)))) {
4019      PHINode *P = getConstantEvolvingPHI(I->getOperand(Op), L);
4020      if (P == 0) return 0;  // Not evolving from PHI
4021      if (PHI == 0)
4022        PHI = P;
4023      else if (PHI != P)
4024        return 0;  // Evolving from multiple different PHIs.
4025    }
4026
  // This is an expression evolving from a constant PHI!
4028  return PHI;
4029}
4030
4031/// EvaluateExpression - Given an expression that passes the
4032/// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
4033/// in the loop has the value PHIVal.  If we can't fold this expression for some
4034/// reason, return null.
4035static Constant *EvaluateExpression(Value *V, Constant *PHIVal,
4036                                    const TargetData *TD) {
4037  if (isa<PHINode>(V)) return PHIVal;
4038  if (Constant *C = dyn_cast<Constant>(V)) return C;
4040  Instruction *I = cast<Instruction>(V);
4041
4042  std::vector<Constant*> Operands;
4043  Operands.resize(I->getNumOperands());
4044
4045  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
4046    Operands[i] = EvaluateExpression(I->getOperand(i), PHIVal, TD);
4047    if (Operands[i] == 0) return 0;
4048  }
4049
4050  if (const CmpInst *CI = dyn_cast<CmpInst>(I))
4051    return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
4052                                           Operands[1], TD);
4053  return ConstantFoldInstOperands(I->getOpcode(), I->getType(),
4054                                  &Operands[0], Operands.size(), TD);
4055}
4056
4057/// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
4058/// in the header of its containing loop, we know the loop executes a
4059/// constant number of times, and the PHI node is just a recurrence
4060/// involving constants, fold it.
4061Constant *
4062ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
4063                                                   const APInt &BEs,
4064                                                   const Loop *L) {
4065  std::map<PHINode*, Constant*>::iterator I =
4066    ConstantEvolutionLoopExitValue.find(PN);
4067  if (I != ConstantEvolutionLoopExitValue.end())
4068    return I->second;
4069
4070  if (BEs.ugt(APInt(BEs.getBitWidth(),MaxBruteForceIterations)))
4071    return ConstantEvolutionLoopExitValue[PN] = 0;  // Not going to evaluate it.
4072
4073  Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
4074
4075  // Since the loop is canonicalized, the PHI node must have two entries.  One
4076  // entry must be a constant (coming in from outside of the loop), and the
4077  // second must be derived from the same PHI.
4078  bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
4079  Constant *StartCST =
4080    dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
4081  if (StartCST == 0)
4082    return RetVal = 0;  // Must be a constant.
4083
4084  Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
4085  PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
4086  if (PN2 != PN)
4087    return RetVal = 0;  // Not derived from same PHI.
4088
4089  // Execute the loop symbolically to determine the exit value.
4090  if (BEs.getActiveBits() >= 32)
4091    return RetVal = 0; // More than 2^32-1 iterations?? Not doing it!
4092
4093  unsigned NumIterations = BEs.getZExtValue(); // must be in range
4094  unsigned IterationNum = 0;
4095  for (Constant *PHIVal = StartCST; ; ++IterationNum) {
4096    if (IterationNum == NumIterations)
4097      return RetVal = PHIVal;  // Got exit value!
4098
4099    // Compute the value of the PHI node for the next iteration.
4100    Constant *NextPHI = EvaluateExpression(BEValue, PHIVal, TD);
4101    if (NextPHI == PHIVal)
4102      return RetVal = NextPHI;  // Stopped evolving!
    if (NextPHI == 0)
      return RetVal = 0;  // Couldn't evaluate!
4105    PHIVal = NextPHI;
4106  }
4107}
4108
4109/// ComputeBackedgeTakenCountExhaustively - If the loop is known to execute a
4110/// constant number of times (the condition evolves only from constants),
/// try to evaluate a few iterations of the loop until the exit condition
/// gets a value of ExitWhen (true or false).  If we cannot
4113/// evaluate the trip count of the loop, return getCouldNotCompute().
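/// For example (an illustrative case), in
///   for (i = 1; i != 64; i *= 2) ...
/// the induction is not an affine recurrence, but i evolves purely from the
/// constant 1, so simulating i = 1, 2, 4, ..., 64 shows the exit condition
/// is first reached on iteration 6, a backedge-taken count of 6.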
4114const SCEV *
4115ScalarEvolution::ComputeBackedgeTakenCountExhaustively(const Loop *L,
4116                                                       Value *Cond,
4117                                                       bool ExitWhen) {
4118  PHINode *PN = getConstantEvolvingPHI(Cond, L);
4119  if (PN == 0) return getCouldNotCompute();
4120
4121  // Since the loop is canonicalized, the PHI node must have two entries.  One
4122  // entry must be a constant (coming in from outside of the loop), and the
4123  // second must be derived from the same PHI.
4124  bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
4125  Constant *StartCST =
4126    dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
4127  if (StartCST == 0) return getCouldNotCompute();  // Must be a constant.
4128
4129  Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
4130  PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
4131  if (PN2 != PN) return getCouldNotCompute();  // Not derived from same PHI.
4132
  // Okay, we found a PHI node that defines the trip count of this loop.
  // Execute the loop symbolically to determine when the condition gets a
  // value of "ExitWhen".
4136  unsigned IterationNum = 0;
4137  unsigned MaxIterations = MaxBruteForceIterations;   // Limit analysis.
4138  for (Constant *PHIVal = StartCST;
4139       IterationNum != MaxIterations; ++IterationNum) {
4140    ConstantInt *CondVal =
4141      dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, PHIVal, TD));
4142
4143    // Couldn't symbolically evaluate.
4144    if (!CondVal) return getCouldNotCompute();
4145
4146    if (CondVal->getValue() == uint64_t(ExitWhen)) {
4147      ++NumBruteForceTripCountsComputed;
4148      return getConstant(Type::getInt32Ty(getContext()), IterationNum);
4149    }
4150
4151    // Compute the value of the PHI node for the next iteration.
4152    Constant *NextPHI = EvaluateExpression(BEValue, PHIVal, TD);
4153    if (NextPHI == 0 || NextPHI == PHIVal)
4154      return getCouldNotCompute();// Couldn't evaluate or not making progress...
4155    PHIVal = NextPHI;
4156  }
4157
4158  // Too many iterations were needed to evaluate.
4159  return getCouldNotCompute();
4160}
4161
4162/// getSCEVAtScope - Return a SCEV expression for the specified value
/// at the specified scope in the program.  The L value specifies the loop
/// nest in which to evaluate the expression: null means the top-level scope
/// (outside all loops), and a non-null loop means the scope immediately
/// inside that loop.
4166///
4167/// This method can be used to compute the exit value for a variable defined
4168/// in a loop by querying what the value will hold in the parent loop.
4169///
4170/// In the case that a relevant loop exit value cannot be computed, the
4171/// original value V is returned.
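///
/// For example (illustrative), for the recurrence {0,+,1} in a loop whose
/// backedge-taken count is the constant 9, evaluating at the parent loop's
/// scope yields the constant 9 (the value in the final iteration) instead of
/// the recurrence itself.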
4172const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
4173  // Check to see if we've folded this expression at this loop before.
4174  std::map<const Loop *, const SCEV *> &Values = ValuesAtScopes[V];
4175  std::pair<std::map<const Loop *, const SCEV *>::iterator, bool> Pair =
4176    Values.insert(std::make_pair(L, static_cast<const SCEV *>(0)));
4177  if (!Pair.second)
4178    return Pair.first->second ? Pair.first->second : V;
4179
4180  // Otherwise compute it.
4181  const SCEV *C = computeSCEVAtScope(V, L);
4182  ValuesAtScopes[V][L] = C;
4183  return C;
4184}
4185
4186const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
4187  if (isa<SCEVConstant>(V)) return V;
4188
4189  // If this instruction is evolved from a constant-evolving PHI, compute the
4190  // exit value from the loop without using SCEVs.
4191  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
4192    if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
      const Loop *CurrLoop = (*this->LI)[I->getParent()];
      // Looking for a loop exit value.
      if (CurrLoop && CurrLoop->getParentLoop() == L)
        if (PHINode *PN = dyn_cast<PHINode>(I))
          if (PN->getParent() == CurrLoop->getHeader()) {
4197            // Okay, there is no closed form solution for the PHI node.  Check
4198            // to see if the loop that contains it has a known backedge-taken
4199            // count.  If so, we may be able to force computation of the exit
4200            // value.
            const SCEV *BackedgeTakenCount = getBackedgeTakenCount(CurrLoop);
4202            if (const SCEVConstant *BTCC =
4203                  dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
4204              // Okay, we know how many times the containing loop executes.  If
4205              // this is a constant evolving PHI node, get the final value at
4206              // the specified iteration number.
              Constant *RV =
                getConstantEvolutionLoopExitValue(PN,
                                                  BTCC->getValue()->getValue(),
                                                  CurrLoop);
4210              if (RV) return getSCEV(RV);
4211            }
4212          }
4213
4214      // Okay, this is an expression that we cannot symbolically evaluate
4215      // into a SCEV.  Check to see if it's possible to symbolically evaluate
4216      // the arguments into constants, and if so, try to constant propagate the
4217      // result.  This is particularly useful for computing loop exit values.
4218      if (CanConstantFold(I)) {
4219        std::vector<Constant*> Operands;
4220        Operands.reserve(I->getNumOperands());
4221        for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
4222          Value *Op = I->getOperand(i);
4223          if (Constant *C = dyn_cast<Constant>(Op)) {
4224            Operands.push_back(C);
4225          } else {
4226            // If any of the operands is non-constant and if they are
4227            // non-integer and non-pointer, don't even try to analyze them
4228            // with scev techniques.
4229            if (!isSCEVable(Op->getType()))
4230              return V;
4231
4232            const SCEV *OpV = getSCEVAtScope(Op, L);
4233            if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV)) {
4234              Constant *C = SC->getValue();
4235              if (C->getType() != Op->getType())
4236                C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
4237                                                                  Op->getType(),
4238                                                                  false),
4239                                          C, Op->getType());
4240              Operands.push_back(C);
4241            } else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV)) {
4242              if (Constant *C = dyn_cast<Constant>(SU->getValue())) {
4243                if (C->getType() != Op->getType())
4244                  C =
4245                    ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
4246                                                                  Op->getType(),
4247                                                                  false),
4248                                          C, Op->getType());
4249                Operands.push_back(C);
4250              } else
4251                return V;
4252            } else {
4253              return V;
4254            }
4255          }
4256        }
4257
4258        Constant *C = 0;
4259        if (const CmpInst *CI = dyn_cast<CmpInst>(I))
4260          C = ConstantFoldCompareInstOperands(CI->getPredicate(),
4261                                              Operands[0], Operands[1], TD);
4262        else
4263          C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
4264                                       &Operands[0], Operands.size(), TD);
4265        if (C)
4266          return getSCEV(C);
4267      }
4268    }
4269
4270    // This is some other type of SCEVUnknown, just return it.
4271    return V;
4272  }
4273
4274  if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
4275    // Avoid performing the look-up in the common case where the specified
4276    // expression has no loop-variant portions.
4277    for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
4278      const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
4279      if (OpAtScope != Comm->getOperand(i)) {
4280        // Okay, at least one of these operands is loop variant but might be
4281        // foldable.  Build a new instance of the folded commutative expression.
4282        SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
4283                                            Comm->op_begin()+i);
4284        NewOps.push_back(OpAtScope);
4285
4286        for (++i; i != e; ++i) {
4287          OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
4288          NewOps.push_back(OpAtScope);
4289        }
4290        if (isa<SCEVAddExpr>(Comm))
4291          return getAddExpr(NewOps);
4292        if (isa<SCEVMulExpr>(Comm))
4293          return getMulExpr(NewOps);
4294        if (isa<SCEVSMaxExpr>(Comm))
4295          return getSMaxExpr(NewOps);
4296        if (isa<SCEVUMaxExpr>(Comm))
4297          return getUMaxExpr(NewOps);
4298        llvm_unreachable("Unknown commutative SCEV type!");
4299      }
4300    }
4301    // If we got here, all operands are loop invariant.
4302    return Comm;
4303  }
4304
4305  if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
4306    const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
4307    const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
4308    if (LHS == Div->getLHS() && RHS == Div->getRHS())
4309      return Div;   // must be loop invariant
4310    return getUDivExpr(LHS, RHS);
4311  }
4312
4313  // If this is a loop recurrence for a loop that does not contain L, then we
4314  // are dealing with the final value computed by the loop.
4315  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
4316    if (!L || !AddRec->getLoop()->contains(L)) {
4317      // To evaluate this recurrence, we need to know how many times the AddRec
4318      // loop iterates.  Compute this now.
4319      const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
4320      if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;
4321
4322      // Then, evaluate the AddRec.
4323      return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
4324    }
4325    return AddRec;
4326  }
4327
4328  if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
4329    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
4330    if (Op == Cast->getOperand())
4331      return Cast;  // must be loop invariant
4332    return getZeroExtendExpr(Op, Cast->getType());
4333  }
4334
4335  if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
4336    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
4337    if (Op == Cast->getOperand())
4338      return Cast;  // must be loop invariant
4339    return getSignExtendExpr(Op, Cast->getType());
4340  }
4341
4342  if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
4343    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
4344    if (Op == Cast->getOperand())
4345      return Cast;  // must be loop invariant
4346    return getTruncateExpr(Op, Cast->getType());
4347  }
4348
4349  llvm_unreachable("Unknown SCEV type!");
4350  return 0;
4351}
4352
4353/// getSCEVAtScope - This is a convenience function which does
4354/// getSCEVAtScope(getSCEV(V), L).
4355const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
4356  return getSCEVAtScope(getSCEV(V), L);
4357}
4358
4359/// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the
4360/// following equation:
4361///
4362///     A * X = B (mod N)
4363///
4364/// where N = 2^BW and BW is the common bit width of A and B. The signedness of
4365/// A and B isn't important.
4366///
4367/// If the equation does not have a solution, SCEVCouldNotCompute is returned.
4368static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B,
4369                                               ScalarEvolution &SE) {
4370  uint32_t BW = A.getBitWidth();
4371  assert(BW == B.getBitWidth() && "Bit widths must be the same.");
4372  assert(A != 0 && "A must be non-zero.");
4373
4374  // 1. D = gcd(A, N)
4375  //
  // The gcd of A and N may have only one prime factor: 2. The number of
  // trailing zeros in A is its multiplicity.
4378  uint32_t Mult2 = A.countTrailingZeros();
4379  // D = 2^Mult2
4380
4381  // 2. Check if B is divisible by D.
4382  //
  // B is divisible by D if and only if the multiplicity of prime factor 2
  // for B is not less than the multiplicity of this prime factor for D.
4385  if (B.countTrailingZeros() < Mult2)
4386    return SE.getCouldNotCompute();
4387
4388  // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
4389  // modulo (N / D).
4390  //
4391  // (N / D) may need BW+1 bits in its representation.  Hence, we'll use this
4392  // bit width during computations.
4393  APInt AD = A.lshr(Mult2).zext(BW + 1);  // AD = A / D
4394  APInt Mod(BW + 1, 0);
4395  Mod.set(BW - Mult2);  // Mod = N / D
4396  APInt I = AD.multiplicativeInverse(Mod);
4397
4398  // 4. Compute the minimum unsigned root of the equation:
4399  // I * (B / D) mod (N / D)
4400  APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod);
4401
4402  // The result is guaranteed to be less than 2^BW so we may truncate it to BW
4403  // bits.
4404  return SE.getConstant(Result.trunc(BW));
4405}
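
#if 0
// Worked example of the algorithm above (illustrative only; this standalone
// snippet and its helper name are not part of ScalarEvolution). Solve
// 2*X = 254 (mod 2^8): D = gcd(2, 2^8) = 2, and 254 is divisible by D, so
// A/D = 1, N/D = 128, I = 1, and X = (1 * 127) mod 128 = 127.
#include <cassert>
#include <cstdint>
static void SolveLinEquationExample() {
  const uint8_t A = 2, B = 254, X = 127;
  assert(static_cast<uint8_t>(A * X) == B && "127 solves 2*X = -2 (mod 2^8)");
}
#endif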
4406
4407/// SolveQuadraticEquation - Find the roots of the quadratic equation for the
4408/// given quadratic chrec {L,+,M,+,N}.  This returns either the two roots (which
4409/// might be the same) or two SCEVCouldNotCompute objects.
4410///
4411static std::pair<const SCEV *,const SCEV *>
4412SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
4413  assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
4414  const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
4415  const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
4416  const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
4417
4418  // We currently can only solve this if the coefficients are constants.
4419  if (!LC || !MC || !NC) {
4420    const SCEV *CNC = SE.getCouldNotCompute();
4421    return std::make_pair(CNC, CNC);
4422  }
4423
4424  uint32_t BitWidth = LC->getValue()->getValue().getBitWidth();
4425  const APInt &L = LC->getValue()->getValue();
4426  const APInt &M = MC->getValue()->getValue();
4427  const APInt &N = NC->getValue()->getValue();
4428  APInt Two(BitWidth, 2);
4429  APInt Four(BitWidth, 4);
4430
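  // Note (added commentary): the chrec {L,+,M,+,N} evaluated at iteration x
  // is L + M*x + N*x*(x-1)/2, i.e. the polynomial (N/2)*x^2 + (M - N/2)*x + L,
  // which is where the A and B coefficients below come from.  For example,
  // {-4,+,1,+,2} expands to x^2 - 4, whose positive root x = 2 matches the
  // chrec values -4, -3, 0 at iterations 0, 1, 2.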
4431  {
4432    using namespace APIntOps;
4433    const APInt& C = L;
4434    // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C
4435    // The B coefficient is M-N/2
4436    APInt B(M);
4437    B -= sdiv(N,Two);
4438
4439    // The A coefficient is N/2
4440    APInt A(N.sdiv(Two));
4441
4442    // Compute the B^2-4ac term.
4443    APInt SqrtTerm(B);
4444    SqrtTerm *= B;
4445    SqrtTerm -= Four * (A * C);
4446
4447    // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest
4448    // integer value or else APInt::sqrt() will assert.
4449    APInt SqrtVal(SqrtTerm.sqrt());
4450
4451    // Compute the two solutions for the quadratic formula.
4452    // The divisions must be performed as signed divisions.
4453    APInt NegB(-B);
4454    APInt TwoA( A << 1 );
    // A zero divisor means the quadratic formula does not apply.
    if (TwoA.isMinValue()) {
4456      const SCEV *CNC = SE.getCouldNotCompute();
4457      return std::make_pair(CNC, CNC);
4458    }
4459
4460    LLVMContext &Context = SE.getContext();
4461
4462    ConstantInt *Solution1 =
4463      ConstantInt::get(Context, (NegB + SqrtVal).sdiv(TwoA));
4464    ConstantInt *Solution2 =
4465      ConstantInt::get(Context, (NegB - SqrtVal).sdiv(TwoA));
4466
4467    return std::make_pair(SE.getConstant(Solution1),
4468                          SE.getConstant(Solution2));
  } // end of scope using APIntOps
4470}
4471
4472/// HowFarToZero - Return the number of times a backedge comparing the specified
4473/// value to zero will execute.  If not computable, return CouldNotCompute.
4474ScalarEvolution::BackedgeTakenInfo
4475ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) {
  // If the value is a constant:
4477  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
4478    // If the value is already zero, the branch will execute zero times.
4479    if (C->getValue()->isZero()) return C;
4480    return getCouldNotCompute();  // Otherwise it will loop infinitely.
4481  }
4482
4483  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V);
4484  if (!AddRec || AddRec->getLoop() != L)
4485    return getCouldNotCompute();
4486
4487  if (AddRec->isAffine()) {
4488    // If this is an affine expression, the execution count of this branch is
4489    // the minimum unsigned root of the following equation:
4490    //
4491    //     Start + Step*N = 0 (mod 2^BW)
4492    //
4493    // equivalent to:
4494    //
4495    //             Step*N = -Start (mod 2^BW)
4496    //
4497    // where BW is the common bit width of Start and Step.
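    //
    // For example (illustrative), for {2,+,2} over i8 the equation is
    // 2*N = -2 (mod 2^8), whose minimum unsigned root is N = 127, since
    // 2 + 2*127 = 256 = 0 (mod 256).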
4498
4499    // Get the initial value for the loop.
4500    const SCEV *Start = getSCEVAtScope(AddRec->getStart(),
4501                                       L->getParentLoop());
4502    const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1),
4503                                      L->getParentLoop());
4504
4505    if (const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step)) {
4506      // For now we handle only constant steps.
4507
4508      // First, handle unitary steps.
4509      if (StepC->getValue()->equalsInt(1))      // 1*N = -Start (mod 2^BW), so:
4510        return getNegativeSCEV(Start);          //   N = -Start (as unsigned)
4511      if (StepC->getValue()->isAllOnesValue())  // -1*N = -Start (mod 2^BW), so:
4512        return Start;                           //    N = Start (as unsigned)
4513
4514      // Then, try to solve the above equation provided that Start is constant.
4515      if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
4516        return SolveLinEquationWithOverflow(StepC->getValue()->getValue(),
4517                                            -StartC->getValue()->getValue(),
4518                                            *this);
4519    }
4520  } else if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
4521    // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
4522    // the quadratic equation to solve it.
    std::pair<const SCEV *, const SCEV *> Roots =
      SolveQuadraticEquation(AddRec, *this);
4525    const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
4526    const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
4527    if (R1) {
4528#if 0
4529      dbgs() << "HFTZ: " << *V << " - sol#1: " << *R1
4530             << "  sol#2: " << *R2 << "\n";
4531#endif
4532      // Pick the smallest positive root value.
4533      if (ConstantInt *CB =
4534          dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT,
4535                                   R1->getValue(), R2->getValue()))) {
        if (!CB->getZExtValue())
4537          std::swap(R1, R2);   // R1 is the minimum root now.
4538
4539        // We can only use this value if the chrec ends up with an exact zero
4540        // value at this index.  When solving for "X*X != 5", for example, we
4541        // should not accept a root of 2.
4542        const SCEV *Val = AddRec->evaluateAtIteration(R1, *this);
4543        if (Val->isZero())
4544          return R1;  // We found a quadratic root!
4545      }
4546    }
4547  }
4548
4549  return getCouldNotCompute();
4550}
4551
4552/// HowFarToNonZero - Return the number of times a backedge checking the
4553/// specified value for nonzero will execute.  If not computable, return
4554/// CouldNotCompute
4555ScalarEvolution::BackedgeTakenInfo
4556ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) {
4557  // Loops that look like: while (X == 0) are very strange indeed.  We don't
4558  // handle them yet except for the trivial case.  This could be expanded in the
4559  // future as needed.
4560
4561  // If the value is a constant, check to see if it is known to be non-zero
4562  // already.  If so, the backedge will execute zero times.
4563  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
4564    if (!C->getValue()->isNullValue())
4565      return getIntegerSCEV(0, C->getType());
4566    return getCouldNotCompute();  // Otherwise it will loop infinitely.
4567  }
4568
4569  // We could implement others, but I really doubt anyone writes loops like
4570  // this, and if they did, they would already be constant folded.
4571  return getCouldNotCompute();
4572}
4573
4574/// getLoopPredecessor - If the given loop's header has exactly one unique
4575/// predecessor outside the loop, return it. Otherwise return null.
4576///
4577BasicBlock *ScalarEvolution::getLoopPredecessor(const Loop *L) {
4578  BasicBlock *Header = L->getHeader();
4579  BasicBlock *Pred = 0;
4580  for (pred_iterator PI = pred_begin(Header), E = pred_end(Header);
4581       PI != E; ++PI)
4582    if (!L->contains(*PI)) {
4583      if (Pred && Pred != *PI) return 0; // Multiple predecessors.
4584      Pred = *PI;
4585    }
4586  return Pred;
4587}
4588
4589/// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
4590/// (which may not be an immediate predecessor) which has exactly one
4591/// successor from which BB is reachable, or null if no such block is
4592/// found.
4593///
4594BasicBlock *
4595ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
4596  // If the block has a unique predecessor, then there is no path from the
4597  // predecessor to the block that does not go through the direct edge
4598  // from the predecessor to the block.
4599  if (BasicBlock *Pred = BB->getSinglePredecessor())
4600    return Pred;
4601
4602  // A loop's header is defined to be a block that dominates the loop.
4603  // If the header has a unique predecessor outside the loop, it must be
4604  // a block that has exactly one successor that can reach the loop.
4605  if (Loop *L = LI->getLoopFor(BB))
4606    return getLoopPredecessor(L);
4607
4608  return 0;
4609}
4610
4611/// HasSameValue - SCEV structural equivalence is usually sufficient for
4612/// testing whether two expressions are equal, however for the purposes of
4613/// looking for a condition guarding a loop, it can be useful to be a little
4614/// more general, since a front-end may have replicated the controlling
4615/// expression.
4616///
4617static bool HasSameValue(const SCEV *A, const SCEV *B) {
4618  // Quick check to see if they are the same SCEV.
4619  if (A == B) return true;
4620
4621  // Otherwise, if they're both SCEVUnknown, it's possible that they hold
4622  // two different instructions with the same value. Check for this case.
4623  if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
4624    if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
4625      if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
4626        if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
4627          if (AI->isIdenticalTo(BI) && !AI->mayReadFromMemory())
4628            return true;
4629
4630  // Otherwise assume they may have a different value.
4631  return false;
4632}
4633
4634bool ScalarEvolution::isKnownNegative(const SCEV *S) {
4635  return getSignedRange(S).getSignedMax().isNegative();
4636}
4637
4638bool ScalarEvolution::isKnownPositive(const SCEV *S) {
4639  return getSignedRange(S).getSignedMin().isStrictlyPositive();
4640}
4641
4642bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
4643  return !getSignedRange(S).getSignedMin().isNegative();
4644}
4645
4646bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
4647  return !getSignedRange(S).getSignedMax().isStrictlyPositive();
4648}
4649
4650bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
4651  return isKnownNegative(S) || isKnownPositive(S);
4652}
4653
4654bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
4655                                       const SCEV *LHS, const SCEV *RHS) {
4656
4657  if (HasSameValue(LHS, RHS))
4658    return ICmpInst::isTrueWhenEqual(Pred);
4659
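  // Each case below proves or refutes the predicate from the conservative
  // value ranges of the two sides.  For example (illustrative), if LHS has
  // signed range [0, 10) and RHS has signed range [10, 20), then LHS s< RHS
  // is known true because max(LHS) = 9 < 10 = min(RHS).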
4660  switch (Pred) {
4661  default:
4662    llvm_unreachable("Unexpected ICmpInst::Predicate value!");
4663    break;
4664  case ICmpInst::ICMP_SGT:
4665    Pred = ICmpInst::ICMP_SLT;
    std::swap(LHS, RHS);   // FALL THROUGH
4667  case ICmpInst::ICMP_SLT: {
4668    ConstantRange LHSRange = getSignedRange(LHS);
4669    ConstantRange RHSRange = getSignedRange(RHS);
4670    if (LHSRange.getSignedMax().slt(RHSRange.getSignedMin()))
4671      return true;
4672    if (LHSRange.getSignedMin().sge(RHSRange.getSignedMax()))
4673      return false;
4674    break;
4675  }
4676  case ICmpInst::ICMP_SGE:
4677    Pred = ICmpInst::ICMP_SLE;
    std::swap(LHS, RHS);   // FALL THROUGH
4679  case ICmpInst::ICMP_SLE: {
4680    ConstantRange LHSRange = getSignedRange(LHS);
4681    ConstantRange RHSRange = getSignedRange(RHS);
4682    if (LHSRange.getSignedMax().sle(RHSRange.getSignedMin()))
4683      return true;
4684    if (LHSRange.getSignedMin().sgt(RHSRange.getSignedMax()))
4685      return false;
4686    break;
4687  }
4688  case ICmpInst::ICMP_UGT:
4689    Pred = ICmpInst::ICMP_ULT;
    std::swap(LHS, RHS);   // FALL THROUGH
4691  case ICmpInst::ICMP_ULT: {
4692    ConstantRange LHSRange = getUnsignedRange(LHS);
4693    ConstantRange RHSRange = getUnsignedRange(RHS);
4694    if (LHSRange.getUnsignedMax().ult(RHSRange.getUnsignedMin()))
4695      return true;
4696    if (LHSRange.getUnsignedMin().uge(RHSRange.getUnsignedMax()))
4697      return false;
4698    break;
4699  }
4700  case ICmpInst::ICMP_UGE:
4701    Pred = ICmpInst::ICMP_ULE;
    std::swap(LHS, RHS);   // FALL THROUGH
4703  case ICmpInst::ICMP_ULE: {
4704    ConstantRange LHSRange = getUnsignedRange(LHS);
4705    ConstantRange RHSRange = getUnsignedRange(RHS);
4706    if (LHSRange.getUnsignedMax().ule(RHSRange.getUnsignedMin()))
4707      return true;
4708    if (LHSRange.getUnsignedMin().ugt(RHSRange.getUnsignedMax()))
4709      return false;
4710    break;
4711  }
4712  case ICmpInst::ICMP_NE: {
4713    if (getUnsignedRange(LHS).intersectWith(getUnsignedRange(RHS)).isEmptySet())
4714      return true;
4715    if (getSignedRange(LHS).intersectWith(getSignedRange(RHS)).isEmptySet())
4716      return true;
4717
4718    const SCEV *Diff = getMinusSCEV(LHS, RHS);
4719    if (isKnownNonZero(Diff))
4720      return true;
4721    break;
4722  }
4723  case ICmpInst::ICMP_EQ:
4724    // The check at the top of the function catches the case where
4725    // the values are known to be equal.
4726    break;
4727  }
4728  return false;
4729}
4730
4731/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
4732/// protected by a conditional between LHS and RHS.  This is used to
/// eliminate casts.
4734bool
4735ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
4736                                             ICmpInst::Predicate Pred,
4737                                             const SCEV *LHS, const SCEV *RHS) {
4738  // Interpret a null as meaning no loop, where there is obviously no guard
4739  // (interprocedural conditions notwithstanding).
4740  if (!L) return true;
4741
4742  BasicBlock *Latch = L->getLoopLatch();
4743  if (!Latch)
4744    return false;
4745
4746  BranchInst *LoopContinuePredicate =
4747    dyn_cast<BranchInst>(Latch->getTerminator());
4748  if (!LoopContinuePredicate ||
4749      LoopContinuePredicate->isUnconditional())
4750    return false;
4751
4752  return isImpliedCond(LoopContinuePredicate->getCondition(), Pred, LHS, RHS,
4753                       LoopContinuePredicate->getSuccessor(0) != L->getHeader());
4754}
4755
4756/// isLoopGuardedByCond - Test whether entry to the loop is protected
4757/// by a conditional between LHS and RHS.  This is used to help avoid max
4758/// expressions in loop trip counts, and to eliminate casts.
4759bool
4760ScalarEvolution::isLoopGuardedByCond(const Loop *L,
4761                                     ICmpInst::Predicate Pred,
4762                                     const SCEV *LHS, const SCEV *RHS) {
4763  // Interpret a null as meaning no loop, where there is obviously no guard
4764  // (interprocedural conditions notwithstanding).
4765  if (!L) return false;
4766
4767  BasicBlock *Predecessor = getLoopPredecessor(L);
4768  BasicBlock *PredecessorDest = L->getHeader();
4769
4770  // Starting at the loop predecessor, climb up the predecessor chain, as long
4771  // as there are predecessors that can be found that have unique successors
4772  // leading to the original header.
4773  for (; Predecessor;
4774       PredecessorDest = Predecessor,
4775       Predecessor = getPredecessorWithUniqueSuccessorForBB(Predecessor)) {
4776
4777    BranchInst *LoopEntryPredicate =
4778      dyn_cast<BranchInst>(Predecessor->getTerminator());
4779    if (!LoopEntryPredicate ||
4780        LoopEntryPredicate->isUnconditional())
4781      continue;
4782
4783    if (isImpliedCond(LoopEntryPredicate->getCondition(), Pred, LHS, RHS,
4784                      LoopEntryPredicate->getSuccessor(0) != PredecessorDest))
4785      return true;
4786  }
4787
4788  return false;
4789}
4790
4791/// isImpliedCond - Test whether the condition described by Pred, LHS,
4792/// and RHS is true whenever the given Cond value evaluates to true.
4793bool ScalarEvolution::isImpliedCond(Value *CondValue,
4794                                    ICmpInst::Predicate Pred,
4795                                    const SCEV *LHS, const SCEV *RHS,
4796                                    bool Inverse) {
4797  // Recursively handle And and Or conditions.
4798  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CondValue)) {
4799    if (BO->getOpcode() == Instruction::And) {
4800      if (!Inverse)
4801        return isImpliedCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) ||
4802               isImpliedCond(BO->getOperand(1), Pred, LHS, RHS, Inverse);
4803    } else if (BO->getOpcode() == Instruction::Or) {
4804      if (Inverse)
4805        return isImpliedCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) ||
4806               isImpliedCond(BO->getOperand(1), Pred, LHS, RHS, Inverse);
4807    }
4808  }
4809
4810  ICmpInst *ICI = dyn_cast<ICmpInst>(CondValue);
4811  if (!ICI) return false;
4812
4813  // Bail if the ICmp's operands' types are wider than the needed type
4814  // before attempting to call getSCEV on them. This avoids infinite
4815  // recursion, since the analysis of widening casts can require loop
4816  // exit condition information for overflow checking, which would
4817  // lead back here.
4818  if (getTypeSizeInBits(LHS->getType()) <
4819      getTypeSizeInBits(ICI->getOperand(0)->getType()))
4820    return false;
4821
4822  // Now that we found a conditional branch that dominates the loop, check to
4823  // see if it is the comparison we are looking for.
4824  ICmpInst::Predicate FoundPred;
4825  if (Inverse)
4826    FoundPred = ICI->getInversePredicate();
4827  else
4828    FoundPred = ICI->getPredicate();
4829
4830  const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
4831  const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
4832
4833  // Balance the types. The case where FoundLHS' type is wider than
4834  // LHS' type is checked for above.
4835  if (getTypeSizeInBits(LHS->getType()) >
4836      getTypeSizeInBits(FoundLHS->getType())) {
4837    if (CmpInst::isSigned(Pred)) {
4838      FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
4839      FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
4840    } else {
4841      FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
4842      FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
4843    }
4844  }
4845
4846  // Canonicalize the query to match the way instcombine will have
4847  // canonicalized the comparison.
4848  // First, put a constant operand on the right.
4849  if (isa<SCEVConstant>(LHS)) {
4850    std::swap(LHS, RHS);
4851    Pred = ICmpInst::getSwappedPredicate(Pred);
4852  }
4853  // Then, canonicalize comparisons with boundary cases.
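  // For example (illustrative), (x u>= 1) becomes (x != 0), and
  // (x u<= UINT_MAX-1) becomes (x != UINT_MAX); comparisons that are
  // tautological or unsatisfiable at a boundary fold directly to true or
  // false.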
4854  if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
4855    const APInt &RA = RC->getValue()->getValue();
4856    switch (Pred) {
4857    default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
4858    case ICmpInst::ICMP_EQ:
4859    case ICmpInst::ICMP_NE:
4860      break;
4861    case ICmpInst::ICMP_UGE:
4862      if ((RA - 1).isMinValue()) {
4863        Pred = ICmpInst::ICMP_NE;
4864        RHS = getConstant(RA - 1);
4865        break;
4866      }
4867      if (RA.isMaxValue()) {
4868        Pred = ICmpInst::ICMP_EQ;
4869        break;
4870      }
4871      if (RA.isMinValue()) return true;
4872      break;
4873    case ICmpInst::ICMP_ULE:
4874      if ((RA + 1).isMaxValue()) {
4875        Pred = ICmpInst::ICMP_NE;
4876        RHS = getConstant(RA + 1);
4877        break;
4878      }
4879      if (RA.isMinValue()) {
4880        Pred = ICmpInst::ICMP_EQ;
4881        break;
4882      }
4883      if (RA.isMaxValue()) return true;
4884      break;
4885    case ICmpInst::ICMP_SGE:
4886      if ((RA - 1).isMinSignedValue()) {
4887        Pred = ICmpInst::ICMP_NE;
4888        RHS = getConstant(RA - 1);
4889        break;
4890      }
4891      if (RA.isMaxSignedValue()) {
4892        Pred = ICmpInst::ICMP_EQ;
4893        break;
4894      }
4895      if (RA.isMinSignedValue()) return true;
4896      break;
4897    case ICmpInst::ICMP_SLE:
4898      if ((RA + 1).isMaxSignedValue()) {
4899        Pred = ICmpInst::ICMP_NE;
4900        RHS = getConstant(RA + 1);
4901        break;
4902      }
4903      if (RA.isMinSignedValue()) {
4904        Pred = ICmpInst::ICMP_EQ;
4905        break;
4906      }
4907      if (RA.isMaxSignedValue()) return true;
4908      break;
4909    case ICmpInst::ICMP_UGT:
4910      if (RA.isMinValue()) {
4911        Pred = ICmpInst::ICMP_NE;
4912        break;
4913      }
4914      if ((RA + 1).isMaxValue()) {
4915        Pred = ICmpInst::ICMP_EQ;
4916        RHS = getConstant(RA + 1);
4917        break;
4918      }
4919      if (RA.isMaxValue()) return false;
4920      break;
4921    case ICmpInst::ICMP_ULT:
4922      if (RA.isMaxValue()) {
4923        Pred = ICmpInst::ICMP_NE;
4924        break;
4925      }
4926      if ((RA - 1).isMinValue()) {
4927        Pred = ICmpInst::ICMP_EQ;
4928        RHS = getConstant(RA - 1);
4929        break;
4930      }
4931      if (RA.isMinValue()) return false;
4932      break;
4933    case ICmpInst::ICMP_SGT:
4934      if (RA.isMinSignedValue()) {
4935        Pred = ICmpInst::ICMP_NE;
4936        break;
4937      }
4938      if ((RA + 1).isMaxSignedValue()) {
4939        Pred = ICmpInst::ICMP_EQ;
4940        RHS = getConstant(RA + 1);
4941        break;
4942      }
4943      if (RA.isMaxSignedValue()) return false;
4944      break;
4945    case ICmpInst::ICMP_SLT:
4946      if (RA.isMaxSignedValue()) {
4947        Pred = ICmpInst::ICMP_NE;
4948        break;
4949      }
4950      if ((RA - 1).isMinSignedValue()) {
        Pred = ICmpInst::ICMP_EQ;
        RHS = getConstant(RA - 1);
        break;
4954      }
4955      if (RA.isMinSignedValue()) return false;
4956      break;
4957    }
4958  }
4959
4960  // Check to see if we can make the LHS or RHS match.
4961  if (LHS == FoundRHS || RHS == FoundLHS) {
4962    if (isa<SCEVConstant>(RHS)) {
4963      std::swap(FoundLHS, FoundRHS);
4964      FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
4965    } else {
4966      std::swap(LHS, RHS);
4967      Pred = ICmpInst::getSwappedPredicate(Pred);
4968    }
4969  }
4970
4971  // Check whether the found predicate is the same as the desired predicate.
4972  if (FoundPred == Pred)
4973    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);
4974
4975  // Check whether swapping the found predicate makes it the same as the
4976  // desired predicate.
4977  if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
4978    if (isa<SCEVConstant>(RHS))
4979      return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS);
4980    else
4981      return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred),
4982                                   RHS, LHS, FoundLHS, FoundRHS);
4983  }
4984
  // Check whether the actual condition is stronger than needed.
4986  if (FoundPred == ICmpInst::ICMP_EQ)
4987    if (ICmpInst::isTrueWhenEqual(Pred))
4988      if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS))
4989        return true;
4990  if (Pred == ICmpInst::ICMP_NE)
4991    if (!ICmpInst::isTrueWhenEqual(FoundPred))
4992      if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS))
4993        return true;
4994
4995  // Otherwise assume the worst.
4996  return false;
4997}
4998
4999/// isImpliedCondOperands - Test whether the condition described by Pred,
5000/// LHS, and RHS is true whenever the condition described by Pred, FoundLHS,
5001/// and FoundRHS is true.
5002bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
5003                                            const SCEV *LHS, const SCEV *RHS,
5004                                            const SCEV *FoundLHS,
5005                                            const SCEV *FoundRHS) {
5006  return isImpliedCondOperandsHelper(Pred, LHS, RHS,
5007                                     FoundLHS, FoundRHS) ||
         // ~x < ~y --> x > y, since ~x = -x - 1 reverses the ordering.
5009         isImpliedCondOperandsHelper(Pred, LHS, RHS,
5010                                     getNotSCEV(FoundRHS),
5011                                     getNotSCEV(FoundLHS));
5012}
5013
5014/// isImpliedCondOperandsHelper - Test whether the condition described by
5015/// Pred, LHS, and RHS is true whenever the condition described by Pred,
5016/// FoundLHS, and FoundRHS is true.
5017bool
5018ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
5019                                             const SCEV *LHS, const SCEV *RHS,
5020                                             const SCEV *FoundLHS,
5021                                             const SCEV *FoundRHS) {
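  // Each ordered case below is a transitivity argument.  For example
  // (illustrative), FoundLHS s< FoundRHS combined with LHS s<= FoundLHS and
  // FoundRHS s<= RHS yields LHS s< RHS.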
5022  switch (Pred) {
5023  default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
5024  case ICmpInst::ICMP_EQ:
5025  case ICmpInst::ICMP_NE:
5026    if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
5027      return true;
5028    break;
5029  case ICmpInst::ICMP_SLT:
5030  case ICmpInst::ICMP_SLE:
5031    if (isKnownPredicate(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
5032        isKnownPredicate(ICmpInst::ICMP_SGE, RHS, FoundRHS))
5033      return true;
5034    break;
5035  case ICmpInst::ICMP_SGT:
5036  case ICmpInst::ICMP_SGE:
5037    if (isKnownPredicate(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
5038        isKnownPredicate(ICmpInst::ICMP_SLE, RHS, FoundRHS))
5039      return true;
5040    break;
5041  case ICmpInst::ICMP_ULT:
5042  case ICmpInst::ICMP_ULE:
5043    if (isKnownPredicate(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
5044        isKnownPredicate(ICmpInst::ICMP_UGE, RHS, FoundRHS))
5045      return true;
5046    break;
5047  case ICmpInst::ICMP_UGT:
5048  case ICmpInst::ICMP_UGE:
5049    if (isKnownPredicate(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
5050        isKnownPredicate(ICmpInst::ICMP_ULE, RHS, FoundRHS))
5051      return true;
5052    break;
5053  }
5054
5055  return false;
5056}
5057
5058/// getBECount - Subtract the end and start values and divide by the step,
5059/// rounding up, to get the number of times the backedge is executed. Return
5060/// CouldNotCompute if an intermediate computation overflows.
5061const SCEV *ScalarEvolution::getBECount(const SCEV *Start,
5062                                        const SCEV *End,
5063                                        const SCEV *Step,
5064                                        bool NoWrap) {
5065  assert(!isKnownNegative(Step) &&
5066         "This code doesn't handle negative strides yet!");
5067
5068  const Type *Ty = Start->getType();
5069  const SCEV *NegOne = getIntegerSCEV(-1, Ty);
5070  const SCEV *Diff = getMinusSCEV(End, Start);
5071  const SCEV *RoundUp = getAddExpr(Step, NegOne);
5072
5073  // Add an adjustment to the difference between End and Start so that
5074  // the division will effectively round up.
5075  const SCEV *Add = getAddExpr(Diff, RoundUp);
5076
5077  if (!NoWrap) {
5078    // Check Add for unsigned overflow.
5079    // TODO: More sophisticated things could be done here.
5080    const Type *WideTy = IntegerType::get(getContext(),
5081                                          getTypeSizeInBits(Ty) + 1);
5082    const SCEV *EDiff = getZeroExtendExpr(Diff, WideTy);
5083    const SCEV *ERoundUp = getZeroExtendExpr(RoundUp, WideTy);
5084    const SCEV *OperandExtendedAdd = getAddExpr(EDiff, ERoundUp);
5085    if (getZeroExtendExpr(Add, WideTy) != OperandExtendedAdd)
5086      return getCouldNotCompute();
5087  }
5088
5089  return getUDivExpr(Add, Step);
5090}
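
#if 0
// Worked example (illustrative only; this standalone snippet and its helper
// name are not part of ScalarEvolution).  With Start = 0, End = 10, and
// Step = 3, the rounded-up division gives a backedge count of 4, matching
// the four iterations whose induction values 0, 3, 6, 9 are below the limit.
#include <cassert>
#include <cstdint>
static void GetBECountExample() {
  const uint64_t Start = 0, End = 10, Step = 3;
  assert((End - Start + (Step - 1)) / Step == 4 && "rounded-up division");
}
#endif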
5091
5092/// HowManyLessThans - Return the number of times a backedge containing the
5093/// specified less-than comparison will execute.  If not computable, return
5094/// CouldNotCompute.
ScalarEvolution::BackedgeTakenInfo
ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
                                  const Loop *L, bool isSigned) {
  // Only handle:  "ADDREC < LoopInvariant".
  if (!RHS->isLoopInvariant(L)) return getCouldNotCompute();

  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!AddRec || AddRec->getLoop() != L)
    return getCouldNotCompute();

  // Check to see if we have a flag which makes analysis easy.
  bool NoWrap = isSigned ? AddRec->hasNoSignedWrap() :
                           AddRec->hasNoUnsignedWrap();

  if (AddRec->isAffine()) {
    unsigned BitWidth = getTypeSizeInBits(AddRec->getType());
    const SCEV *Step = AddRec->getStepRecurrence(*this);

    if (Step->isZero())
      return getCouldNotCompute();
    if (Step->isOne()) {
      // With unit stride, the iteration never steps past the limit value.
    } else if (isKnownPositive(Step)) {
      // Test whether a positive iteration can step past the limit
      // value and past the maximum value for its type in a single step.
      // Note that it's not sufficient to check NoWrap here, because even
      // though the value after a wrap is undefined, it's not undefined
      // behavior, so if wrap does occur, the loop could either terminate or
      // loop infinitely, but in either case, the loop is guaranteed to
      // iterate at least until the iteration where the wrapping occurs.
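      // (Illustrative: with i8 unsigned and Step == 4, Max == 255, so any
      // RHS whose unsigned max exceeds 255 - 3 == 252 could be stepped past
      // together with 255 in a single increment.)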
      const SCEV *One = getIntegerSCEV(1, Step->getType());
      if (isSigned) {
        APInt Max = APInt::getSignedMaxValue(BitWidth);
        if ((Max - getSignedRange(getMinusSCEV(Step, One)).getSignedMax())
              .slt(getSignedRange(RHS).getSignedMax()))
          return getCouldNotCompute();
      } else {
        APInt Max = APInt::getMaxValue(BitWidth);
        if ((Max - getUnsignedRange(getMinusSCEV(Step, One)).getUnsignedMax())
              .ult(getUnsignedRange(RHS).getUnsignedMax()))
          return getCouldNotCompute();
      }
    } else
      // TODO: Handle negative strides here and below.
      return getCouldNotCompute();

    // We know the LHS is of the form {n,+,s} and the RHS is some
    // loop-invariant m.  So, we count the number of iterations in which
    // {n,+,s} < m is true.  Note that we cannot simply return max(m-n,0)/s
    // because it's not safe to treat m-n as either signed or unsigned due
    // to the possibility of overflow.
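    // (Illustrative: in i8 with n == -100 and m == 100, m-n == 200, which
    // overflows the signed range, so the difference cannot be divided
    // directly.)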

    // First, we get the value of the LHS in the first iteration: n
    const SCEV *Start = AddRec->getOperand(0);

    // Determine the minimum constant start value.
    const SCEV *MinStart = getConstant(isSigned ?
      getSignedRange(Start).getSignedMin() :
      getUnsignedRange(Start).getUnsignedMin());

    // If we know that the condition is true in order to enter the loop,
    // then we know that it will run exactly (m-n)/s times. Otherwise, we
    // only know that it will execute (max(m,n)-n)/s times. In both cases,
    // the division must round up.
    const SCEV *End = RHS;
    if (!isLoopGuardedByCond(L,
                             isSigned ? ICmpInst::ICMP_SLT :
                                        ICmpInst::ICMP_ULT,
                             getMinusSCEV(Start, Step), RHS))
      End = isSigned ? getSMaxExpr(RHS, Start)
                     : getUMaxExpr(RHS, Start);
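    // (Illustrative: if n == 10 and m == 5 with no guard, End becomes
    // max(5, 10) == 10, yielding the correct count of zero.)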

    // Determine the maximum constant end value.
    const SCEV *MaxEnd = getConstant(isSigned ?
      getSignedRange(End).getSignedMax() :
      getUnsignedRange(End).getUnsignedMax());

    // If MaxEnd is within a step of the maximum integer value in its type,
    // adjust it down to the minimum value which would produce the same effect.
    // This allows the subsequent ceiling division of (N+(step-1))/step to
    // compute the correct value.
    const SCEV *StepMinusOne = getMinusSCEV(Step,
                                            getIntegerSCEV(1, Step->getType()));
    MaxEnd = isSigned ?
      getSMinExpr(MaxEnd,
                  getMinusSCEV(getConstant(APInt::getSignedMaxValue(BitWidth)),
                               StepMinusOne)) :
      getUMinExpr(MaxEnd,
                  getMinusSCEV(getConstant(APInt::getMaxValue(BitWidth)),
                               StepMinusOne));
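    // (Illustrative: with i8 unsigned and Step == 4, MaxEnd is clamped to
    // at most 255 - 3 == 252, so MaxEnd + (Step - 1) still fits in i8.)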

    // Finally, we subtract these two values and divide, rounding up, to get
    // the number of times the backedge is executed.
    const SCEV *BECount = getBECount(Start, End, Step, NoWrap);

    // The maximum backedge count is similar, except using the minimum start
    // value and the maximum end value.
    const SCEV *MaxBECount = getBECount(MinStart, MaxEnd, Step, NoWrap);

    return BackedgeTakenInfo(BECount, MaxBECount);
  }

  return getCouldNotCompute();
}

/// getNumIterationsInRange - Return the number of iterations of this loop that
/// produce values in the specified constant range.  Another way of looking at
/// this is that it returns the first iteration number where the value is not
/// in the range, thus computing the exit count.  If the iteration count can't
/// be computed, an instance of SCEVCouldNotCompute is returned.
const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
                                                    ScalarEvolution &SE) const {
  if (Range.isFullSet())  // Infinite loop.
    return SE.getCouldNotCompute();

  // If the start is a non-zero constant, shift the range to simplify things.
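  // (Illustrative: {3,+,1} in [0,10) becomes {0,+,1} in [-3,7).)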
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
    if (!SC->getValue()->isZero()) {
      SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
      Operands[0] = SE.getIntegerSCEV(0, SC->getType());
      const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop());
      if (const SCEVAddRecExpr *ShiftedAddRec =
            dyn_cast<SCEVAddRecExpr>(Shifted))
        return ShiftedAddRec->getNumIterationsInRange(
                           Range.subtract(SC->getValue()->getValue()), SE);
      // This is strange and shouldn't happen.
      return SE.getCouldNotCompute();
    }

  // The only time we can solve this is when we have all constant indices.
  // Otherwise, we cannot determine the overflow conditions.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (!isa<SCEVConstant>(getOperand(i)))
      return SE.getCouldNotCompute();

  // Okay, at this point we know that all elements of the chrec are constants
  // and that the start element is zero.

  // First check to see if the range contains zero.  If not, the first
  // iteration exits.
  unsigned BitWidth = SE.getTypeSizeInBits(getType());
  if (!Range.contains(APInt(BitWidth, 0)))
    return SE.getIntegerSCEV(0, getType());

  if (isAffine()) {
    // If this is an affine expression then we have this situation:
    //   Solve {0,+,A} in Range  ===  Ax in Range

    // We know that zero is in the range.  If A is positive then we know that
    // the upper value of the range must be the first possible exit value.
    // If A is negative then the lower end of the range is the last possible
    // loop value.  Also note that we already checked for a full range.
    APInt One(BitWidth, 1);
    APInt A = cast<SCEVConstant>(getOperand(1))->getValue()->getValue();
    APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower();

    // The exit value should be (End+A)/A.
    APInt ExitVal = (End + A).udiv(A);
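    // (Illustrative: for Range == [0,5) and A == 2, End == 4 and
    // ExitVal == (4 + 2) / 2 == 3; the iterates 0, 2, and 4 lie in the
    // range, while the value 6 at iteration 3 does not.)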
    ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);

    // Evaluate at the exit value.  If we really did fall out of the valid
    // range, then we computed our trip count, otherwise wrap around or other
    // things must have happened.
    ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
    if (Range.contains(Val->getValue()))
      return SE.getCouldNotCompute();  // Something strange happened

    // Ensure that the previous value is in the range.  This is a sanity check.
    assert(Range.contains(
           EvaluateConstantChrecAtConstant(this,
           ConstantInt::get(SE.getContext(), ExitVal - One), SE)->getValue()) &&
           "Linear scev computation is off in a bad way!");
    return SE.getConstant(ExitValue);
  } else if (isQuadratic()) {
    // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
    // the quadratic equation to solve it.  To do this, we must frame our
    // problem in terms of figuring out when zero is crossed, instead of when
    // Range.getUpper() is crossed.
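    // (Illustrative: {0,+,1,+,2} evaluates to x*x, so to find where it
    // leaves [0,5) we solve {-5,+,1,+,2}, i.e. x*x - 5 == 0.)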
    SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end());
    NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
    const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop());

    // Next, solve the constructed addrec.
    std::pair<const SCEV *,const SCEV *> Roots =
      SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE);
    const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
    const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
    if (R1) {
      // Pick the smallest positive root value.
      if (ConstantInt *CB =
          dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT,
                         R1->getValue(), R2->getValue()))) {
        if (!CB->getZExtValue())
          std::swap(R1, R2);   // R1 is the minimum root now.

        // Make sure the root is not off by one.  The returned iteration should
        // not be in the range, but the previous one should be.  When solving
        // for "X*X < 5", for example, we should not return a root of 2.
        ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this,
                                                             R1->getValue(),
                                                             SE);
        if (Range.contains(R1Val->getValue())) {
          // The next iteration must be out of the range...
          ConstantInt *NextVal =
                ConstantInt::get(SE.getContext(), R1->getValue()->getValue()+1);

          R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
          if (!Range.contains(R1Val->getValue()))
            return SE.getConstant(NextVal);
          return SE.getCouldNotCompute();  // Something strange happened
        }

        // If R1 was not in the range, then it is a good return value.  Make
        // sure that R1-1 WAS in the range though, just in case.
        ConstantInt *NextVal =
               ConstantInt::get(SE.getContext(), R1->getValue()->getValue()-1);
        R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
        if (Range.contains(R1Val->getValue()))
          return R1;
        return SE.getCouldNotCompute();  // Something strange happened
      }
    }
  }

  return SE.getCouldNotCompute();
}

//===----------------------------------------------------------------------===//
//                   SCEVCallbackVH Class Implementation
//===----------------------------------------------------------------------===//

void ScalarEvolution::SCEVCallbackVH::deleted() {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
  if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
    SE->ConstantEvolutionLoopExitValue.erase(PN);
  SE->Scalars.erase(getValPtr());
  // this now dangles!
}

void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *) {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");

  // Forget all the expressions associated with users of the old value,
  // so that future queries will recompute the expressions using the new
  // value.
  SmallVector<User *, 16> Worklist;
  SmallPtrSet<User *, 8> Visited;
  Value *Old = getValPtr();
  bool DeleteOld = false;
  for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
       UI != UE; ++UI)
    Worklist.push_back(*UI);
  while (!Worklist.empty()) {
    User *U = Worklist.pop_back_val();
    // Deleting the Old value will cause this to dangle. Postpone
    // that until everything else is done.
    if (U == Old) {
      DeleteOld = true;
      continue;
    }
    if (!Visited.insert(U))
      continue;
    if (PHINode *PN = dyn_cast<PHINode>(U))
      SE->ConstantEvolutionLoopExitValue.erase(PN);
    SE->Scalars.erase(U);
    for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
         UI != UE; ++UI)
      Worklist.push_back(*UI);
  }
  // Delete the Old value if it (indirectly) references itself.
  if (DeleteOld) {
    if (PHINode *PN = dyn_cast<PHINode>(Old))
      SE->ConstantEvolutionLoopExitValue.erase(PN);
    SE->Scalars.erase(Old);
    // this now dangles!
  }
  // this may dangle!
}

ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
  : CallbackVH(V), SE(se) {}

//===----------------------------------------------------------------------===//
//                   ScalarEvolution Class Implementation
//===----------------------------------------------------------------------===//

ScalarEvolution::ScalarEvolution()
  : FunctionPass(&ID) {
}

bool ScalarEvolution::runOnFunction(Function &F) {
  this->F = &F;
  LI = &getAnalysis<LoopInfo>();
  TD = getAnalysisIfAvailable<TargetData>();
  DT = &getAnalysis<DominatorTree>();
  return false;
}

void ScalarEvolution::releaseMemory() {
  Scalars.clear();
  BackedgeTakenCounts.clear();
  ConstantEvolutionLoopExitValue.clear();
  ValuesAtScopes.clear();
  UniqueSCEVs.clear();
  SCEVAllocator.Reset();
}

void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<LoopInfo>();
  AU.addRequiredTransitive<DominatorTree>();
}

bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
  return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
}

static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
                          const Loop *L) {
  // Print all inner loops first.
  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
    PrintLoopInfo(OS, SE, *I);

  OS << "Loop ";
  WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
  OS << ": ";

  SmallVector<BasicBlock *, 8> ExitBlocks;
  L->getExitBlocks(ExitBlocks);
  if (ExitBlocks.size() != 1)
    OS << "<multiple exits> ";

  if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
    OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
  } else {
    OS << "Unpredictable backedge-taken count. ";
  }

  OS << "\n"
        "Loop ";
  WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
  OS << ": ";

  if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
    OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
  } else {
    OS << "Unpredictable max backedge-taken count. ";
  }

  OS << "\n";
}

void ScalarEvolution::print(raw_ostream &OS, const Module *) const {
  // ScalarEvolution's implementation of the print method is to print
  // out SCEV values of all instructions that are interesting. Doing
  // this potentially causes it to create new SCEV objects though,
  // which technically conflicts with the const qualifier. This isn't
  // observable from outside the class though, so casting away the
  // const isn't dangerous.
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);

  OS << "Classifying expressions for: ";
  WriteAsOperand(OS, F, /*PrintType=*/false);
  OS << "\n";
  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
    if (isSCEVable(I->getType())) {
      OS << *I << '\n';
      OS << "  -->  ";
      const SCEV *SV = SE.getSCEV(&*I);
      SV->print(OS);

      const Loop *L = LI->getLoopFor((*I).getParent());

      const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
      if (AtUse != SV) {
        OS << "  -->  ";
        AtUse->print(OS);
      }

      if (L) {
        OS << "\t\t" "Exits: ";
        const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
        if (!ExitValue->isLoopInvariant(L)) {
          OS << "<<Unknown>>";
        } else {
          OS << *ExitValue;
        }
      }

      OS << "\n";
    }

  OS << "Determining loop execution counts for: ";
  WriteAsOperand(OS, F, /*PrintType=*/false);
  OS << "\n";
  for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
    PrintLoopInfo(OS, &SE, *I);
}