LoopStrengthReduce.cpp revision 201360
//===- LoopStrengthReduce.cpp - Strength Reduce IVs in Loops --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation analyzes and transforms the induction variables (and
// computations derived from them) into forms suitable for efficient execution
// on the target.
//
// This pass performs a strength reduction on array references inside loops
// that have the loop induction variable as one or more of their components;
// it rewrites expressions to take advantage of scaled-index addressing modes
// available on the target, and it performs a variety of other optimizations
// related to loop induction variables.
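//
// For example (an illustrative sketch, not a case taken from this file): an
// address that is recomputed on every iteration as
//
//   addr = A + i*4
//
// can instead be kept in a pointer induction variable that is simply
// incremented by 4 each time around the loop, removing the per-iteration
// multiply and exposing scaled or post-increment addressing on targets that
// have it.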
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "loop-reduce"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Analysis/IVUsers.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/AddrModeMatcher.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumReduced ,    "Number of IV uses strength reduced");
STATISTIC(NumInserted,    "Number of PHIs inserted");
STATISTIC(NumVariable,    "Number of PHIs with variable strides");
STATISTIC(NumEliminated,  "Number of strides eliminated");
STATISTIC(NumShadow,      "Number of Shadow IVs optimized");
STATISTIC(NumImmSunk,     "Number of common expr immediates sunk into uses");
STATISTIC(NumLoopCond,    "Number of loop terminating conds optimized");
STATISTIC(NumCountZero,   "Number of count iv optimized to count toward zero");

static cl::opt<bool> EnableFullLSRMode("enable-full-lsr",
                                       cl::init(false),
                                       cl::Hidden);

namespace {

  struct BasedUser;

  /// IVExpr - This structure keeps track of one IV expression inserted during
  /// StrengthReduceIVUsersOfStride. It contains the stride, the common base, as
  /// well as the PHI node and increment value created for rewrite.
  struct IVExpr {
    const SCEV *Stride;
    const SCEV *Base;
    PHINode    *PHI;

    IVExpr(const SCEV *const stride, const SCEV *const base, PHINode *phi)
      : Stride(stride), Base(base), PHI(phi) {}
  };

  /// IVsOfOneStride - This structure keeps track of all IV expressions inserted
  /// during StrengthReduceIVUsersOfStride for a particular stride of the IV.
  struct IVsOfOneStride {
    std::vector<IVExpr> IVs;

    void addIV(const SCEV *const Stride, const SCEV *const Base, PHINode *PHI) {
      IVs.push_back(IVExpr(Stride, Base, PHI));
    }
  };

  class LoopStrengthReduce : public LoopPass {
    IVUsers *IU;
    ScalarEvolution *SE;
    bool Changed;

    /// IVsByStride - Keep track of all IVs that have been inserted for a
    /// particular stride.
    std::map<const SCEV *, IVsOfOneStride> IVsByStride;

    /// DeadInsts - Keep track of instructions we may have made dead, so that
    /// we can remove them after we are done working.
    SmallVector<WeakVH, 16> DeadInsts;

    /// TLI - Keep a pointer to a TargetLowering to consult for determining
    /// transformation profitability.
    const TargetLowering *TLI;

  public:
    static char ID; // Pass ID, replacement for typeid
    explicit LoopStrengthReduce(const TargetLowering *tli = NULL) :
      LoopPass(&ID), TLI(tli) {}

    bool runOnLoop(Loop *L, LPPassManager &LPM);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      // We split critical edges, so we change the CFG.  However, we do update
      // many analyses if they are around.
      AU.addPreservedID(LoopSimplifyID);
      AU.addPreserved("loops");
      AU.addPreserved("domfrontier");
      AU.addPreserved("domtree");

      AU.addRequiredID(LoopSimplifyID);
      AU.addRequired<ScalarEvolution>();
      AU.addPreserved<ScalarEvolution>();
      AU.addRequired<IVUsers>();
      AU.addPreserved<IVUsers>();
    }

  private:
    void OptimizeIndvars(Loop *L);

    /// OptimizeLoopTermCond - Change loop terminating condition to use the
    /// postinc iv when possible.
    void OptimizeLoopTermCond(Loop *L);

    /// OptimizeShadowIV - If IV is used in an int-to-float cast
    /// inside the loop then try to eliminate the cast operation.
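    ///
    /// For example (illustrative): if the loop contains
    ///   %d = sitofp i32 %iv to double
    /// and the cast is the only way the floating-point computation consumes
    /// the IV, a parallel floating-point "shadow" IV can be advanced alongside
    /// the integer one so the cast no longer executes inside the loop.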
    void OptimizeShadowIV(Loop *L);

    /// OptimizeMax - Rewrite the loop's terminating condition
    /// if it uses a max computation.
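    ///
    /// For example (illustrative): when a loop guard already guarantees
    /// n >= 1, ScalarEvolution may express the trip count as smax(n, 1); an
    /// exit condition phrased against that max can often be rewritten to
    /// compare against n directly.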
    ICmpInst *OptimizeMax(Loop *L, ICmpInst *Cond,
                          IVStrideUse* &CondUse);

    /// OptimizeLoopCountIV - If, after all sharing of IVs, the IV used for
    /// deciding when to exit the loop is used only for that purpose, try to
    /// rearrange things so it counts down to a test against zero.
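    ///
    /// For example (illustrative): a loop whose only remaining use of the
    /// counting IV is an exit test such as "iv != n", where iv starts at 0
    /// and steps by 1, can be rewritten to decrement a new IV from n and
    /// exit when it reaches zero, which is typically a cheaper test.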
    bool OptimizeLoopCountIV(Loop *L);
    bool OptimizeLoopCountIVOfStride(const SCEV* &Stride,
                                     IVStrideUse* &CondUse, Loop *L);

    /// StrengthReduceIVUsersOfStride - Strength reduce all of the users of a
    /// single stride of IV.  All of the users may have different starting
    /// values, and this may not be the only stride.
    void StrengthReduceIVUsersOfStride(const SCEV *Stride,
                                      IVUsersOfOneStride &Uses,
                                      Loop *L);
    void StrengthReduceIVUsers(Loop *L);

    ICmpInst *ChangeCompareStride(Loop *L, ICmpInst *Cond,
                                  IVStrideUse* &CondUse,
                                  const SCEV* &CondStride,
                                  bool PostPass = false);

    bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse,
                           const SCEV* &CondStride);
    bool RequiresTypeConversion(const Type *Ty, const Type *NewTy);
    const SCEV *CheckForIVReuse(bool, bool, bool, const SCEV *,
                             IVExpr&, const Type*,
                             const std::vector<BasedUser>& UsersToProcess);
    bool ValidScale(bool, int64_t,
                    const std::vector<BasedUser>& UsersToProcess);
    bool ValidOffset(bool, int64_t, int64_t,
                     const std::vector<BasedUser>& UsersToProcess);
    const SCEV *CollectIVUsers(const SCEV *Stride,
                              IVUsersOfOneStride &Uses,
                              Loop *L,
                              bool &AllUsesAreAddresses,
                              bool &AllUsesAreOutsideLoop,
                              std::vector<BasedUser> &UsersToProcess);
    bool StrideMightBeShared(const SCEV *Stride, Loop *L, bool CheckPreInc);
    bool ShouldUseFullStrengthReductionMode(
                                const std::vector<BasedUser> &UsersToProcess,
                                const Loop *L,
                                bool AllUsesAreAddresses,
                                const SCEV *Stride);
    void PrepareToStrengthReduceFully(
                             std::vector<BasedUser> &UsersToProcess,
                             const SCEV *Stride,
                             const SCEV *CommonExprs,
                             const Loop *L,
                             SCEVExpander &PreheaderRewriter);
    void PrepareToStrengthReduceFromSmallerStride(
                                         std::vector<BasedUser> &UsersToProcess,
                                         Value *CommonBaseV,
                                         const IVExpr &ReuseIV,
                                         Instruction *PreInsertPt);
    void PrepareToStrengthReduceWithNewPhi(
                                  std::vector<BasedUser> &UsersToProcess,
                                  const SCEV *Stride,
                                  const SCEV *CommonExprs,
                                  Value *CommonBaseV,
                                  Instruction *IVIncInsertPt,
                                  const Loop *L,
                                  SCEVExpander &PreheaderRewriter);

    void DeleteTriviallyDeadInstructions();
  };
}

char LoopStrengthReduce::ID = 0;
static RegisterPass<LoopStrengthReduce>
X("loop-reduce", "Loop Strength Reduction");

Pass *llvm::createLoopStrengthReducePass(const TargetLowering *TLI) {
  return new LoopStrengthReduce(TLI);
}

/// DeleteTriviallyDeadInstructions - If any of the instructions in the
/// specified set are trivially dead, delete them and see if this makes any of
/// their operands subsequently dead.
void LoopStrengthReduce::DeleteTriviallyDeadInstructions() {
  while (!DeadInsts.empty()) {
    Instruction *I = dyn_cast_or_null<Instruction>(DeadInsts.pop_back_val());

    if (I == 0 || !isInstructionTriviallyDead(I))
      continue;

    for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
      if (Instruction *U = dyn_cast<Instruction>(*OI)) {
        *OI = 0;
        if (U->use_empty())
          DeadInsts.push_back(U);
      }

    I->eraseFromParent();
    Changed = true;
  }
}

/// isAddressUse - Returns true if the specified instruction is using the
/// specified value as an address.
static bool isAddressUse(Instruction *Inst, Value *OperandVal) {
  bool isAddress = isa<LoadInst>(Inst);
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->getOperand(1) == OperandVal)
      isAddress = true;
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::prefetch:
      case Intrinsic::x86_sse2_loadu_dq:
      case Intrinsic::x86_sse2_loadu_pd:
      case Intrinsic::x86_sse_loadu_ps:
      case Intrinsic::x86_sse_storeu_ps:
      case Intrinsic::x86_sse2_storeu_pd:
      case Intrinsic::x86_sse2_storeu_dq:
      case Intrinsic::x86_sse2_storel_dq:
        if (II->getOperand(1) == OperandVal)
          isAddress = true;
        break;
    }
  }
  return isAddress;
}

/// getAccessType - Return the type of the memory being accessed.
static const Type *getAccessType(const Instruction *Inst) {
  const Type *AccessTy = Inst->getType();
  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst))
    AccessTy = SI->getOperand(0)->getType();
  else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::x86_sse_storeu_ps:
    case Intrinsic::x86_sse2_storeu_pd:
    case Intrinsic::x86_sse2_storeu_dq:
    case Intrinsic::x86_sse2_storel_dq:
      AccessTy = II->getOperand(1)->getType();
      break;
    }
  }
  return AccessTy;
}

namespace {
  /// BasedUser - For a particular base value, keep information about how we've
  /// partitioned the expression so far.
  struct BasedUser {
    /// Base - The Base value for the PHI node that needs to be inserted for
    /// this use.  As the use is processed, information gets moved from this
    /// field to the Imm field (below).  BasedUser values are sorted by this
    /// field.
    const SCEV *Base;

    /// Inst - The instruction using the induction variable.
    Instruction *Inst;

    /// OperandValToReplace - The operand value of Inst to replace with the
    /// EmittedBase.
    Value *OperandValToReplace;

    /// Imm - The immediate value that should be added to the base immediately
    /// before Inst, because it will be folded into the imm field of the
    /// instruction.  This is also sometimes used for loop-variant values that
    /// must be added inside the loop.
    const SCEV *Imm;

    /// Phi - The induction variable that performs the striding that
    /// should be used for this user.
    PHINode *Phi;

    // isUseOfPostIncrementedValue - True if this should use the
    // post-incremented version of this IV, not the preincremented version.
    // This can only be set in special cases, such as the terminating setcc
    // instruction for a loop and uses outside the loop that are dominated by
    // the loop.
    bool isUseOfPostIncrementedValue;

    BasedUser(IVStrideUse &IVSU, ScalarEvolution *se)
      : Base(IVSU.getOffset()), Inst(IVSU.getUser()),
        OperandValToReplace(IVSU.getOperandValToReplace()),
        Imm(se->getIntegerSCEV(0, Base->getType())),
        isUseOfPostIncrementedValue(IVSU.isUseOfPostIncrementedValue()) {}

    // Once we rewrite the code to insert the new IVs we want, update the
    // operands of Inst to use the new expression 'NewBase', with 'Imm' added
    // to it.
    void RewriteInstructionToUseNewBase(const SCEV *NewBase,
                                        Instruction *InsertPt,
                                       SCEVExpander &Rewriter, Loop *L, Pass *P,
                                        SmallVectorImpl<WeakVH> &DeadInsts,
                                        ScalarEvolution *SE);

    Value *InsertCodeForBaseAtPosition(const SCEV *NewBase,
                                       const Type *Ty,
                                       SCEVExpander &Rewriter,
                                       Instruction *IP,
                                       ScalarEvolution *SE);
    void dump() const;
  };
}

void BasedUser::dump() const {
  dbgs() << " Base=" << *Base;
  dbgs() << " Imm=" << *Imm;
  dbgs() << "   Inst: " << *Inst;
}

Value *BasedUser::InsertCodeForBaseAtPosition(const SCEV *NewBase,
                                              const Type *Ty,
                                              SCEVExpander &Rewriter,
                                              Instruction *IP,
                                              ScalarEvolution *SE) {
  Value *Base = Rewriter.expandCodeFor(NewBase, 0, IP);

  // Wrap the base in a SCEVUnknown so that ScalarEvolution doesn't try to
  // re-analyze it.
  const SCEV *NewValSCEV = SE->getUnknown(Base);

  // Always emit the immediate into the same block as the user.
  NewValSCEV = SE->getAddExpr(NewValSCEV, Imm);

  return Rewriter.expandCodeFor(NewValSCEV, Ty, IP);
}


// Once we rewrite the code to insert the new IVs we want, update the
// operands of Inst to use the new expression 'NewBase', with 'Imm' added
// to it. NewBasePt is the last instruction which contributes to the
// value of NewBase in the case that it's a different instruction from
// the PHI that NewBase is computed from, or null otherwise.
//
void BasedUser::RewriteInstructionToUseNewBase(const SCEV *NewBase,
                                               Instruction *NewBasePt,
                                      SCEVExpander &Rewriter, Loop *L, Pass *P,
                                      SmallVectorImpl<WeakVH> &DeadInsts,
                                      ScalarEvolution *SE) {
  if (!isa<PHINode>(Inst)) {
    // By default, insert code at the user instruction.
    BasicBlock::iterator InsertPt = Inst;

    // However, if the Operand is itself an instruction, the (potentially
    // complex) inserted code may be shared by many users.  Because of this, we
    // want to emit code for the computation of the operand right before its old
    // computation.  This is usually safe, because we obviously used to use the
    // computation when it was computed in its current block.  However, in some
    // cases (e.g. use of a post-incremented induction variable) the NewBase
    // value will be pinned to live somewhere after the original computation.
    // In this case, we have to back off.
    //
    // If this is a use outside the loop (which means after, since it is based
    // on a loop indvar) we use the post-incremented value, so that we don't
    // artificially make the preinc value live out the bottom of the loop.
    if (!isUseOfPostIncrementedValue && L->contains(Inst)) {
      if (NewBasePt && isa<PHINode>(OperandValToReplace)) {
        InsertPt = NewBasePt;
        ++InsertPt;
      } else if (Instruction *OpInst
                 = dyn_cast<Instruction>(OperandValToReplace)) {
        InsertPt = OpInst;
        while (isa<PHINode>(InsertPt)) ++InsertPt;
      }
    }
    Value *NewVal = InsertCodeForBaseAtPosition(NewBase,
                                                OperandValToReplace->getType(),
                                                Rewriter, InsertPt, SE);
    // Replace the use of the operand Value with the new Phi we just created.
    Inst->replaceUsesOfWith(OperandValToReplace, NewVal);

    DEBUG(dbgs() << "      Replacing with ");
    DEBUG(WriteAsOperand(dbgs(), NewVal, /*PrintType=*/false));
    DEBUG(dbgs() << ", which has value " << *NewBase << " plus IMM "
                 << *Imm << "\n");
    return;
  }

  // PHI nodes are more complex.  We have to insert one copy of the NewBase+Imm
  // expression into each operand block that uses it.  Note that PHI nodes can
  // have multiple entries for the same predecessor.  We use a map to make sure
  // that a PHI node only has a single Value* for each predecessor (which also
  // prevents us from inserting duplicate code in some blocks).
  DenseMap<BasicBlock*, Value*> InsertedCode;
  PHINode *PN = cast<PHINode>(Inst);
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    if (PN->getIncomingValue(i) == OperandValToReplace) {
      // If the original expression is outside the loop, put the replacement
      // code in the same place as the original expression,
      // which need not be an immediate predecessor of this PHI.  This way we
      // need only one copy of it even if it is referenced multiple times in
      // the PHI.  We don't do this when the original expression is inside the
      // loop because multiple copies sometimes do useful sinking of code in
      // that case(?).
      Instruction *OldLoc = dyn_cast<Instruction>(OperandValToReplace);
      BasicBlock *PHIPred = PN->getIncomingBlock(i);
      if (L->contains(OldLoc)) {
        // If this is a critical edge, split the edge so that we do not insert
        // the code on all predecessor/successor paths.  We do this unless this
        // is the canonical backedge for this loop, as this can make some
        // inserted code be in an illegal position.
        if (e != 1 && PHIPred->getTerminator()->getNumSuccessors() > 1 &&
            !isa<IndirectBrInst>(PHIPred->getTerminator()) &&
            (PN->getParent() != L->getHeader() || !L->contains(PHIPred))) {

          // First step, split the critical edge.
          BasicBlock *NewBB = SplitCriticalEdge(PHIPred, PN->getParent(),
                                                P, false);

          // Next step: move the basic block.  In particular, if the PHI node
          // is outside of the loop, and PredTI is in the loop, we want to
          // move the block to be immediately before the PHI block, not
          // immediately after PredTI.
          if (L->contains(PHIPred) && !L->contains(PN))
            NewBB->moveBefore(PN->getParent());

          // Splitting the edge can reduce the number of PHI entries we have.
          e = PN->getNumIncomingValues();
          PHIPred = NewBB;
          i = PN->getBasicBlockIndex(PHIPred);
        }
      }
      Value *&Code = InsertedCode[PHIPred];
      if (!Code) {
        // Insert the code into the end of the predecessor block.
        Instruction *InsertPt = (L->contains(OldLoc)) ?
                                PHIPred->getTerminator() :
                                OldLoc->getParent()->getTerminator();
        Code = InsertCodeForBaseAtPosition(NewBase, PN->getType(),
                                           Rewriter, InsertPt, SE);

        DEBUG(dbgs() << "      Changing PHI use to ");
        DEBUG(WriteAsOperand(dbgs(), Code, /*PrintType=*/false));
        DEBUG(dbgs() << ", which has value " << *NewBase << " plus IMM "
                     << *Imm << "\n");
      }

      // Replace the use of the operand Value with the new Phi we just created.
      PN->setIncomingValue(i, Code);
      Rewriter.clear();
    }
  }

  // PHI node might have become a constant value after SplitCriticalEdge.
  DeadInsts.push_back(Inst);
}


/// fitsInAddressMode - Return true if V can be subsumed within an addressing
/// mode, and does not need to be put in a register first.
static bool fitsInAddressMode(const SCEV *V, const Type *AccessTy,
                             const TargetLowering *TLI, bool HasBaseReg) {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(V)) {
    int64_t VC = SC->getValue()->getSExtValue();
    if (TLI) {
      TargetLowering::AddrMode AM;
      AM.BaseOffs = VC;
      AM.HasBaseReg = HasBaseReg;
      return TLI->isLegalAddressingMode(AM, AccessTy);
    } else {
      // Defaults to PPC. PPC allows a sign-extended 16-bit immediate field.
      return (VC > -(1 << 16) && VC < (1 << 16)-1);
    }
  }

  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V))
    if (GlobalValue *GV = dyn_cast<GlobalValue>(SU->getValue())) {
      if (TLI) {
        TargetLowering::AddrMode AM;
        AM.BaseGV = GV;
        AM.HasBaseReg = HasBaseReg;
        return TLI->isLegalAddressingMode(AM, AccessTy);
      } else {
        // Default: assume global addresses are not legal.
      }
    }

  return false;
}

/// MoveLoopVariantsToImmediateField - Move any subexpressions from Val that are
/// loop varying to the Imm operand.
static void MoveLoopVariantsToImmediateField(const SCEV *&Val, const SCEV *&Imm,
                                             Loop *L, ScalarEvolution *SE) {
  if (Val->isLoopInvariant(L)) return;  // Nothing to do.

  if (const SCEVAddExpr *SAE = dyn_cast<SCEVAddExpr>(Val)) {
    SmallVector<const SCEV *, 4> NewOps;
    NewOps.reserve(SAE->getNumOperands());

    for (unsigned i = 0; i != SAE->getNumOperands(); ++i)
      if (!SAE->getOperand(i)->isLoopInvariant(L)) {
        // If this is a loop-variant expression, it must stay in the immediate
        // field of the expression.
        Imm = SE->getAddExpr(Imm, SAE->getOperand(i));
      } else {
        NewOps.push_back(SAE->getOperand(i));
      }

    if (NewOps.empty())
      Val = SE->getIntegerSCEV(0, Val->getType());
    else
      Val = SE->getAddExpr(NewOps);
  } else if (const SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Val)) {
    // Try to pull immediates out of the start value of nested addrec's.
    const SCEV *Start = SARE->getStart();
    MoveLoopVariantsToImmediateField(Start, Imm, L, SE);

    SmallVector<const SCEV *, 4> Ops(SARE->op_begin(), SARE->op_end());
    Ops[0] = Start;
    Val = SE->getAddRecExpr(Ops, SARE->getLoop());
  } else {
    // Otherwise, all of Val is variant, move the whole thing over.
    Imm = SE->getAddExpr(Imm, Val);
    Val = SE->getIntegerSCEV(0, Val->getType());
  }
}


/// MoveImmediateValues - Look at Val, and pull out any additions of constants
/// that can fit into the immediate field of instructions in the target.
/// Accumulate these immediate values into the Imm value.
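///
/// For example (an illustrative case): in an address expression such as
/// "base + iv + 20", the constant 20 can usually live in the load or store's
/// immediate displacement field, so it is accumulated into Imm instead of
/// being kept as part of the common base.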
static void MoveImmediateValues(const TargetLowering *TLI,
                                const Type *AccessTy,
                                const SCEV *&Val, const SCEV *&Imm,
                                bool isAddress, Loop *L,
                                ScalarEvolution *SE) {
  if (const SCEVAddExpr *SAE = dyn_cast<SCEVAddExpr>(Val)) {
    SmallVector<const SCEV *, 4> NewOps;
    NewOps.reserve(SAE->getNumOperands());

    for (unsigned i = 0; i != SAE->getNumOperands(); ++i) {
      const SCEV *NewOp = SAE->getOperand(i);
      MoveImmediateValues(TLI, AccessTy, NewOp, Imm, isAddress, L, SE);

      if (!NewOp->isLoopInvariant(L)) {
        // If this is a loop-variant expression, it must stay in the immediate
        // field of the expression.
        Imm = SE->getAddExpr(Imm, NewOp);
      } else {
        NewOps.push_back(NewOp);
      }
    }

    if (NewOps.empty())
      Val = SE->getIntegerSCEV(0, Val->getType());
    else
      Val = SE->getAddExpr(NewOps);
    return;
  } else if (const SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Val)) {
    // Try to pull immediates out of the start value of nested addrec's.
    const SCEV *Start = SARE->getStart();
    MoveImmediateValues(TLI, AccessTy, Start, Imm, isAddress, L, SE);

    if (Start != SARE->getStart()) {
      SmallVector<const SCEV *, 4> Ops(SARE->op_begin(), SARE->op_end());
      Ops[0] = Start;
      Val = SE->getAddRecExpr(Ops, SARE->getLoop());
    }
    return;
  } else if (const SCEVMulExpr *SME = dyn_cast<SCEVMulExpr>(Val)) {
    // Transform "8 * (4 + v)" -> "32 + 8*V" if "32" fits in the immed field.
    if (isAddress &&
        fitsInAddressMode(SME->getOperand(0), AccessTy, TLI, false) &&
        SME->getNumOperands() == 2 && SME->isLoopInvariant(L)) {

      const SCEV *SubImm = SE->getIntegerSCEV(0, Val->getType());
      const SCEV *NewOp = SME->getOperand(1);
      MoveImmediateValues(TLI, AccessTy, NewOp, SubImm, isAddress, L, SE);

      // If we extracted something out of the subexpressions, see if we can
      // simplify this!
      if (NewOp != SME->getOperand(1)) {
        // Scale SubImm up by "8".  If the result is a target constant, we are
        // good.
        SubImm = SE->getMulExpr(SubImm, SME->getOperand(0));
        if (fitsInAddressMode(SubImm, AccessTy, TLI, false)) {
          // Accumulate the immediate.
          Imm = SE->getAddExpr(Imm, SubImm);

          // Update what is left of 'Val'.
          Val = SE->getMulExpr(SME->getOperand(0), NewOp);
          return;
        }
      }
    }
  }

  // Loop-variant expressions must stay in the immediate field of the
  // expression.
  if ((isAddress && fitsInAddressMode(Val, AccessTy, TLI, false)) ||
      !Val->isLoopInvariant(L)) {
    Imm = SE->getAddExpr(Imm, Val);
    Val = SE->getIntegerSCEV(0, Val->getType());
    return;
  }

  // Otherwise, no immediates to move.
}

static void MoveImmediateValues(const TargetLowering *TLI,
                                Instruction *User,
                                const SCEV *&Val, const SCEV *&Imm,
                                bool isAddress, Loop *L,
                                ScalarEvolution *SE) {
  const Type *AccessTy = getAccessType(User);
  MoveImmediateValues(TLI, AccessTy, Val, Imm, isAddress, L, SE);
}

/// SeparateSubExprs - Decompose Expr into all of the subexpressions that are
/// added together.  This is used to reassociate common addition subexprs
/// together for maximal sharing when rewriting bases.
static void SeparateSubExprs(SmallVector<const SCEV *, 16> &SubExprs,
                             const SCEV *Expr,
                             ScalarEvolution *SE) {
  if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(Expr)) {
    for (unsigned j = 0, e = AE->getNumOperands(); j != e; ++j)
      SeparateSubExprs(SubExprs, AE->getOperand(j), SE);
  } else if (const SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Expr)) {
    const SCEV *Zero = SE->getIntegerSCEV(0, Expr->getType());
    if (SARE->getOperand(0) == Zero) {
      SubExprs.push_back(Expr);
    } else {
      // Compute the addrec with zero as its base.
      SmallVector<const SCEV *, 4> Ops(SARE->op_begin(), SARE->op_end());
      Ops[0] = Zero;   // Start with zero base.
      SubExprs.push_back(SE->getAddRecExpr(Ops, SARE->getLoop()));


      SeparateSubExprs(SubExprs, SARE->getOperand(0), SE);
    }
  } else if (!Expr->isZero()) {
    // Do not add zero.
    SubExprs.push_back(Expr);
  }
}

// This is logically local to the following function, but C++ says we have
// to make it file scope.
struct SubExprUseData { unsigned Count; bool notAllUsesAreFree; };

/// RemoveCommonExpressionsFromUseBases - Look through all of the Bases of all
/// the Uses, removing any common subexpressions, except that if all such
/// subexpressions can be folded into an addressing mode for all uses inside
/// the loop (this case is referred to as "free" in comments herein) we do
/// not remove anything.  This looks for things like (a+b+c) and
/// (a+c+d) and computes the common (a+c) subexpression.  The common expression
/// is *removed* from the Bases and returned.
static const SCEV *
RemoveCommonExpressionsFromUseBases(std::vector<BasedUser> &Uses,
                                    ScalarEvolution *SE, Loop *L,
                                    const TargetLowering *TLI) {
  unsigned NumUses = Uses.size();

  // Only one use?  This is a very common case, so we handle it specially and
  // cheaply.
  const SCEV *Zero = SE->getIntegerSCEV(0, Uses[0].Base->getType());
  const SCEV *Result = Zero;
  const SCEV *FreeResult = Zero;
  if (NumUses == 1) {
    // If the use is inside the loop, use its base, regardless of what it is:
    // it is clearly shared across all the IV's.  If the use is outside the loop
    // (which means after it) we don't want to factor anything *into* the loop,
    // so just use 0 as the base.
    if (L->contains(Uses[0].Inst))
      std::swap(Result, Uses[0].Base);
    return Result;
  }

  // To find common subexpressions, count how many of Uses use each expression.
  // If any subexpressions are used Uses.size() times, they are common.
  // Also track whether all uses of each expression can be moved into an
  // addressing mode "for free"; such expressions are left within the loop.
  // struct SubExprUseData { unsigned Count; bool notAllUsesAreFree; };
  std::map<const SCEV *, SubExprUseData> SubExpressionUseData;

  // UniqueSubExprs - Keep track of all of the subexpressions we see in the
  // order we see them.
  SmallVector<const SCEV *, 16> UniqueSubExprs;

  SmallVector<const SCEV *, 16> SubExprs;
  unsigned NumUsesInsideLoop = 0;
  for (unsigned i = 0; i != NumUses; ++i) {
    // If the user is outside the loop, just ignore it for base computation.
    // Since the user is outside the loop, it must be *after* the loop (if it
    // were before, it could not be based on the loop IV).  We don't want users
    // after the loop to affect base computation of values *inside* the loop,
    // because we can always add their offsets to the result IV after the loop
    // is done, ensuring we get good code inside the loop.
    if (!L->contains(Uses[i].Inst))
      continue;
    NumUsesInsideLoop++;

    // If the base is zero (which is common), return zero now, there are no
    // CSEs we can find.
    if (Uses[i].Base == Zero) return Zero;

    // If this use is as an address we may be able to put CSEs in the addressing
    // mode rather than hoisting them.
    bool isAddrUse = isAddressUse(Uses[i].Inst, Uses[i].OperandValToReplace);
    // We may need the AccessTy below, but only when isAddrUse, so compute it
    // only in that case.
    const Type *AccessTy = 0;
    if (isAddrUse)
      AccessTy = getAccessType(Uses[i].Inst);

    // Split the expression into subexprs.
    SeparateSubExprs(SubExprs, Uses[i].Base, SE);
    // Add one to SubExpressionUseData.Count for each subexpr present, and
    // if the subexpr is not a valid immediate within an addressing mode use,
    // set SubExpressionUseData.notAllUsesAreFree.  We definitely want to
    // hoist these out of the loop (if they are common to all uses).
    for (unsigned j = 0, e = SubExprs.size(); j != e; ++j) {
      if (++SubExpressionUseData[SubExprs[j]].Count == 1)
        UniqueSubExprs.push_back(SubExprs[j]);
      if (!isAddrUse || !fitsInAddressMode(SubExprs[j], AccessTy, TLI, false))
        SubExpressionUseData[SubExprs[j]].notAllUsesAreFree = true;
    }
    SubExprs.clear();
  }

  // Now that we know how many times each is used, build Result.  Iterate over
  // UniqueSubexprs so that we have a stable ordering.
  for (unsigned i = 0, e = UniqueSubExprs.size(); i != e; ++i) {
    std::map<const SCEV *, SubExprUseData>::iterator I =
       SubExpressionUseData.find(UniqueSubExprs[i]);
    assert(I != SubExpressionUseData.end() && "Entry not found?");
    if (I->second.Count == NumUsesInsideLoop) { // Found CSE!
      if (I->second.notAllUsesAreFree)
        Result = SE->getAddExpr(Result, I->first);
      else
        FreeResult = SE->getAddExpr(FreeResult, I->first);
    } else
      // Remove non-cse's from SubExpressionUseData.
      SubExpressionUseData.erase(I);
  }

  if (FreeResult != Zero) {
    // We have some subexpressions that can be subsumed into addressing
    // modes in every use inside the loop.  However, it's possible that
    // there are so many of them that the combined FreeResult cannot
    // be subsumed, or that the target cannot handle both a FreeResult
    // and a Result in the same instruction (for example because it would
    // require too many registers).  Check this.
    for (unsigned i=0; i<NumUses; ++i) {
      if (!L->contains(Uses[i].Inst))
        continue;
      // We know this is an addressing mode use; if there are any uses that
      // are not, FreeResult would be Zero.
      const Type *AccessTy = getAccessType(Uses[i].Inst);
      if (!fitsInAddressMode(FreeResult, AccessTy, TLI, Result!=Zero)) {
        // FIXME:  could split up FreeResult into pieces here, some hoisted
        // and some not.  There is no obvious advantage to this.
        Result = SE->getAddExpr(Result, FreeResult);
        FreeResult = Zero;
        break;
      }
    }
  }

  // If we found no CSE's, return now.
  if (Result == Zero) return Result;

  // If we still have a FreeResult, remove its subexpressions from
  // SubExpressionUseData.  This means they will remain in the use Bases.
  if (FreeResult != Zero) {
    SeparateSubExprs(SubExprs, FreeResult, SE);
    for (unsigned j = 0, e = SubExprs.size(); j != e; ++j) {
      std::map<const SCEV *, SubExprUseData>::iterator I =
         SubExpressionUseData.find(SubExprs[j]);
      SubExpressionUseData.erase(I);
    }
    SubExprs.clear();
  }

  // Otherwise, remove all of the CSE's we found from each of the base values.
  for (unsigned i = 0; i != NumUses; ++i) {
    // Uses outside the loop don't necessarily include the common base, but
    // the final IV value coming into those uses does.  Instead of trying to
    // remove the pieces of the common base, which might not be there,
    // subtract off the base to compensate for this.
    if (!L->contains(Uses[i].Inst)) {
      Uses[i].Base = SE->getMinusSCEV(Uses[i].Base, Result);
      continue;
    }

    // Split the expression into subexprs.
    SeparateSubExprs(SubExprs, Uses[i].Base, SE);

    // Remove any common subexpressions.
    for (unsigned j = 0, e = SubExprs.size(); j != e; ++j)
      if (SubExpressionUseData.count(SubExprs[j])) {
        SubExprs.erase(SubExprs.begin()+j);
        --j; --e;
      }

    // Finally, add the non-shared expressions together.
    if (SubExprs.empty())
      Uses[i].Base = Zero;
    else
      Uses[i].Base = SE->getAddExpr(SubExprs);
    SubExprs.clear();
  }

  return Result;
}

/// ValidScale - Check whether the given Scale is valid for all loads and
/// stores in UsersToProcess.
///
bool LoopStrengthReduce::ValidScale(bool HasBaseReg, int64_t Scale,
                               const std::vector<BasedUser>& UsersToProcess) {
  if (!TLI)
    return true;

  for (unsigned i = 0, e = UsersToProcess.size(); i!=e; ++i) {
    // If this is a load or other access, pass the type of the access in.
    const Type *AccessTy =
        Type::getVoidTy(UsersToProcess[i].Inst->getContext());
    if (isAddressUse(UsersToProcess[i].Inst,
                     UsersToProcess[i].OperandValToReplace))
      AccessTy = getAccessType(UsersToProcess[i].Inst);
    else if (isa<PHINode>(UsersToProcess[i].Inst))
      continue;

    TargetLowering::AddrMode AM;
    if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(UsersToProcess[i].Imm))
      AM.BaseOffs = SC->getValue()->getSExtValue();
    AM.HasBaseReg = HasBaseReg || !UsersToProcess[i].Base->isZero();
    AM.Scale = Scale;

    // If load[imm+r*scale] is illegal, bail out.
    if (!TLI->isLegalAddressingMode(AM, AccessTy))
      return false;
  }
  return true;
}

/// ValidOffset - Check whether the given Offset is valid for all loads and
/// stores in UsersToProcess.
///
bool LoopStrengthReduce::ValidOffset(bool HasBaseReg,
                               int64_t Offset,
                               int64_t Scale,
                               const std::vector<BasedUser>& UsersToProcess) {
  if (!TLI)
    return true;

  for (unsigned i=0, e = UsersToProcess.size(); i!=e; ++i) {
    // If this is a load or other access, pass the type of the access in.
    const Type *AccessTy =
        Type::getVoidTy(UsersToProcess[i].Inst->getContext());
    if (isAddressUse(UsersToProcess[i].Inst,
                     UsersToProcess[i].OperandValToReplace))
      AccessTy = getAccessType(UsersToProcess[i].Inst);
    else if (isa<PHINode>(UsersToProcess[i].Inst))
      continue;

    TargetLowering::AddrMode AM;
    if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(UsersToProcess[i].Imm))
      AM.BaseOffs = SC->getValue()->getSExtValue();
    AM.BaseOffs = (uint64_t)AM.BaseOffs + (uint64_t)Offset;
    AM.HasBaseReg = HasBaseReg || !UsersToProcess[i].Base->isZero();
    AM.Scale = Scale;

    // If load[imm+r*scale] is illegal, bail out.
    if (!TLI->isLegalAddressingMode(AM, AccessTy))
      return false;
  }
  return true;
}

/// RequiresTypeConversion - Returns true if converting Ty1 to Ty2 is not
/// a nop.
bool LoopStrengthReduce::RequiresTypeConversion(const Type *Ty1,
                                                const Type *Ty2) {
  if (Ty1 == Ty2)
    return false;
  Ty1 = SE->getEffectiveSCEVType(Ty1);
  Ty2 = SE->getEffectiveSCEVType(Ty2);
  if (Ty1 == Ty2)
    return false;
  if (Ty1->canLosslesslyBitCastTo(Ty2))
    return false;
  if (TLI && TLI->isTruncateFree(Ty1, Ty2))
    return false;
  return true;
}

/// CheckForIVReuse - Returns the multiple if the stride is a multiple
/// of a previous stride and it is a legal value for the target addressing
/// mode scale component and optional base reg. This allows the users of
/// this stride to be rewritten as prev iv * factor. It returns 0 if no
/// reuse is possible.  Factors can be negative on some targets, e.g. ARM.
///
/// If all uses are outside the loop, we don't require that all multiplies
/// be folded into the addressing mode, nor even that the factor be constant;
/// a multiply (executed once) outside the loop is better than another IV
/// within.  Well, usually.
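///
/// For example (an illustrative case): if an IV with stride 4 has already
/// been inserted and the uses being processed stride by 8, those uses can
/// often be rewritten in terms of the existing IV with a scale factor of 2
/// folded into the addressing mode, avoiding a second IV entirely.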
const SCEV *LoopStrengthReduce::CheckForIVReuse(bool HasBaseReg,
                                bool AllUsesAreAddresses,
                                bool AllUsesAreOutsideLoop,
                                const SCEV *Stride,
                                IVExpr &IV, const Type *Ty,
                                const std::vector<BasedUser>& UsersToProcess) {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Stride)) {
    int64_t SInt = SC->getValue()->getSExtValue();
    for (unsigned NewStride = 0, e = IU->StrideOrder.size();
         NewStride != e; ++NewStride) {
      std::map<const SCEV *, IVsOfOneStride>::iterator SI =
                IVsByStride.find(IU->StrideOrder[NewStride]);
      if (SI == IVsByStride.end() || !isa<SCEVConstant>(SI->first))
        continue;
      // The other stride has no uses, don't reuse it.
      std::map<const SCEV *, IVUsersOfOneStride *>::iterator UI =
        IU->IVUsesByStride.find(IU->StrideOrder[NewStride]);
      if (UI->second->Users.empty())
        continue;
      int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
      if (SI->first != Stride &&
          (unsigned(abs64(SInt)) < SSInt || (SInt % SSInt) != 0))
        continue;
      int64_t Scale = SInt / SSInt;
      // Check that this stride is valid for all the types used for loads and
      // stores; if it can be used for some and not others, we might as well use
      // the original stride everywhere, since we have to create the IV for it
      // anyway. If the scale is 1, then we don't need to worry about folding
      // multiplications.
      if (Scale == 1 ||
          (AllUsesAreAddresses &&
           ValidScale(HasBaseReg, Scale, UsersToProcess))) {
        // Prefer to reuse an IV with a base of zero.
        for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
               IE = SI->second.IVs.end(); II != IE; ++II)
          // Only reuse previous IV if it would not require a type conversion
          // and if the base difference can be folded.
          if (II->Base->isZero() &&
              !RequiresTypeConversion(II->Base->getType(), Ty)) {
            IV = *II;
            return SE->getIntegerSCEV(Scale, Stride->getType());
          }
        // Otherwise, settle for an IV with a foldable base.
        if (AllUsesAreAddresses)
          for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
                 IE = SI->second.IVs.end(); II != IE; ++II)
            // Only reuse previous IV if it would not require a type conversion
            // and if the base difference can be folded.
            if (SE->getEffectiveSCEVType(II->Base->getType()) ==
                SE->getEffectiveSCEVType(Ty) &&
                isa<SCEVConstant>(II->Base)) {
              int64_t Base =
                cast<SCEVConstant>(II->Base)->getValue()->getSExtValue();
              if (Base > INT32_MIN && Base <= INT32_MAX &&
                  ValidOffset(HasBaseReg, -Base * Scale,
                              Scale, UsersToProcess)) {
                IV = *II;
                return SE->getIntegerSCEV(Scale, Stride->getType());
              }
            }
      }
    }
  } else if (AllUsesAreOutsideLoop) {
    // Accept nonconstant strides here; it is really worthwhile to substitute
    // an existing IV if we can.
    for (unsigned NewStride = 0, e = IU->StrideOrder.size();
         NewStride != e; ++NewStride) {
      std::map<const SCEV *, IVsOfOneStride>::iterator SI =
                IVsByStride.find(IU->StrideOrder[NewStride]);
      if (SI == IVsByStride.end() || !isa<SCEVConstant>(SI->first))
        continue;
      int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
      if (SI->first != Stride && SSInt != 1)
        continue;
      for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
             IE = SI->second.IVs.end(); II != IE; ++II)
        // Accept nonzero base here.
        // Only reuse previous IV if it would not require a type conversion.
        if (!RequiresTypeConversion(II->Base->getType(), Ty)) {
          IV = *II;
          return Stride;
        }
    }
    // Special case, old IV is -1*x and this one is x.  Can treat this one as
    // -1*old.
    for (unsigned NewStride = 0, e = IU->StrideOrder.size();
         NewStride != e; ++NewStride) {
      std::map<const SCEV *, IVsOfOneStride>::iterator SI =
                IVsByStride.find(IU->StrideOrder[NewStride]);
      if (SI == IVsByStride.end())
        continue;
      if (const SCEVMulExpr *ME = dyn_cast<SCEVMulExpr>(SI->first))
        if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(ME->getOperand(0)))
          if (Stride == ME->getOperand(1) &&
              SC->getValue()->getSExtValue() == -1LL)
            for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
                   IE = SI->second.IVs.end(); II != IE; ++II)
              // Accept nonzero base here.
              // Only reuse previous IV if it would not require type conversion.
              if (!RequiresTypeConversion(II->Base->getType(), Ty)) {
                IV = *II;
                return SE->getIntegerSCEV(-1LL, Stride->getType());
              }
    }
  }
  return SE->getIntegerSCEV(0, Stride->getType());
}

/// PartitionByIsUseOfPostIncrementedValue - Simple boolean predicate that
/// returns true if Val's isUseOfPostIncrementedValue is true.
static bool PartitionByIsUseOfPostIncrementedValue(const BasedUser &Val) {
  return Val.isUseOfPostIncrementedValue;
}

/// isNonConstantNegative - Return true if the specified scev is negated, but
/// not a constant.
static bool isNonConstantNegative(const SCEV *Expr) {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Expr);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative, this matches things like (-42 * V).
  return SC->getValue()->getValue().isNegative();
}

/// CollectIVUsers - Transform our list of users and offsets to a bit more
/// complex table. In this new vector, each 'BasedUser' contains 'Base', the
/// base of the strided accesses, as well as the old information from Uses. We
/// progressively move information from the Base field to the Imm field, until
/// we eventually have the full access expression to rewrite the use.
const SCEV *LoopStrengthReduce::CollectIVUsers(const SCEV *Stride,
                                              IVUsersOfOneStride &Uses,
                                              Loop *L,
                                              bool &AllUsesAreAddresses,
                                              bool &AllUsesAreOutsideLoop,
                                       std::vector<BasedUser> &UsersToProcess) {
  // FIXME: Generalize to non-affine IV's.
  if (!Stride->isLoopInvariant(L))
    return SE->getIntegerSCEV(0, Stride->getType());

  UsersToProcess.reserve(Uses.Users.size());
  for (ilist<IVStrideUse>::iterator I = Uses.Users.begin(),
       E = Uses.Users.end(); I != E; ++I) {
    UsersToProcess.push_back(BasedUser(*I, SE));

    // Move any loop variant operands from the offset field to the immediate
    // field of the use, so that we don't try to use something before it is
    // computed.
    MoveLoopVariantsToImmediateField(UsersToProcess.back().Base,
                                     UsersToProcess.back().Imm, L, SE);
    assert(UsersToProcess.back().Base->isLoopInvariant(L) &&
           "Base value is not loop invariant!");
  }

  // We now have a whole bunch of uses of like-strided induction variables, but
  // they might all have different bases.  We want to emit one PHI node for this
  // stride which we fold as many common expressions (between the IVs) into as
  // possible.  Start by identifying the common expressions in the base values
  // for the strides (e.g. if we have "A+C+B" and "A+B+D" as our bases, find
  // "A+B"), emit it to the preheader, then remove the expression from the
  // UsersToProcess base values.
  const SCEV *CommonExprs =
    RemoveCommonExpressionsFromUseBases(UsersToProcess, SE, L, TLI);

  // Next, figure out what we can represent in the immediate fields of
  // instructions.  If we can represent anything there, move it to the imm
  // fields of the BasedUsers.  We do this so that it increases the commonality
  // of the remaining uses.
  unsigned NumPHI = 0;
  bool HasAddress = false;
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
    // If the user is not in the current loop, this means it is using the exit
    // value of the IV.  Do not put anything in the base, make sure it's all in
    // the immediate field to allow as much factoring as possible.
    if (!L->contains(UsersToProcess[i].Inst)) {
      UsersToProcess[i].Imm = SE->getAddExpr(UsersToProcess[i].Imm,
                                             UsersToProcess[i].Base);
      UsersToProcess[i].Base =
        SE->getIntegerSCEV(0, UsersToProcess[i].Base->getType());
    } else {
      // Not all uses are outside the loop.
      AllUsesAreOutsideLoop = false;

      // Addressing modes can be folded into loads and stores.  Be careful that
      // the store is through the expression, not of the expression though.
      bool isPHI = false;
      bool isAddress = isAddressUse(UsersToProcess[i].Inst,
                                    UsersToProcess[i].OperandValToReplace);
      if (isa<PHINode>(UsersToProcess[i].Inst)) {
        isPHI = true;
        ++NumPHI;
      }

      if (isAddress)
        HasAddress = true;

      // If this use isn't an address, then not all uses are addresses.
      if (!isAddress && !isPHI)
        AllUsesAreAddresses = false;

      MoveImmediateValues(TLI, UsersToProcess[i].Inst, UsersToProcess[i].Base,
                          UsersToProcess[i].Imm, isAddress, L, SE);
    }
  }
  // If one of the uses is a PHI node and all other uses are addresses, still
  // allow iv reuse. Essentially we are trading one constant multiplication
  // for one fewer iv.
  if (NumPHI > 1)
    AllUsesAreAddresses = false;

  // There are no in-loop address uses.
  if (AllUsesAreAddresses && (!HasAddress && !AllUsesAreOutsideLoop))
    AllUsesAreAddresses = false;

  return CommonExprs;
}

/// ShouldUseFullStrengthReductionMode - Test whether full strength-reduction
/// is valid and profitable for the given set of users of a stride. In
/// full strength-reduction mode, all addresses at the current stride are
/// strength-reduced all the way down to pointer arithmetic.
///
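/// For example (an illustrative sketch): rather than computing each address
/// as "base + iv * stride", every address use at this stride gets its own
/// pointer-like IV that is simply bumped by the stride on each iteration.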
bool LoopStrengthReduce::ShouldUseFullStrengthReductionMode(
                                   const std::vector<BasedUser> &UsersToProcess,
                                   const Loop *L,
                                   bool AllUsesAreAddresses,
                                   const SCEV *Stride) {
  if (!EnableFullLSRMode)
    return false;

  // The heuristics below aim to avoid increasing register pressure, but
  // fully strength-reducing all the addresses increases the number of
  // add instructions, so don't do this when optimizing for size.
  // TODO: If the loop is large, the savings due to simpler addresses
  // may outweigh the costs of the extra increment instructions.
  if (L->getHeader()->getParent()->hasFnAttr(Attribute::OptimizeForSize))
    return false;

  // TODO: For now, don't do full strength reduction if there could
  // potentially be greater-stride multiples of the current stride
  // which could reuse the current stride IV.
  if (IU->StrideOrder.back() != Stride)
    return false;

  // Iterate through the uses to find conditions that automatically rule out
  // full-lsr mode.
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ) {
    const SCEV *Base = UsersToProcess[i].Base;
    const SCEV *Imm = UsersToProcess[i].Imm;
    // If any users have a loop-variant component, they can't be fully
    // strength-reduced.
    if (Imm && !Imm->isLoopInvariant(L))
      return false;
    // If there are two users with the same base and the difference between
    // the two Imm values can't be folded into the address, full
    // strength reduction would increase register pressure.
    do {
      const SCEV *CurImm = UsersToProcess[i].Imm;
      if ((CurImm || Imm) && CurImm != Imm) {
        if (!CurImm) CurImm = SE->getIntegerSCEV(0, Stride->getType());
        if (!Imm)       Imm = SE->getIntegerSCEV(0, Stride->getType());
        const Instruction *Inst = UsersToProcess[i].Inst;
        const Type *AccessTy = getAccessType(Inst);
        const SCEV *Diff = SE->getMinusSCEV(UsersToProcess[i].Imm, Imm);
        if (!Diff->isZero() &&
            (!AllUsesAreAddresses ||
             !fitsInAddressMode(Diff, AccessTy, TLI, /*HasBaseReg=*/true)))
          return false;
      }
    } while (++i != e && Base == UsersToProcess[i].Base);
  }

  // If there's exactly one user in this stride, fully strength-reducing it
  // won't increase register pressure. If it's starting from a non-zero base,
  // it'll be simpler this way.
  if (UsersToProcess.size() == 1 && !UsersToProcess[0].Base->isZero())
    return true;

  // Otherwise, if there are any users in this stride that don't require
  // a register for their base, full strength-reduction will increase
  // register pressure.
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i)
    if (UsersToProcess[i].Base->isZero())
      return false;

  // Otherwise, go for it.
  return true;
}

/// InsertAffinePhi - Create and insert a PHI node for an induction variable
/// with the specified start and step values in the specified loop.
///
/// If the step is negative (see isNonConstantNegative), the increment is
/// emitted as a subtract instead of an add.
///
/// Return the created phi node.
///
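/// The emitted code has roughly this shape (sketch, types omitted):
///
///   header:
///     %lsr.iv = phi [ <start>, %preheader ], [ %lsr.iv.next, %latch ]
///     ...
///     %lsr.iv.next = add %lsr.iv, <step>   ; emitted as a sub when the step
///                                          ; is a known-negative expression
///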
static PHINode *InsertAffinePhi(const SCEV *Start, const SCEV *Step,
                                Instruction *IVIncInsertPt,
                                const Loop *L,
                                SCEVExpander &Rewriter) {
  assert(Start->isLoopInvariant(L) && "New PHI start is not loop invariant!");
  assert(Step->isLoopInvariant(L) && "New PHI stride is not loop invariant!");

  BasicBlock *Header = L->getHeader();
  BasicBlock *Preheader = L->getLoopPreheader();
  BasicBlock *LatchBlock = L->getLoopLatch();
  const Type *Ty = Start->getType();
  Ty = Rewriter.SE.getEffectiveSCEVType(Ty);

  PHINode *PN = PHINode::Create(Ty, "lsr.iv", Header->begin());
  PN->addIncoming(Rewriter.expandCodeFor(Start, Ty, Preheader->getTerminator()),
                  Preheader);

  // If the stride is negative, insert a sub instead of an add for the
  // increment.
  bool isNegative = isNonConstantNegative(Step);
  const SCEV *IncAmount = Step;
  if (isNegative)
    IncAmount = Rewriter.SE.getNegativeSCEV(Step);

  // Insert an add instruction right before the terminator corresponding
  // to the back-edge or just before the only use. The location is determined
  // by the caller and passed in as IVIncInsertPt.
  Value *StepV = Rewriter.expandCodeFor(IncAmount, Ty,
                                        Preheader->getTerminator());
  Instruction *IncV;
  if (isNegative) {
    IncV = BinaryOperator::CreateSub(PN, StepV, "lsr.iv.next",
                                     IVIncInsertPt);
  } else {
    IncV = BinaryOperator::CreateAdd(PN, StepV, "lsr.iv.next",
                                     IVIncInsertPt);
  }
  if (!isa<ConstantInt>(StepV)) ++NumVariable;

  PN->addIncoming(IncV, LatchBlock);

  ++NumInserted;
  return PN;
}

static void SortUsersToProcess(std::vector<BasedUser> &UsersToProcess) {
  // We want to emit code for users inside the loop first.  To do this, we
  // rearrange BasedUser so that the entries at the end have
  // isUseOfPostIncrementedValue = false, because we pop off the end of the
  // vector (so we handle them first).
  std::partition(UsersToProcess.begin(), UsersToProcess.end(),
                 PartitionByIsUseOfPostIncrementedValue);

  // Sort this by base, so that things with the same base are handled
  // together.  By partitioning first and stable-sorting later, we are
  // guaranteed that within each base we will pop off users from within the
  // loop before users outside of the loop with a particular base.
  //
  // We would like to use stable_sort here, but we can't.  The problem is that
1295  // const SCEV *'s don't have a deterministic ordering w.r.t. each other, so
1296  // we don't have anything to do a '<' comparison on.  Because we think the
1297  // number of uses is small, do a horrible bubble sort which just relies on
1298  // ==.
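  // For example (illustrative): a base sequence [A, B, A, C, B] is compacted
  // below into [A, A, B, B, C], so users with a common base end up adjacent.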
1299  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
1300    // Get a base value.
1301    const SCEV *Base = UsersToProcess[i].Base;
1302
1303    // Compact everything with this base to be consecutive with this one.
1304    for (unsigned j = i+1; j != e; ++j) {
1305      if (UsersToProcess[j].Base == Base) {
1306        std::swap(UsersToProcess[i+1], UsersToProcess[j]);
1307        ++i;
1308      }
1309    }
1310  }
1311}
1312
1313/// PrepareToStrengthReduceFully - Prepare to fully strength-reduce
1314/// UsersToProcess, meaning lowering addresses all the way down to direct
1315/// pointer arithmetic.
1316///
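/// For example (illustrative), in a loop such as
///
///   for (i = 0; i != n; ++i) { p[i] = 0; q[i] = 1; }
///
/// each unique base gets its own IV, so the addresses &p[i] and &q[i] are
/// each advanced by an add every iteration instead of being recomputed from
/// a shared index.
///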
1317void
1318LoopStrengthReduce::PrepareToStrengthReduceFully(
1319                                        std::vector<BasedUser> &UsersToProcess,
1320                                        const SCEV *Stride,
1321                                        const SCEV *CommonExprs,
1322                                        const Loop *L,
1323                                        SCEVExpander &PreheaderRewriter) {
1324  DEBUG(dbgs() << "  Fully reducing all users\n");
1325
1326  // Rewrite the UsersToProcess records, creating a separate PHI for each
1327  // unique Base value.
1328  Instruction *IVIncInsertPt = L->getLoopLatch()->getTerminator();
1329  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ) {
1330    // TODO: The uses are grouped by base, but not sorted. We arbitrarily
1331    // pick the first Imm value here to start with, and adjust it for the
1332    // other uses.
1333    const SCEV *Imm = UsersToProcess[i].Imm;
1334    const SCEV *Base = UsersToProcess[i].Base;
1335    const SCEV *Start = SE->getAddExpr(CommonExprs, Base, Imm);
1336    PHINode *Phi = InsertAffinePhi(Start, Stride, IVIncInsertPt, L,
1337                                   PreheaderRewriter);
1338    // Loop over all the users with the same base.
1339    do {
1340      UsersToProcess[i].Base = SE->getIntegerSCEV(0, Stride->getType());
1341      UsersToProcess[i].Imm = SE->getMinusSCEV(UsersToProcess[i].Imm, Imm);
1342      UsersToProcess[i].Phi = Phi;
1343      assert(UsersToProcess[i].Imm->isLoopInvariant(L) &&
1344             "ShouldUseFullStrengthReductionMode should reject this!");
1345    } while (++i != e && Base == UsersToProcess[i].Base);
1346  }
1347}
1348
1349/// FindIVIncInsertPt - Return the location to insert the increment instruction.
1350/// If the only use is a use of the postinc value (which must be the loop
1351/// termination condition), then insert it just before that use.
1352static Instruction *FindIVIncInsertPt(std::vector<BasedUser> &UsersToProcess,
1353                                      const Loop *L) {
1354  if (UsersToProcess.size() == 1 &&
1355      UsersToProcess[0].isUseOfPostIncrementedValue &&
1356      L->contains(UsersToProcess[0].Inst))
1357    return UsersToProcess[0].Inst;
1358  return L->getLoopLatch()->getTerminator();
1359}
1360
1361/// PrepareToStrengthReduceWithNewPhi - Insert a new induction variable for the
1362/// given users to share.
1363///
1364void
1365LoopStrengthReduce::PrepareToStrengthReduceWithNewPhi(
1366                                         std::vector<BasedUser> &UsersToProcess,
1367                                         const SCEV *Stride,
1368                                         const SCEV *CommonExprs,
1369                                         Value *CommonBaseV,
1370                                         Instruction *IVIncInsertPt,
1371                                         const Loop *L,
1372                                         SCEVExpander &PreheaderRewriter) {
1373  DEBUG(dbgs() << "  Inserting new PHI:\n");
1374
1375  PHINode *Phi = InsertAffinePhi(SE->getUnknown(CommonBaseV),
1376                                 Stride, IVIncInsertPt, L,
1377                                 PreheaderRewriter);
1378
1379  // Remember this in case a later stride is a multiple of this one.
1380  IVsByStride[Stride].addIV(Stride, CommonExprs, Phi);
1381
1382  // All the users will share this new IV.
1383  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i)
1384    UsersToProcess[i].Phi = Phi;
1385
1386  DEBUG(dbgs() << "    IV=");
1387  DEBUG(WriteAsOperand(dbgs(), Phi, /*PrintType=*/false));
1388  DEBUG(dbgs() << "\n");
1389}
1390
1391/// PrepareToStrengthReduceFromSmallerStride - Prepare for the given users to
1392/// reuse an induction variable with a stride that is a factor of the current
1393/// induction variable.
1394///
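/// For example (illustrative): on a target whose addressing modes support a
/// scale of 4, users with stride 4 can be rewritten as [base + 4*iv] in terms
/// of an existing stride-1 IV, so no new PHI needs to be inserted for them.
///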
1395void
1396LoopStrengthReduce::PrepareToStrengthReduceFromSmallerStride(
1397                                         std::vector<BasedUser> &UsersToProcess,
1398                                         Value *CommonBaseV,
1399                                         const IVExpr &ReuseIV,
1400                                         Instruction *PreInsertPt) {
1401  DEBUG(dbgs() << "  Rewriting in terms of existing IV of STRIDE "
1402               << *ReuseIV.Stride << " and BASE " << *ReuseIV.Base << "\n");
1403
1404  // All the users will share the reused IV.
1405  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i)
1406    UsersToProcess[i].Phi = ReuseIV.PHI;
1407
1408  Constant *C = dyn_cast<Constant>(CommonBaseV);
1409  if (C &&
1410      (!C->isNullValue() &&
1411       !fitsInAddressMode(SE->getUnknown(CommonBaseV), CommonBaseV->getType(),
1412                         TLI, false)))
1413    // We want the common base emitted into the preheader! This is just
1414    // using cast as a copy so BitCast (no-op cast) is appropriate
1415    CommonBaseV = new BitCastInst(CommonBaseV, CommonBaseV->getType(),
1416                                  "commonbase", PreInsertPt);
1417}
1418
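/// IsImmFoldedIntoAddrMode - Return true if the given GlobalValue and/or
/// constant offset appear to be foldable into the target addressing mode of
/// every pre-increment use in UsersToProcess.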
1419static bool IsImmFoldedIntoAddrMode(GlobalValue *GV, int64_t Offset,
1420                                    const Type *AccessTy,
1421                                   std::vector<BasedUser> &UsersToProcess,
1422                                   const TargetLowering *TLI) {
1423  SmallVector<Instruction*, 16> AddrModeInsts;
1424  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
1425    if (UsersToProcess[i].isUseOfPostIncrementedValue)
1426      continue;
1427    ExtAddrMode AddrMode =
1428      AddressingModeMatcher::Match(UsersToProcess[i].OperandValToReplace,
1429                                   AccessTy, UsersToProcess[i].Inst,
1430                                   AddrModeInsts, *TLI);
1431    if (GV && GV != AddrMode.BaseGV)
1432      return false;
1433    if (Offset && !AddrMode.BaseOffs)
1434      // FIXME: How to accurately check whether its immediate offset is folded.
1435      return false;
1436    AddrModeInsts.clear();
1437  }
1438  return true;
1439}
1440
1441/// StrengthReduceIVUsersOfStride - Strength reduce all of the users of a single
1442/// stride of IV.  All of the users may have different starting values, and this
1443/// may not be the only stride.
1444void
1445LoopStrengthReduce::StrengthReduceIVUsersOfStride(const SCEV *Stride,
1446                                                  IVUsersOfOneStride &Uses,
1447                                                  Loop *L) {
1448  // If all the users are moved to another stride, then there is nothing to do.
1449  if (Uses.Users.empty())
1450    return;
1451
1452  // Keep track if every use in UsersToProcess is an address. If they all are,
1453  // we may be able to rewrite the entire collection of them in terms of a
1454  // smaller-stride IV.
1455  bool AllUsesAreAddresses = true;
1456
1457  // Keep track if every use of a single stride is outside the loop.  If so,
1458  // we want to be more aggressive about reusing a smaller-stride IV; a
1459  // multiply outside the loop is better than another IV inside.  Well, usually.
1460  bool AllUsesAreOutsideLoop = true;
1461
1462  // Transform our list of users and offsets to a bit more complex table.  In
1463  // this new vector, each 'BasedUser' contains 'Base', the base of the
1464  // strided access, as well as the old information from Uses.  We progressively
1465  // move information from the Base field to the Imm field, until we eventually
1466  // have the full access expression to rewrite the use.
1467  std::vector<BasedUser> UsersToProcess;
1468  const SCEV *CommonExprs = CollectIVUsers(Stride, Uses, L, AllUsesAreAddresses,
1469                                           AllUsesAreOutsideLoop,
1470                                           UsersToProcess);
1471
1472  // Sort the UsersToProcess array so that users with common bases are
1473  // next to each other.
1474  SortUsersToProcess(UsersToProcess);
1475
1476  // If we managed to find some expressions in common, we'll need to carry
1477  // their value in a register and add it in for each use. This will take up
1478  // a register operand, which potentially restricts what stride values are
1479  // valid.
1480  bool HaveCommonExprs = !CommonExprs->isZero();
1481  const Type *ReplacedTy = CommonExprs->getType();
1482
1483  // If all uses are addresses, consider sinking the immediate part of the
1484  // common expression back into uses if they can fit in the immediate fields.
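  // For example (illustrative): if the common expression is "%a + 4" and every
  // use is an address whose addressing mode can encode a displacement of 4,
  // keep only "%a" in the register and fold the "+ 4" back into each use.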
1485  if (TLI && HaveCommonExprs && AllUsesAreAddresses) {
1486    const SCEV *NewCommon = CommonExprs;
1487    const SCEV *Imm = SE->getIntegerSCEV(0, ReplacedTy);
1488    MoveImmediateValues(TLI, Type::getVoidTy(
1489                        L->getLoopPreheader()->getContext()),
1490                        NewCommon, Imm, true, L, SE);
1491    if (!Imm->isZero()) {
1492      bool DoSink = true;
1493
1494      // If the immediate part of the common expression is a GV, check if it's
1495      // possible to fold it into the target addressing mode.
1496      GlobalValue *GV = 0;
1497      if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(Imm))
1498        GV = dyn_cast<GlobalValue>(SU->getValue());
1499      int64_t Offset = 0;
1500      if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Imm))
1501        Offset = SC->getValue()->getSExtValue();
1502      if (GV || Offset)
1503        // Pass VoidTy as the AccessTy to be conservative, because
1504        // there could be multiple access types among all the uses.
1505        DoSink = IsImmFoldedIntoAddrMode(GV, Offset,
1506                          Type::getVoidTy(L->getLoopPreheader()->getContext()),
1507                                         UsersToProcess, TLI);
1508
1509      if (DoSink) {
1510        DEBUG(dbgs() << "  Sinking " << *Imm << " back down into uses\n");
1511        for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i)
1512          UsersToProcess[i].Imm = SE->getAddExpr(UsersToProcess[i].Imm, Imm);
1513        CommonExprs = NewCommon;
1514        HaveCommonExprs = !CommonExprs->isZero();
1515        ++NumImmSunk;
1516      }
1517    }
1518  }
1519
1520  // Now that we know what we need to do, insert the PHI node itself.
1521  //
1522  DEBUG(dbgs() << "LSR: Examining IVs of TYPE " << *ReplacedTy << " of STRIDE "
1523               << *Stride << ":\n"
1524               << "  Common base: " << *CommonExprs << "\n");
1525
1526  SCEVExpander Rewriter(*SE);
1527  SCEVExpander PreheaderRewriter(*SE);
1528
1529  BasicBlock  *Preheader = L->getLoopPreheader();
1530  Instruction *PreInsertPt = Preheader->getTerminator();
1531  BasicBlock *LatchBlock = L->getLoopLatch();
1532  Instruction *IVIncInsertPt = LatchBlock->getTerminator();
1533
1534  Value *CommonBaseV = Constant::getNullValue(ReplacedTy);
1535
1536  const SCEV *RewriteFactor = SE->getIntegerSCEV(0, ReplacedTy);
1537  IVExpr   ReuseIV(SE->getIntegerSCEV(0,
1538                                    Type::getInt32Ty(Preheader->getContext())),
1539                   SE->getIntegerSCEV(0,
1540                                    Type::getInt32Ty(Preheader->getContext())),
1541                   0);
1542
1543  // Choose a strength-reduction strategy and prepare for it by creating
1544  // the necessary PHIs and adjusting the bookkeeping.
1545  if (ShouldUseFullStrengthReductionMode(UsersToProcess, L,
1546                                         AllUsesAreAddresses, Stride)) {
1547    PrepareToStrengthReduceFully(UsersToProcess, Stride, CommonExprs, L,
1548                                 PreheaderRewriter);
1549  } else {
1550    // Emit the initial base value into the loop preheader.
1551    CommonBaseV = PreheaderRewriter.expandCodeFor(CommonExprs, ReplacedTy,
1552                                                  PreInsertPt);
1553
1554    // If all uses are addresses, check if it is possible to reuse an IV.  The
1555    // new IV must have a stride that is a multiple of the old stride; the
1556    // multiple must be a number that can be encoded in the scale field of the
1557    // target addressing mode; and we must have a valid instruction after this
1558    // substitution, including the immediate field, if any.
1559    RewriteFactor = CheckForIVReuse(HaveCommonExprs, AllUsesAreAddresses,
1560                                    AllUsesAreOutsideLoop,
1561                                    Stride, ReuseIV, ReplacedTy,
1562                                    UsersToProcess);
1563    if (!RewriteFactor->isZero())
1564      PrepareToStrengthReduceFromSmallerStride(UsersToProcess, CommonBaseV,
1565                                               ReuseIV, PreInsertPt);
1566    else {
1567      IVIncInsertPt = FindIVIncInsertPt(UsersToProcess, L);
1568      PrepareToStrengthReduceWithNewPhi(UsersToProcess, Stride, CommonExprs,
1569                                        CommonBaseV, IVIncInsertPt,
1570                                        L, PreheaderRewriter);
1571    }
1572  }
1573
1574  // Process all the users now, replacing their strided uses with
1575  // strength-reduced forms.  This outer loop handles all bases, the inner
1576  // loop handles all users of a particular base.
1577  while (!UsersToProcess.empty()) {
1578    const SCEV *Base = UsersToProcess.back().Base;
1579    Instruction *Inst = UsersToProcess.back().Inst;
1580
1581    // Emit the code for Base into the preheader.
1582    Value *BaseV = 0;
1583    if (!Base->isZero()) {
1584      BaseV = PreheaderRewriter.expandCodeFor(Base, 0, PreInsertPt);
1585
1586      DEBUG(dbgs() << "  INSERTING code for BASE = " << *Base << ":");
1587      if (BaseV->hasName())
1588        DEBUG(dbgs() << " Result value name = %" << BaseV->getName());
1589      DEBUG(dbgs() << "\n");
1590
1591      // If BaseV is a non-zero constant, make sure that it gets inserted into
1592      // the preheader, instead of being forward substituted into the uses.  We
1593      // do this by forcing a BitCast (noop cast) to be inserted into the
1594      // preheader in this case.
1595      if (!fitsInAddressMode(Base, getAccessType(Inst), TLI, false) &&
1596          isa<Constant>(BaseV)) {
1597        // We want this constant emitted into the preheader! This is just
1598        // using cast as a copy so BitCast (no-op cast) is appropriate
1599        BaseV = new BitCastInst(BaseV, BaseV->getType(), "preheaderinsert",
1600                                PreInsertPt);
1601      }
1602    }
1603
1604    // Emit the code to add the immediate offset to the Phi value, just before
1605    // the instructions that we identified as using this stride and base.
1606    do {
1607      // FIXME: Use emitted users to emit other users.
1608      BasedUser &User = UsersToProcess.back();
1609
1610      DEBUG(dbgs() << "    Examining ");
1611      if (User.isUseOfPostIncrementedValue)
1612        DEBUG(dbgs() << "postinc");
1613      else
1614        DEBUG(dbgs() << "preinc");
1615      DEBUG(dbgs() << " use ");
1616      DEBUG(WriteAsOperand(dbgs(), UsersToProcess.back().OperandValToReplace,
1617                           /*PrintType=*/false));
1618      DEBUG(dbgs() << " in Inst: " << *User.Inst);
1619
1620      // If this instruction wants to use the post-incremented value, move it
1621      // after the post-inc and use its value instead of the PHI.
1622      Value *RewriteOp = User.Phi;
1623      if (User.isUseOfPostIncrementedValue) {
1624        RewriteOp = User.Phi->getIncomingValueForBlock(LatchBlock);
1625        // If this user is in the loop, make sure it is the last thing in the
1626        // loop to ensure it is dominated by the increment. In case it's the
1627        // only use of the iv, the increment instruction is already before the
1628        // use.
1629        if (L->contains(User.Inst) && User.Inst != IVIncInsertPt)
1630          User.Inst->moveBefore(IVIncInsertPt);
1631      }
1632
1633      const SCEV *RewriteExpr = SE->getUnknown(RewriteOp);
1634
1635      if (SE->getEffectiveSCEVType(RewriteOp->getType()) !=
1636          SE->getEffectiveSCEVType(ReplacedTy)) {
1637        assert(SE->getTypeSizeInBits(RewriteOp->getType()) >
1638               SE->getTypeSizeInBits(ReplacedTy) &&
1639               "Unexpected widening cast!");
1640        RewriteExpr = SE->getTruncateExpr(RewriteExpr, ReplacedTy);
1641      }
1642
1643      // If we had to insert new instructions for RewriteOp, we have to
1644      // consider that they may not have been able to end up immediately
1645      // next to RewriteOp, because non-PHI instructions may never precede
1646      // PHI instructions in a block. In this case, remember where the last
1647      // instruction was inserted so that if we're replacing a different
1648      // PHI node, we can use the later point to expand the final
1649      // RewriteExpr.
1650      Instruction *NewBasePt = dyn_cast<Instruction>(RewriteOp);
1651      if (RewriteOp == User.Phi) NewBasePt = 0;
1652
1653      // Clear the SCEVExpander's expression map so that we are guaranteed
1654      // to have the code emitted where we expect it.
1655      Rewriter.clear();
1656
1657      // If we are reusing the iv, then it must be multiplied by a constant
1658      // factor to take advantage of the addressing mode scale component.
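      // For example (illustrative): when stride-4 users are being rewritten
      // in terms of a stride-1 IV, RewriteFactor is 4 and each use is
      // expressed as (4 * iv) plus its own immediate offset.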
1659      if (!RewriteFactor->isZero()) {
1660        // If we're reusing an IV with a nonzero base (currently this happens
1661        // only when all reuses are outside the loop) subtract that base here.
1662        // The base has been used to initialize the PHI node but we don't want
1663        // it here.
1664        if (!ReuseIV.Base->isZero()) {
1665          const SCEV *typedBase = ReuseIV.Base;
1666          if (SE->getEffectiveSCEVType(RewriteExpr->getType()) !=
1667              SE->getEffectiveSCEVType(ReuseIV.Base->getType())) {
1668            // It's possible the original IV is a larger type than the new IV,
1669            // in which case we have to truncate the Base.  We checked in
1670            // RequiresTypeConversion that this is valid.
1671            assert(SE->getTypeSizeInBits(RewriteExpr->getType()) <
1672                   SE->getTypeSizeInBits(ReuseIV.Base->getType()) &&
1673                   "Unexpected lengthening conversion!");
1674            typedBase = SE->getTruncateExpr(ReuseIV.Base,
1675                                            RewriteExpr->getType());
1676          }
1677          RewriteExpr = SE->getMinusSCEV(RewriteExpr, typedBase);
1678        }
1679
1680        // Multiply old variable, with base removed, by new scale factor.
1681        RewriteExpr = SE->getMulExpr(RewriteFactor,
1682                                     RewriteExpr);
1683
1684        // The common base is emitted in the loop preheader. But since we
1685        // are reusing an IV, it has not been used to initialize the PHI node.
1686        // Add it to the expression used to rewrite the uses.
1687        // When this use is outside the loop, we earlier subtracted the
1688        // common base, and are adding it back here.  Use the same expression
1689        // as before, rather than CommonBaseV, so DAGCombiner will zap it.
1690        if (!CommonExprs->isZero()) {
1691          if (L->contains(User.Inst))
1692            RewriteExpr = SE->getAddExpr(RewriteExpr,
1693                                       SE->getUnknown(CommonBaseV));
1694          else
1695            RewriteExpr = SE->getAddExpr(RewriteExpr, CommonExprs);
1696        }
1697      }
1698
1699      // Now that we know what we need to do, insert code before User for the
1700      // immediate and any loop-variant expressions.
1701      if (BaseV)
1702        // Add BaseV to the PHI value if needed.
1703        RewriteExpr = SE->getAddExpr(RewriteExpr, SE->getUnknown(BaseV));
1704
1705      User.RewriteInstructionToUseNewBase(RewriteExpr, NewBasePt,
1706                                          Rewriter, L, this,
1707                                          DeadInsts, SE);
1708
1709      // Mark old value we replaced as possibly dead, so that it is eliminated
1710      // if we just replaced the last use of that value.
1711      DeadInsts.push_back(User.OperandValToReplace);
1712
1713      UsersToProcess.pop_back();
1714      ++NumReduced;
1715
1716      // If there are any more users to process with the same base, process them
1717      // now.  We sorted by base above, so we just have to check the last elt.
1718    } while (!UsersToProcess.empty() && UsersToProcess.back().Base == Base);
1719    // TODO: Next, find out which base index is the most common, pull it out.
1720  }
1721
1722  // IMPORTANT TODO: Figure out how to partition the IV's with this stride, but
1723  // different starting values, into different PHIs.
1724}
1725
1726void LoopStrengthReduce::StrengthReduceIVUsers(Loop *L) {
1727  // Note: this processes each stride/type pair individually.  All users
1728  // passed into StrengthReduceIVUsersOfStride have the same type AND stride.
1729  // Also, note that we iterate over IVUsesByStride indirectly by using
1730  // StrideOrder. This extra layer of indirection makes the ordering of
1731  // strides deterministic - not dependent on map order.
1732  for (unsigned Stride = 0, e = IU->StrideOrder.size(); Stride != e; ++Stride) {
1733    std::map<const SCEV *, IVUsersOfOneStride *>::iterator SI =
1734      IU->IVUsesByStride.find(IU->StrideOrder[Stride]);
1735    assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!");
1736    // FIXME: Generalize to non-affine IV's.
1737    if (!SI->first->isLoopInvariant(L))
1738      continue;
1739    StrengthReduceIVUsersOfStride(SI->first, *SI->second, L);
1740  }
1741}
1742
1743/// FindIVUserForCond - If Cond has an operand that is an expression of an IV,
1744/// set the IV user and stride information and return true, otherwise return
1745/// false.
1746bool LoopStrengthReduce::FindIVUserForCond(ICmpInst *Cond,
1747                                           IVStrideUse *&CondUse,
1748                                           const SCEV* &CondStride) {
1749  for (unsigned Stride = 0, e = IU->StrideOrder.size();
1750       Stride != e && !CondUse; ++Stride) {
1751    std::map<const SCEV *, IVUsersOfOneStride *>::iterator SI =
1752      IU->IVUsesByStride.find(IU->StrideOrder[Stride]);
1753    assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!");
1754
1755    for (ilist<IVStrideUse>::iterator UI = SI->second->Users.begin(),
1756         E = SI->second->Users.end(); UI != E; ++UI)
1757      if (UI->getUser() == Cond) {
1758        // NOTE: we could handle setcc instructions with multiple uses here, but
1759        // InstCombine handles the simple uses as well, and it's not clear that
1760        // this occurs often enough in real life to be worth handling.
1761        CondUse = UI;
1762        CondStride = SI->first;
1763        return true;
1764      }
1765  }
1766  return false;
1767}
1768
1769namespace {
1770  // Constant strides come first and are sorted by their absolute values. If
1771  // the absolute values are the same, then positive strides come first.
1772  // e.g.
1773  // 4, -1, X, 1, 2 ==> 1, -1, 2, 4, X
1774  struct StrideCompare {
1775    const ScalarEvolution *SE;
1776    explicit StrideCompare(const ScalarEvolution *se) : SE(se) {}
1777
1778    bool operator()(const SCEV *LHS, const SCEV *RHS) {
1779      const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS);
1780      const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS);
1781      if (LHSC && RHSC) {
1782        int64_t  LV = LHSC->getValue()->getSExtValue();
1783        int64_t  RV = RHSC->getValue()->getSExtValue();
1784        uint64_t ALV = (LV < 0) ? -LV : LV;
1785        uint64_t ARV = (RV < 0) ? -RV : RV;
1786        if (ALV == ARV) {
1787          if (LV != RV)
1788            return LV > RV;
1789        } else {
1790          return ALV < ARV;
1791        }
1792
1793        // If it's the same value but different type, sort by bit width so
1794        // that we emit larger induction variables before smaller
1795        // ones, letting the smaller be re-written in terms of larger ones.
1796        return SE->getTypeSizeInBits(RHS->getType()) <
1797               SE->getTypeSizeInBits(LHS->getType());
1798      }
1799      return LHSC && !RHSC;
1800    }
1801  };
1802}
1803
1804/// ChangeCompareStride - If a loop termination compare instruction is the
1805/// only use of its stride, and the comparison is against a constant value,
1806/// try to eliminate the stride by moving the compare instruction to another
1807/// stride and changing its constant operand accordingly. e.g.
1808///
1809/// loop:
1810/// ...
1811/// v1 = v1 + 3
1812/// v2 = v2 + 1
1813/// if (v2 < 10) goto loop
1814/// =>
1815/// loop:
1816/// ...
1817/// v1 = v1 + 3
1818/// if (v1 < 30) goto loop
1819ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond,
1820                                                  IVStrideUse* &CondUse,
1821                                                  const SCEV* &CondStride,
1822                                                  bool PostPass) {
1823  // If there's only one stride in the loop, there's nothing to do here.
1824  if (IU->StrideOrder.size() < 2)
1825    return Cond;
1826  // If there are other users of the condition's stride, don't bother
1827  // trying to change the condition because the stride will still
1828  // remain.
1829  std::map<const SCEV *, IVUsersOfOneStride *>::iterator I =
1830    IU->IVUsesByStride.find(CondStride);
1831  if (I == IU->IVUsesByStride.end())
1832    return Cond;
1833  if (I->second->Users.size() > 1) {
1834    for (ilist<IVStrideUse>::iterator II = I->second->Users.begin(),
1835           EE = I->second->Users.end(); II != EE; ++II) {
1836      if (II->getUser() == Cond)
1837        continue;
1838      if (!isInstructionTriviallyDead(II->getUser()))
1839        return Cond;
1840    }
1841  }
1842  // Only handle constant strides for now.
1843  const SCEVConstant *SC = dyn_cast<SCEVConstant>(CondStride);
1844  if (!SC) return Cond;
1845
1846  ICmpInst::Predicate Predicate = Cond->getPredicate();
1847  int64_t CmpSSInt = SC->getValue()->getSExtValue();
1848  unsigned BitWidth = SE->getTypeSizeInBits(CondStride->getType());
1849  uint64_t SignBit = 1ULL << (BitWidth-1);
1850  const Type *CmpTy = Cond->getOperand(0)->getType();
1851  const Type *NewCmpTy = NULL;
1852  unsigned TyBits = SE->getTypeSizeInBits(CmpTy);
1853  unsigned NewTyBits = 0;
1854  const SCEV *NewStride = NULL;
1855  Value *NewCmpLHS = NULL;
1856  Value *NewCmpRHS = NULL;
1857  int64_t Scale = 1;
1858  const SCEV *NewOffset = SE->getIntegerSCEV(0, CmpTy);
1859
1860  if (ConstantInt *C = dyn_cast<ConstantInt>(Cond->getOperand(1))) {
1861    int64_t CmpVal = C->getValue().getSExtValue();
1862
1863    // Check the relevant induction variable for conformance to
1864    // the pattern.
1865    const SCEV *IV = SE->getSCEV(Cond->getOperand(0));
1866    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
1867    if (!AR || !AR->isAffine())
1868      return Cond;
1869
1870    const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
1871    // Check the stride constant and the comparison constant signs to detect
1872    // overflow.
1873    if (StartC) {
1874      if ((StartC->getValue()->getSExtValue() < CmpVal && CmpSSInt < 0) ||
1875          (StartC->getValue()->getSExtValue() > CmpVal && CmpSSInt > 0))
1876        return Cond;
1877    } else {
1878      // More restrictive check for the other cases.
1879      if ((CmpVal & SignBit) != (CmpSSInt & SignBit))
1880        return Cond;
1881    }
1882
1883    // Look for a suitable stride / iv as replacement.
1884    for (unsigned i = 0, e = IU->StrideOrder.size(); i != e; ++i) {
1885      std::map<const SCEV *, IVUsersOfOneStride *>::iterator SI =
1886        IU->IVUsesByStride.find(IU->StrideOrder[i]);
1887      if (!isa<SCEVConstant>(SI->first) || SI->second->Users.empty())
1888        continue;
1889      int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
1890      if (SSInt == CmpSSInt ||
1891          abs64(SSInt) < abs64(CmpSSInt) ||
1892          (SSInt % CmpSSInt) != 0)
1893        continue;
1894
1895      Scale = SSInt / CmpSSInt;
1896      int64_t NewCmpVal = CmpVal * Scale;
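      // E.g., with the v1/v2 example in the function comment above: CmpSSInt
      // is 1, SSInt is 3, so Scale is 3 and the compare against 10 becomes a
      // compare against 30.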
1897
1898      // If the old icmp value fits in the icmp immediate field but the new one
1899      // doesn't, try something else.
1900      if (TLI &&
1901          TLI->isLegalICmpImmediate(CmpVal) &&
1902          !TLI->isLegalICmpImmediate(NewCmpVal))
1903        continue;
1904
1905      APInt Mul = APInt(BitWidth*2, CmpVal, true);
1906      Mul = Mul * APInt(BitWidth*2, Scale, true);
1907      // Check for overflow.
1908      if (!Mul.isSignedIntN(BitWidth))
1909        continue;
1910      // Check for overflow in the stride's type too.
1911      if (!Mul.isSignedIntN(SE->getTypeSizeInBits(SI->first->getType())))
1912        continue;
1913
1914      // Watch out for overflow.
1915      if (ICmpInst::isSigned(Predicate) &&
1916          (CmpVal & SignBit) != (NewCmpVal & SignBit))
1917        continue;
1918
1919      // Pick the best iv to use, trying to avoid a cast.
1920      NewCmpLHS = NULL;
1921      for (ilist<IVStrideUse>::iterator UI = SI->second->Users.begin(),
1922             E = SI->second->Users.end(); UI != E; ++UI) {
1923        Value *Op = UI->getOperandValToReplace();
1924
1925        // If the IVStrideUse implies a cast, check for an actual cast which
1926        // can be used to find the original IV expression.
1927        if (SE->getEffectiveSCEVType(Op->getType()) !=
1928            SE->getEffectiveSCEVType(SI->first->getType())) {
1929          CastInst *CI = dyn_cast<CastInst>(Op);
1930          // If it's not a simple cast, it's complicated.
1931          if (!CI)
1932            continue;
1933          // If it's a cast from a type other than the stride type,
1934          // it's complicated.
1935          if (CI->getOperand(0)->getType() != SI->first->getType())
1936            continue;
1937          // Ok, we found the IV expression in the stride's type.
1938          Op = CI->getOperand(0);
1939        }
1940
1941        NewCmpLHS = Op;
1942        if (NewCmpLHS->getType() == CmpTy)
1943          break;
1944      }
1945      if (!NewCmpLHS)
1946        continue;
1947
1948      NewCmpTy = NewCmpLHS->getType();
1949      NewTyBits = SE->getTypeSizeInBits(NewCmpTy);
1950      const Type *NewCmpIntTy = IntegerType::get(Cond->getContext(), NewTyBits);
1951      if (RequiresTypeConversion(NewCmpTy, CmpTy)) {
1952        // Check if it is possible to rewrite it using
1953        // an iv / stride of a smaller integer type.
1954        unsigned Bits = NewTyBits;
1955        if (ICmpInst::isSigned(Predicate))
1956          --Bits;
1957        uint64_t Mask = (1ULL << Bits) - 1;
1958        if (((uint64_t)NewCmpVal & Mask) != (uint64_t)NewCmpVal)
1959          continue;
1960      }
1961
1962      // Don't rewrite if the use offset is non-constant and the new type
1963      // is different from the old one.
1964      // FIXME: too conservative?
1965      if (NewTyBits != TyBits && !isa<SCEVConstant>(CondUse->getOffset()))
1966        continue;
1967
1968      if (!PostPass) {
1969        bool AllUsesAreAddresses = true;
1970        bool AllUsesAreOutsideLoop = true;
1971        std::vector<BasedUser> UsersToProcess;
1972        const SCEV *CommonExprs = CollectIVUsers(SI->first, *SI->second, L,
1973                                                 AllUsesAreAddresses,
1974                                                 AllUsesAreOutsideLoop,
1975                                                 UsersToProcess);
1976        // Avoid rewriting the compare instruction with an iv of new stride
1977        // if it's likely the new stride uses will be rewritten using the
1978        // stride of the compare instruction.
1979        if (AllUsesAreAddresses &&
1980            ValidScale(!CommonExprs->isZero(), Scale, UsersToProcess))
1981          continue;
1982      }
1983
1984      // Avoid rewriting the compare instruction with an iv which has
1985      // implicit extension or truncation built into it.
1986      // TODO: This is over-conservative.
1987      if (SE->getTypeSizeInBits(CondUse->getOffset()->getType()) != TyBits)
1988        continue;
1989
1990      // If scale is negative, use swapped predicate unless it's testing
1991      // for equality.
1992      if (Scale < 0 && !Cond->isEquality())
1993        Predicate = ICmpInst::getSwappedPredicate(Predicate);
1994
1995      NewStride = IU->StrideOrder[i];
1996      if (!isa<PointerType>(NewCmpTy))
1997        NewCmpRHS = ConstantInt::get(NewCmpTy, NewCmpVal);
1998      else {
1999        Constant *CI = ConstantInt::get(NewCmpIntTy, NewCmpVal);
2000        NewCmpRHS = ConstantExpr::getIntToPtr(CI, NewCmpTy);
2001      }
2002      NewOffset = TyBits == NewTyBits
2003        ? SE->getMulExpr(CondUse->getOffset(),
2004                         SE->getConstant(CmpTy, Scale))
2005        : SE->getConstant(NewCmpIntTy,
2006          cast<SCEVConstant>(CondUse->getOffset())->getValue()
2007            ->getSExtValue()*Scale);
2008      break;
2009    }
2010  }
2011
2012  // Forgo this transformation if the increment happens to be
2013  // unfortunately positioned after the condition, and the condition
2014  // has multiple uses which prevent it from being moved immediately
2015  // before the branch. See
2016  // test/Transforms/LoopStrengthReduce/change-compare-stride-trickiness-*.ll
2017  // for an example of this situation.
2018  if (!Cond->hasOneUse()) {
2019    for (BasicBlock::iterator I = Cond, E = Cond->getParent()->end();
2020         I != E; ++I)
2021      if (I == NewCmpLHS)
2022        return Cond;
2023  }
2024
2025  if (NewCmpRHS) {
2026    // Create a new compare instruction using new stride / iv.
2027    ICmpInst *OldCond = Cond;
2028    // Insert new compare instruction.
2029    Cond = new ICmpInst(OldCond, Predicate, NewCmpLHS, NewCmpRHS,
2030                        L->getHeader()->getName() + ".termcond");
2031
2032    DEBUG(dbgs() << "    Change compare stride in Inst " << *OldCond);
2033    DEBUG(dbgs() << " to " << *Cond << '\n');
2034
2035    // Remove the old compare instruction. The old indvar is probably dead too.
2036    DeadInsts.push_back(CondUse->getOperandValToReplace());
2037    OldCond->replaceAllUsesWith(Cond);
2038    OldCond->eraseFromParent();
2039
2040    IU->IVUsesByStride[NewStride]->addUser(NewOffset, Cond, NewCmpLHS);
2041    CondUse = &IU->IVUsesByStride[NewStride]->Users.back();
2042    CondStride = NewStride;
2043    ++NumEliminated;
2044    Changed = true;
2045  }
2046
2047  return Cond;
2048}
2049
2050/// OptimizeMax - Rewrite the loop's terminating condition if it uses
2051/// a max computation.
2052///
2053/// This is a narrow solution to a specific, but acute, problem. For loops
2054/// like this:
2055///
2056///   i = 0;
2057///   do {
2058///     p[i] = 0.0;
2059///   } while (++i < n);
2060///
2061/// the trip count isn't just 'n', because 'n' might not be positive. And
2062/// unfortunately this can come up even for loops where the user didn't use
2063/// a C do-while loop. For example, seemingly well-behaved top-test loops
2064/// will commonly be lowered like this:
2065///
2066///   if (n > 0) {
2067///     i = 0;
2068///     do {
2069///       p[i] = 0.0;
2070///     } while (++i < n);
2071///   }
2072///
2073/// and then it's possible for subsequent optimization to obscure the if
2074/// test in such a way that indvars can't find it.
2075///
2076/// When indvars can't find the if test in loops like this, it creates a
2077/// max expression, which allows it to give the loop a canonical
2078/// induction variable:
2079///
2080///   i = 0;
2081///   max = n < 1 ? 1 : n;
2082///   do {
2083///     p[i] = 0.0;
2084///   } while (++i != max);
2085///
2086/// Canonical induction variables are necessary because the loop passes
2087/// are designed around them. The most obvious example of this is the
2088/// LoopInfo analysis, which doesn't remember trip count values. It
2089/// expects to be able to rediscover the trip count each time it is
2090/// needed, and it does this using a simple analysis that only succeeds if
2091/// the loop has a canonical induction variable.
2092///
2093/// However, when it comes time to generate code, the maximum operation
2094/// can be quite costly, especially if it's inside of an outer loop.
2095///
2096/// This function solves this problem by detecting loops of this type and
2097/// rewriting their conditions from ICMP_NE back to ICMP_SLT, and deleting
2098/// the instructions for the maximum computation.
2099///
2100ICmpInst *LoopStrengthReduce::OptimizeMax(Loop *L, ICmpInst *Cond,
2101                                          IVStrideUse* &CondUse) {
2102  // Check that the loop matches the pattern we're looking for.
2103  if (Cond->getPredicate() != CmpInst::ICMP_EQ &&
2104      Cond->getPredicate() != CmpInst::ICMP_NE)
2105    return Cond;
2106
2107  SelectInst *Sel = dyn_cast<SelectInst>(Cond->getOperand(1));
2108  if (!Sel || !Sel->hasOneUse()) return Cond;
2109
2110  const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);
2111  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
2112    return Cond;
2113  const SCEV *One = SE->getIntegerSCEV(1, BackedgeTakenCount->getType());
2114
2115  // Add one to the backedge-taken count to get the trip count.
2116  const SCEV *IterationCount = SE->getAddExpr(BackedgeTakenCount, One);
2117
2118  // Check for a max calculation that matches the pattern.
2119  if (!isa<SCEVSMaxExpr>(IterationCount) && !isa<SCEVUMaxExpr>(IterationCount))
2120    return Cond;
2121  const SCEVNAryExpr *Max = cast<SCEVNAryExpr>(IterationCount);
2122  if (Max != SE->getSCEV(Sel)) return Cond;
2123
2124  // To handle a max with more than two operands, this optimization would
2125  // require additional checking and setup.
2126  if (Max->getNumOperands() != 2)
2127    return Cond;
2128
2129  const SCEV *MaxLHS = Max->getOperand(0);
2130  const SCEV *MaxRHS = Max->getOperand(1);
2131  if (!MaxLHS || MaxLHS != One) return Cond;
2132
2133  // Check the relevant induction variable for conformance to
2134  // the pattern.
2135  const SCEV *IV = SE->getSCEV(Cond->getOperand(0));
2136  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
2137  if (!AR || !AR->isAffine() ||
2138      AR->getStart() != One ||
2139      AR->getStepRecurrence(*SE) != One)
2140    return Cond;
2141
2142  assert(AR->getLoop() == L &&
2143         "Loop condition operand is an addrec in a different loop!");
2144
2145  // Check the right operand of the select, and remember it, as it will
2146  // be used in the new comparison instruction.
2147  Value *NewRHS = 0;
2148  if (SE->getSCEV(Sel->getOperand(1)) == MaxRHS)
2149    NewRHS = Sel->getOperand(1);
2150  else if (SE->getSCEV(Sel->getOperand(2)) == MaxRHS)
2151    NewRHS = Sel->getOperand(2);
2152  if (!NewRHS) return Cond;
2153
2154  // Determine the new comparison opcode. It may be signed or unsigned,
2155  // and the original comparison may be either equality or inequality.
2156  CmpInst::Predicate Pred =
2157    isa<SCEVSMaxExpr>(Max) ? CmpInst::ICMP_SLT : CmpInst::ICMP_ULT;
2158  if (Cond->getPredicate() == CmpInst::ICMP_EQ)
2159    Pred = CmpInst::getInversePredicate(Pred);
2160
2161  // Ok, everything looks ok to change the condition into an SLT or SGE and
2162  // delete the max calculation.
2163  ICmpInst *NewCond =
2164    new ICmpInst(Cond, Pred, Cond->getOperand(0), NewRHS, "scmp");
2165
2166  // Delete the max calculation instructions.
2167  Cond->replaceAllUsesWith(NewCond);
2168  CondUse->setUser(NewCond);
2169  Instruction *Cmp = cast<Instruction>(Sel->getOperand(0));
2170  Cond->eraseFromParent();
2171  Sel->eraseFromParent();
2172  if (Cmp->use_empty())
2173    Cmp->eraseFromParent();
2174  return NewCond;
2175}
2176
2177/// OptimizeShadowIV - If the IV is used in an int-to-float cast
2178/// inside the loop then try to eliminate the cast operation.
2179void LoopStrengthReduce::OptimizeShadowIV(Loop *L) {
2180
2181  const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);
2182  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
2183    return;
2184
2185  for (unsigned Stride = 0, e = IU->StrideOrder.size(); Stride != e;
2186       ++Stride) {
2187    std::map<const SCEV *, IVUsersOfOneStride *>::iterator SI =
2188      IU->IVUsesByStride.find(IU->StrideOrder[Stride]);
2189    assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!");
2190    if (!isa<SCEVConstant>(SI->first))
2191      continue;
2192
2193    for (ilist<IVStrideUse>::iterator UI = SI->second->Users.begin(),
2194           E = SI->second->Users.end(); UI != E; /* empty */) {
2195      ilist<IVStrideUse>::iterator CandidateUI = UI;
2196      ++UI;
2197      Instruction *ShadowUse = CandidateUI->getUser();
2198      const Type *DestTy = NULL;
2199
2200      /* If the shadow use is an int->float cast then insert a second IV
2201         to eliminate this cast.
2202
2203           for (unsigned i = 0; i < n; ++i)
2204             foo((double)i);
2205
2206         is transformed into
2207
2208           double d = 0.0;
2209           for (unsigned i = 0; i < n; ++i, ++d)
2210             foo(d);
2211      */
2212      if (UIToFPInst *UCast = dyn_cast<UIToFPInst>(CandidateUI->getUser()))
2213        DestTy = UCast->getDestTy();
2214      else if (SIToFPInst *SCast = dyn_cast<SIToFPInst>(CandidateUI->getUser()))
2215        DestTy = SCast->getDestTy();
2216      if (!DestTy) continue;
2217
2218      if (TLI) {
2219        // If target does not support DestTy natively then do not apply
2220        // this transformation.
2221        EVT DVT = TLI->getValueType(DestTy);
2222        if (!TLI->isTypeLegal(DVT)) continue;
2223      }
2224
2225      PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0));
2226      if (!PH) continue;
2227      if (PH->getNumIncomingValues() != 2) continue;
2228
2229      const Type *SrcTy = PH->getType();
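      // Only transform if every value of the integer IV is exactly
      // representable in the FP type; e.g. an i64 counter does not fit in a
      // double's 53-bit mantissa, so the shadow IV could lose precision.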
2230      int Mantissa = DestTy->getFPMantissaWidth();
2231      if (Mantissa == -1) continue;
2232      if ((int)SE->getTypeSizeInBits(SrcTy) > Mantissa)
2233        continue;
2234
2235      unsigned Entry, Latch;
2236      if (PH->getIncomingBlock(0) == L->getLoopPreheader()) {
2237        Entry = 0;
2238        Latch = 1;
2239      } else {
2240        Entry = 1;
2241        Latch = 0;
2242      }
2243
2244      ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry));
2245      if (!Init) continue;
2246      Constant *NewInit = ConstantFP::get(DestTy, Init->getZExtValue());
2247
2248      BinaryOperator *Incr =
2249        dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch));
2250      if (!Incr) continue;
2251      if (Incr->getOpcode() != Instruction::Add
2252          && Incr->getOpcode() != Instruction::Sub)
2253        continue;
2254
2255      /* Initialize new IV, double d = 0.0 in above example. */
2256      ConstantInt *C = NULL;
2257      if (Incr->getOperand(0) == PH)
2258        C = dyn_cast<ConstantInt>(Incr->getOperand(1));
2259      else if (Incr->getOperand(1) == PH)
2260        C = dyn_cast<ConstantInt>(Incr->getOperand(0));
2261      else
2262        continue;
2263
2264      if (!C) continue;
2265
2266      // Ignore negative constants, as the code below doesn't handle them
2267      // correctly. TODO: Remove this restriction.
2268      if (!C->getValue().isStrictlyPositive()) continue;
2269
2270      /* Add new PHINode. */
2271      PHINode *NewPH = PHINode::Create(DestTy, "IV.S.", PH);
2272
2273      /* create new increment. '++d' in above example. */
2274      Constant *CFP = ConstantFP::get(DestTy, C->getZExtValue());
2275      BinaryOperator *NewIncr =
2276        BinaryOperator::Create(Incr->getOpcode() == Instruction::Add ?
2277                                 Instruction::FAdd : Instruction::FSub,
2278                               NewPH, CFP, "IV.S.next.", Incr);
2279
2280      NewPH->addIncoming(NewInit, PH->getIncomingBlock(Entry));
2281      NewPH->addIncoming(NewIncr, PH->getIncomingBlock(Latch));
2282
2283      /* Remove cast operation */
2284      ShadowUse->replaceAllUsesWith(NewPH);
2285      ShadowUse->eraseFromParent();
2286      NumShadow++;
2287      break;
2288    }
2289  }
2290}
2291
2292/// OptimizeIndvars - Now that IVUsesByStride is set up with all of the indvar
2293/// uses in the loop, look to see if we can eliminate some, in favor of using
2294/// common indvars for the different uses.
2295void LoopStrengthReduce::OptimizeIndvars(Loop *L) {
2296  // TODO: implement optzns here.
2297
2298  OptimizeShadowIV(L);
2299}
2300
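/// StrideMightBeShared - Return true if the IV for the given constant stride
/// might end up being shared: some other constant stride in the loop is a
/// multiple of this one and its users could legally be rewritten in terms of
/// this stride's IV. If CheckPreInc is true, only report sharing when at
/// least one of those users uses the pre-incremented value.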
2301bool LoopStrengthReduce::StrideMightBeShared(const SCEV* Stride, Loop *L,
2302                                             bool CheckPreInc) {
2303  int64_t SInt = cast<SCEVConstant>(Stride)->getValue()->getSExtValue();
2304  for (unsigned i = 0, e = IU->StrideOrder.size(); i != e; ++i) {
2305    std::map<const SCEV *, IVUsersOfOneStride *>::iterator SI =
2306      IU->IVUsesByStride.find(IU->StrideOrder[i]);
2307    const SCEV *Share = SI->first;
2308    if (!isa<SCEVConstant>(SI->first) || Share == Stride)
2309      continue;
2310    int64_t SSInt = cast<SCEVConstant>(Share)->getValue()->getSExtValue();
2311    if (SSInt == SInt)
2312      return true; // This can definitely be reused.
2313    if (unsigned(abs64(SSInt)) < SInt || (SSInt % SInt) != 0)
2314      continue;
2315    int64_t Scale = SSInt / SInt;
2316    bool AllUsesAreAddresses = true;
2317    bool AllUsesAreOutsideLoop = true;
2318    std::vector<BasedUser> UsersToProcess;
2319    const SCEV *CommonExprs = CollectIVUsers(SI->first, *SI->second, L,
2320                                             AllUsesAreAddresses,
2321                                             AllUsesAreOutsideLoop,
2322                                             UsersToProcess);
2323    if (AllUsesAreAddresses &&
2324        ValidScale(!CommonExprs->isZero(), Scale, UsersToProcess)) {
2325      if (!CheckPreInc)
2326        return true;
2327      // Any pre-inc iv use?
2328      IVUsersOfOneStride &StrideUses = *IU->IVUsesByStride[Share];
2329      for (ilist<IVStrideUse>::iterator I = StrideUses.Users.begin(),
2330             E = StrideUses.Users.end(); I != E; ++I) {
2331        if (!I->isUseOfPostIncrementedValue())
2332          return true;
2333      }
2334    }
2335  }
2336  return false;
2337}
2338
2339/// isUsedByExitBranch - Return true if the icmp is used by a loop terminating
2340/// conditional branch, either directly or after being and'ed / or'ed with
2341/// other conditions before being used as the branch condition.
2342static bool isUsedByExitBranch(ICmpInst *Cond, Loop *L) {
2343  BasicBlock *CondBB = Cond->getParent();
2344  if (!L->isLoopExiting(CondBB))
2345    return false;
2346  BranchInst *TermBr = dyn_cast<BranchInst>(CondBB->getTerminator());
2347  if (!TermBr || !TermBr->isConditional())
2348    return false;
2349
2350  Value *User = *Cond->use_begin();
2351  Instruction *UserInst = dyn_cast<Instruction>(User);
2352  while (UserInst &&
2353         (UserInst->getOpcode() == Instruction::And ||
2354          UserInst->getOpcode() == Instruction::Or)) {
2355    if (!UserInst->hasOneUse() || UserInst->getParent() != CondBB)
2356      return false;
2357    User = *User->use_begin();
2358    UserInst = dyn_cast<Instruction>(User);
2359  }
2360  return User == TermBr;
2361}
2362
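/// ShouldCountToZero - Return true if the given loop-exiting equality compare
/// is a good candidate for being rewritten to count down toward zero: the
/// tested IV is an affine addrec with a non-negative constant step (i.e. not
/// already counting down), the IV is used only for the exit test, and the
/// comparison is against a loop-invariant value that is not zero and (when a
/// TargetLowering is provided) not already a legal compare immediate.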
2363static bool ShouldCountToZero(ICmpInst *Cond, IVStrideUse* &CondUse,
2364                              ScalarEvolution *SE, Loop *L,
2365                              const TargetLowering *TLI = 0) {
2366  if (!L->contains(Cond))
2367    return false;
2368
2369  if (!isa<SCEVConstant>(CondUse->getOffset()))
2370    return false;
2371
2372  // Handle only tests for equality for the moment.
2373  if (!Cond->isEquality() || !Cond->hasOneUse())
2374    return false;
2375  if (!isUsedByExitBranch(Cond, L))
2376    return false;
2377
2378  Value *CondOp0 = Cond->getOperand(0);
2379  const SCEV *IV = SE->getSCEV(CondOp0);
2380  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
2381  if (!AR || !AR->isAffine())
2382    return false;
2383
2384  const SCEVConstant *SC = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*SE));
2385  if (!SC || SC->getValue()->getSExtValue() < 0)
2386    // If it's already counting down, don't do anything.
2387    return false;
2388
2389  // If the RHS of the comparison is not loop invariant, the rewrite
2390  // cannot be done. Also bail out if it's already comparing against zero.
2391  // If we are checking this before the compare stride optimization, also
2392  // bail out if it's comparing against an already legal immediate.
2393  Value *RHS = Cond->getOperand(1);
2394  ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS);
2395  if (!L->isLoopInvariant(RHS) ||
2396      (RHSC && RHSC->isZero()) ||
2397      (RHSC && TLI && TLI->isLegalICmpImmediate(RHSC->getSExtValue())))
2398    return false;
2399
2400  // Make sure the IV is only used for counting.  Value may be preinc or
2401  // postinc; 2 uses in either case.
2402  if (!CondOp0->hasNUses(2))
2403    return false;
2404
2405  return true;
2406}
2407
2408/// OptimizeLoopTermCond - Change loop terminating condition to use the
2409/// postinc iv when possible.
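///
/// For example (illustrative): with "%i.next = add %i, 1", testing
/// "icmp eq %i, %n" keeps both %i and %i.next live across the compare;
/// rewriting the test in terms of %i.next (with the bound adjusted by one
/// stride) lets the two live ranges be coalesced into a single register.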
2410void LoopStrengthReduce::OptimizeLoopTermCond(Loop *L) {
2411  BasicBlock *LatchBlock = L->getLoopLatch();
2412  bool LatchExit = L->isLoopExiting(LatchBlock);
2413  SmallVector<BasicBlock*, 8> ExitingBlocks;
2414  L->getExitingBlocks(ExitingBlocks);
2415
2416  for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
2417    BasicBlock *ExitingBlock = ExitingBlocks[i];
2418
2419    // Finally, get the terminating condition for the loop if possible.  If we
2420    // can, we want to change it to use a post-incremented version of its
2421    // induction variable, to allow coalescing the live ranges for the IV into
2422    // one register value.
2423
2424    BranchInst *TermBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
2425    if (!TermBr)
2426      continue;
2427    // FIXME: Overly conservative, termination condition could be an 'or' etc.
2428    if (TermBr->isUnconditional() || !isa<ICmpInst>(TermBr->getCondition()))
2429      continue;
2430
2431    // Search IVUsesByStride to find Cond's IVUse if there is one.
2432    IVStrideUse *CondUse = 0;
2433    const SCEV *CondStride = 0;
2434    ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition());
2435    if (!FindIVUserForCond(Cond, CondUse, CondStride))
2436      continue;
2437
2438    // If the latch block is exiting and it's not a single block loop, it's
2439    // not safe to use postinc iv in other exiting blocks. FIXME: overly
2440    // conservative? How about icmp stride optimization?
2441    bool UsePostInc =  !(e > 1 && LatchExit && ExitingBlock != LatchBlock);
2442    if (UsePostInc && ExitingBlock != LatchBlock) {
2443      if (!Cond->hasOneUse())
2444        // See below, we don't want the condition to be cloned.
2445        UsePostInc = false;
2446      else {
2447        // If the exiting block is the latch block, we know it's safe and profitable
2448        // to transform the icmp to use the post-inc iv. Otherwise do so only if it
2449        // would not reuse another iv and its iv would be reused by other uses.
2450        // We are optimizing for the case where the icmp is the only use of the
2451        // iv.
2452        IVUsersOfOneStride &StrideUses = *IU->IVUsesByStride[CondStride];
2453        for (ilist<IVStrideUse>::iterator I = StrideUses.Users.begin(),
2454               E = StrideUses.Users.end(); I != E; ++I) {
2455          if (I->getUser() == Cond)
2456            continue;
2457          if (!I->isUseOfPostIncrementedValue()) {
2458            UsePostInc = false;
2459            break;
2460          }
2461        }
2462      }
2463
2464      // If the iv for this stride might be shared by users of another stride,
2465      // and any of those users might use the pre-inc iv, then it's not safe to
2466      // use the post-inc iv here.
2466      if (UsePostInc &&
2467          isa<SCEVConstant>(CondStride) &&
2468          StrideMightBeShared(CondStride, L, true))
2469        UsePostInc = false;
2470    }
2471
2472    // If the trip count is computed in terms of a max (due to ScalarEvolution
2473    // being unable to find a sufficient guard, for example), change the loop
2474    // comparison to use SLT or ULT instead of NE.
2475    Cond = OptimizeMax(L, Cond, CondUse);
2476
2477    // If possible, change stride and operands of the compare instruction to
2478    // eliminate one stride. However, avoid rewriting the compare instruction
2479    // with an iv of new stride if it's likely the new stride uses will be
2480    // rewritten using the stride of the compare instruction.
2481    if (ExitingBlock == LatchBlock && isa<SCEVConstant>(CondStride)) {
2482      // If the condition stride is a constant and it's the only use, we might
2483      // want to optimize it first by turning it to count toward zero.
2484      if (!StrideMightBeShared(CondStride, L, false) &&
2485          !ShouldCountToZero(Cond, CondUse, SE, L, TLI))
2486        Cond = ChangeCompareStride(L, Cond, CondUse, CondStride);
2487    }
2488
2489    if (!UsePostInc)
2490      continue;
2491
2492    DEBUG(dbgs() << "  Change loop exiting icmp to use postinc iv: "
2493          << *Cond << '\n');
2494
2495    // It's possible for the setcc instruction to be anywhere in the loop, and
2496    // possible for it to have multiple users.  If it is not immediately before
2497    // the exiting block branch, move it.
2498    if (&*++BasicBlock::iterator(Cond) != (Instruction*)TermBr) {
2499      if (Cond->hasOneUse()) {   // Condition has a single use, just move it.
2500        Cond->moveBefore(TermBr);
2501      } else {
2502        // Otherwise, clone the terminating condition and insert it before the
2503        // branch in the exiting block.
2504        Cond = cast<ICmpInst>(Cond->clone());
2505        Cond->setName(L->getHeader()->getName() + ".termcond");
2506        ExitingBlock->getInstList().insert(TermBr, Cond);
2507
2508        // Clone the IVUse, as the old use still exists!
2509        IU->IVUsesByStride[CondStride]->addUser(CondUse->getOffset(), Cond,
2510                                             CondUse->getOperandValToReplace());
2511        CondUse = &IU->IVUsesByStride[CondStride]->Users.back();
2512      }
2513    }
2514
2515    // If we get to here, we know that we can transform the setcc instruction to
2516    // use the post-incremented version of the IV, allowing us to coalesce the
2517    // live ranges for the IV correctly.
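    // Note that the post-incremented iv equals the pre-incremented iv plus
    // one stride, so subtracting CondStride from the use's offset keeps the
    // value actually compared unchanged.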
2518    CondUse->setOffset(SE->getMinusSCEV(CondUse->getOffset(), CondStride));
2519    CondUse->setIsUseOfPostIncrementedValue(true);
2520    Changed = true;
2521
2522    ++NumLoopCond;
2523  }
2524}
2525
2526bool LoopStrengthReduce::OptimizeLoopCountIVOfStride(const SCEV* &Stride,
2527                                                     IVStrideUse* &CondUse,
2528                                                     Loop *L) {
2529  // If the only use is the icmp of a loop-exiting conditional branch, then
2530  // attempt the optimization.
2531  BasedUser User = BasedUser(*CondUse, SE);
2532  assert(isa<ICmpInst>(User.Inst) && "Expecting an ICmpInst!");
2533  ICmpInst *Cond = cast<ICmpInst>(User.Inst);
2534
2535  // Less strict check now that compare stride optimization is done.
2536  if (!ShouldCountToZero(Cond, CondUse, SE, L))
2537    return false;
2538
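  // Locate the counting PHI and its increment; the value tested by the
  // compare may be either the pre-incremented PHI itself or the
  // post-incremented add instruction.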
2539  Value *CondOp0 = Cond->getOperand(0);
2540  PHINode *PHIExpr = dyn_cast<PHINode>(CondOp0);
2541  Instruction *Incr;
2542  if (!PHIExpr) {
2543    // Value tested is postinc. Find the phi node.
2544    Incr = dyn_cast<BinaryOperator>(CondOp0);
2545    // FIXME: Just use User.OperandValToReplace here?
2546    if (!Incr || Incr->getOpcode() != Instruction::Add)
2547      return false;
2548
2549    PHIExpr = dyn_cast<PHINode>(Incr->getOperand(0));
2550    if (!PHIExpr)
2551      return false;
2552    // 1 use for preinc value, the increment.
2553    if (!PHIExpr->hasOneUse())
2554      return false;
2555  } else {
2556    assert(isa<PHINode>(CondOp0) &&
2557           "Unexpected loop exiting counting instruction sequence!");
2558    PHIExpr = cast<PHINode>(CondOp0);
2559    // Value tested is preinc.  Find the increment.
2560    // A CmpInst is not a BinaryOperator; we depend on this.
2561    Instruction::use_iterator UI = PHIExpr->use_begin();
2562    Incr = dyn_cast<BinaryOperator>(UI);
2563    if (!Incr)
2564      Incr = dyn_cast<BinaryOperator>(++UI);
2565    // One use for postinc value, the phi.  Unnecessarily conservative?
2566    if (!Incr || !Incr->hasOneUse() || Incr->getOpcode() != Instruction::Add)
2567      return false;
2568  }
2569
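  // Roughly, the rewrite below turns a loop of the form
  //   for (i = start; i != end; i += S) { ... }
  // into
  //   for (j = end - start; j != 0; j -= S) { ... }
  // (a sketch only; the actual transformation edits the PHI, the increment,
  // and the icmp in place).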
2570  // Replace the increment with a decrement.
2571  DEBUG(dbgs() << "LSR: Examining use ");
2572  DEBUG(WriteAsOperand(dbgs(), CondOp0, /*PrintType=*/false));
2573  DEBUG(dbgs() << " in Inst: " << *Cond << '\n');
2574  BinaryOperator *Decr =  BinaryOperator::Create(Instruction::Sub,
2575                         Incr->getOperand(0), Incr->getOperand(1), "tmp", Incr);
2576  Incr->replaceAllUsesWith(Decr);
2577  Incr->eraseFromParent();
2578
2579  // Substitute endval-startval for the original startval, and 0 for the
2580  // original endval.  Since we're only testing for equality this is OK even
2581  // if the computation wraps around.
2582  BasicBlock  *Preheader = L->getLoopPreheader();
2583  Instruction *PreInsertPt = Preheader->getTerminator();
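  // The counting PHI has one incoming value from outside the loop (the start
  // value) and one from inside it; select the index of the out-of-loop
  // operand.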
2584  unsigned InBlock = L->contains(PHIExpr->getIncomingBlock(0)) ? 1 : 0;
2585  Value *StartVal = PHIExpr->getIncomingValue(InBlock);
2586  Value *EndVal = Cond->getOperand(1);
2587  DEBUG(dbgs() << "    Optimize loop counting iv to count down ["
2588        << *EndVal << " .. " << *StartVal << "]\n");
2589
2590  // FIXME: check for case where both are constant.
2591  Constant* Zero = ConstantInt::get(Cond->getOperand(1)->getType(), 0);
2592  BinaryOperator *NewStartVal = BinaryOperator::Create(Instruction::Sub,
2593                                          EndVal, StartVal, "tmp", PreInsertPt);
2594  PHIExpr->setIncomingValue(InBlock, NewStartVal);
2595  Cond->setOperand(1, Zero);
2596  DEBUG(dbgs() << "    New icmp: " << *Cond << "\n");
2597
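  // The iv now steps by the negated stride. Reuse an existing stride equal
  // to -Stride if there is one so the rewritten compare can share its IV;
  // otherwise create a new constant stride below.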
2598  int64_t SInt = cast<SCEVConstant>(Stride)->getValue()->getSExtValue();
2599  const SCEV *NewStride = 0;
2600  bool Found = false;
2601  for (unsigned i = 0, e = IU->StrideOrder.size(); i != e; ++i) {
2602    const SCEV *OldStride = IU->StrideOrder[i];
2603    if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OldStride))
2604      if (SC->getValue()->getSExtValue() == -SInt) {
2605        Found = true;
2606        NewStride = OldStride;
2607        break;
2608      }
2609  }
2610
2611  if (!Found)
2612    NewStride = SE->getIntegerSCEV(-SInt, Stride->getType());
2613  IU->AddUser(NewStride, CondUse->getOffset(), Cond, Cond->getOperand(0));
2614  IU->IVUsesByStride[Stride]->removeUser(CondUse);
2615
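  // The rewritten compare was appended to the new stride's use list by
  // AddUser above; point CondUse and Stride at that entry so the caller sees
  // the updated use.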
2616  CondUse = &IU->IVUsesByStride[NewStride]->Users.back();
2617  Stride = NewStride;
2618
2619  ++NumCountZero;
2620
2621  return true;
2622}
2623
2624/// OptimizeLoopCountIV - If, after all sharing of IVs, the IV used for deciding
2625/// when to exit the loop is used only for that purpose, try to rearrange things
2626/// so it counts down to a test against zero.
2627bool LoopStrengthReduce::OptimizeLoopCountIV(Loop *L) {
2628  bool ThisChanged = false;
2629  for (unsigned i = 0, e = IU->StrideOrder.size(); i != e; ++i) {
2630    const SCEV *Stride = IU->StrideOrder[i];
2631    std::map<const SCEV *, IVUsersOfOneStride *>::iterator SI =
2632      IU->IVUsesByStride.find(Stride);
2633    assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!");
2634    // FIXME: Generalize to non-affine IV's.
2635    if (!SI->first->isLoopInvariant(L))
2636      continue;
2637    // If the stride is a constant and its single user is an icmp, check
2638    // whether we can optimize the loop to count down.
2639    if (isa<SCEVConstant>(Stride) && SI->second->Users.size() == 1) {
2640      Instruction *User = SI->second->Users.begin()->getUser();
2641      if (!isa<ICmpInst>(User))
2642        continue;
2643      const SCEV *CondStride = Stride;
2644      IVStrideUse *Use = &*SI->second->Users.begin();
2645      if (!OptimizeLoopCountIVOfStride(CondStride, Use, L))
2646        continue;
2647      ThisChanged = true;
2648
2649      // Now check if it's possible to reuse this iv for other stride uses.
2650      for (unsigned j = 0, ee = IU->StrideOrder.size(); j != ee; ++j) {
2651        const SCEV *SStride = IU->StrideOrder[j];
2652        if (SStride == CondStride)
2653          continue;
2654        std::map<const SCEV *, IVUsersOfOneStride *>::iterator SII =
2655          IU->IVUsesByStride.find(SStride);
2656        assert(SII != IU->IVUsesByStride.end() && "Stride doesn't exist!");
2657        // FIXME: Generalize to non-affine IV's.
2658        if (!SII->first->isLoopInvariant(L))
2659          continue;
2660        // FIXME: Rewrite other stride using CondStride.
2661      }
2662    }
2663  }
2664
2665  Changed |= ThisChanged;
2666  return ThisChanged;
2667}
2668
2669bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager &LPM) {
2670  IU = &getAnalysis<IVUsers>();
2671  SE = &getAnalysis<ScalarEvolution>();
2672  Changed = false;
2673
2674  // If LoopSimplify form is not available, stay out of trouble.
2675  if (!L->getLoopPreheader() || !L->getLoopLatch())
2676    return false;
2677
2678  if (!IU->IVUsesByStride.empty()) {
2679    DEBUG(dbgs() << "\nLSR on \"" << L->getHeader()->getParent()->getName()
2680          << "\" ";
2681          L->print(dbgs()));
2682
2683    // Sort the StrideOrder so we process larger strides first.
2684    std::stable_sort(IU->StrideOrder.begin(), IU->StrideOrder.end(),
2685                     StrideCompare(SE));
2686
2687    // Optimize induction variables.  Some indvar uses can be transformed to use
2688    // strides that will be needed for other purposes.  A common example of this
2689    // is the exit test for the loop, which can often be rewritten to use the
2690    // computation of some other indvar to decide when to terminate the loop.
2691    OptimizeIndvars(L);
2692
2693    // Change loop terminating condition to use the postinc iv when possible
2694    // and optimize loop terminating compare. FIXME: Move this after
2695    // StrengthReduceIVUsersOfStride?
2696    OptimizeLoopTermCond(L);
2697
2698    // FIXME: We can shrink overlarge IVs here. E.g., if the code has
2699    // computation in i64 values and the target doesn't support i64, demote
2700    // the computation to 32-bit if safe.
2701
2702    // FIXME: Attempt to reuse values across multiple IV's.  In particular, we
2703    // could have something like "for(i) { foo(i*8); bar(i*16) }", which should
2704    // be codegened as "for (j = 0;; j+=8) { foo(j); bar(j+j); }" on X86/PPC.
2705    // Need to be careful that IV's are all the same type.  Only works for
2706    // intptr_t indvars.
2707
2708    // IVsByStride keeps IVs for one particular loop.
2709    assert(IVsByStride.empty() && "Stale entries in IVsByStride?");
2710
2711    StrengthReduceIVUsers(L);
2712
2713    // After all sharing is done, see if we can adjust the loop to test against
2714    // zero instead of counting up to a maximum.  This is usually faster.
2715    OptimizeLoopCountIV(L);
2716
2717    // We're done analyzing this loop; release all the state we built up for it.
2718    IVsByStride.clear();
2719
2720    // Clean up after ourselves
2721    DeleteTriviallyDeadInstructions();
2722  }
2723
2724  // At this point, it is worth checking to see if any recurrence PHIs are also
2725  // dead, so that we can remove them as well.
2726  DeleteDeadPHIs(L->getHeader());
2727
2728  return Changed;
2729}
2730