ScalarEvolutionExpander.cpp revision 263508
//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
/// reusing an existing cast if a suitable one exists, moving an existing
/// cast if a suitable one exists but isn't in the right place, or
/// creating a new one.
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
                                       Instruction::CastOps Op,
                                       BasicBlock::iterator IP) {
  // This function must be called with the builder having a valid insertion
  // point. It doesn't need to be the actual IP where the uses of the returned
  // cast will be added, but it must dominate such IP.
  // We use this precondition to produce a cast that will dominate all its
  // uses. In particular, this is crucial for the case where the builder's
  // insertion point *is* the point where we were asked to put the cast.
  // Since we don't know the builder's insertion point is actually
  // where the uses will be added (only that it dominates it), we are
  // not allowed to move it.
  BasicBlock::iterator BIP = Builder.GetInsertPoint();

  Instruction *Ret = NULL;

  // Check to see if there is already a cast!
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
       UI != E; ++UI) {
    User *U = *UI;
    if (U->getType() == Ty)
      if (CastInst *CI = dyn_cast<CastInst>(U))
        if (CI->getOpcode() == Op) {
          // If the cast isn't where we want it, create a new cast at IP.
          // Likewise, do not reuse a cast at BIP because it must dominate
          // instructions that might be inserted before BIP.
          if (BasicBlock::iterator(CI) != IP || BIP == IP) {
            // Create a new cast, and leave the old cast in place in case
            // it is being used as an insert point. Clear its operand
            // so that it doesn't hold anything live.
            Ret = CastInst::Create(Op, V, Ty, "", IP);
            Ret->takeName(CI);
            CI->replaceAllUsesWith(Ret);
            CI->setOperand(0, UndefValue::get(V->getType()));
            break;
          }
          Ret = CI;
          break;
        }
  }

  // Create a new cast.
  if (!Ret)
    Ret = CastInst::Create(Op, V, Ty, V->getName(), IP);

  // We assert at the end of the function since IP might point to an
  // instruction with different dominance properties than a cast
  // (an invoke for example) and not dominate BIP (but the cast does).
  assert(SE.DT->dominates(Ret, BIP));

  rememberInstruction(Ret);
  return Ret;
}

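// Illustration (hypothetical IR, for exposition only): requesting an i8* view
// of an i64 %val at insertion point IP either reuses an existing
//   %val.cast = inttoptr i64 %val to i8*
// already sitting at IP, or creates a fresh inttoptr there; a matching cast
// found at the wrong position is RAUW'd and left behind with an undef
// operand rather than moved, since moving it could invalidate insert points.
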
/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
  assert((Op == Instruction::BitCast ||
          Op == Instruction::PtrToInt ||
          Op == Instruction::IntToPtr) &&
         "InsertNoopCastOfTo cannot perform non-noop casts!");
  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
         "InsertNoopCastOfTo cannot change sizes!");

  // Short-circuit unnecessary bitcasts.
  if (Op == Instruction::BitCast) {
    if (V->getType() == Ty)
      return V;
    if (CastInst *CI = dyn_cast<CastInst>(V)) {
      if (CI->getOperand(0)->getType() == Ty)
        return CI->getOperand(0);
    }
  }
  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
  if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
    if (CastInst *CI = dyn_cast<CastInst>(V))
      if ((CI->getOpcode() == Instruction::PtrToInt ||
           CI->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CI->getType()) ==
          SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
        return CI->getOperand(0);
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      if ((CE->getOpcode() == Instruction::PtrToInt ||
           CE->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CE->getType()) ==
          SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
        return CE->getOperand(0);
  }

  // Fold a cast of a constant.
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(Op, C, Ty);

  // Cast the argument at the beginning of the entry block, after
  // any bitcasts of other arguments.
  if (Argument *A = dyn_cast<Argument>(V)) {
    BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
    while ((isa<BitCastInst>(IP) &&
            isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
            cast<BitCastInst>(IP)->getOperand(0) != A) ||
           isa<DbgInfoIntrinsic>(IP) ||
           isa<LandingPadInst>(IP))
      ++IP;
    return ReuseOrCreateCast(A, Ty, Op, IP);
  }

  // Cast the instruction immediately after the instruction.
  Instruction *I = cast<Instruction>(V);
  BasicBlock::iterator IP = I; ++IP;
  if (InvokeInst *II = dyn_cast<InvokeInst>(I))
    IP = II->getNormalDest()->begin();
  while (isa<PHINode>(IP) || isa<LandingPadInst>(IP))
    ++IP;
  return ReuseOrCreateCast(I, Ty, Op, IP);
}

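// Illustration (hypothetical values): asking for an inttoptr of
//   %i = ptrtoint i32* %p to i64
// back to i32* returns %p directly via the short-circuit above, and a noop
// cast of a Constant folds to a ConstantExpr instead of emitting an
// instruction.
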
/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation.
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
                                 Value *LHS, Value *RHS) {
  // Fold a binop with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      return ConstantExpr::get(Opcode, CLHS, CRHS);

  // Do a quick scan to see if we have this binop nearby.  If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      // Don't count dbg.value against the ScanLimit, to avoid perturbing the
      // generated code.
      if (isa<DbgInfoIntrinsic>(IP))
        ScanLimit++;
      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
          IP->getOperand(1) == RHS)
        return IP;
      if (IP == BlockBegin) break;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc();
  BuilderType::InsertPointGuard Guard(Builder);

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
  }

  // If we haven't found this binop, insert it.
  Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
  BO->setDebugLoc(Loc);
  rememberInstruction(BO);

  return BO;
}

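// Illustration: because InsertBinop scans the six instructions preceding the
// insertion point, two back-to-back requests for (%a + %b) in the same block
// yield a single add; and when both operands are invariant in the enclosing
// loops, the add is emitted in the outermost preheader that still sees them.
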
/// FactorOutConstant - Test if S is divisible by Factor, using signed
/// division. If so, update S with Factor divided out and return true.
/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
/// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
/// unnecessary; in its place, just signed-divide Ops[i] by the scale and
/// check to see if the divide was folded.
static bool FactorOutConstant(const SCEV *&S,
                              const SCEV *&Remainder,
                              const SCEV *Factor,
                              ScalarEvolution &SE,
                              const DataLayout *TD) {
  // Everything is divisible by one.
  if (Factor->isOne())
    return true;

  // x/x == 1.
  if (S == Factor) {
    S = SE.getConstant(S->getType(), 1);
    return true;
  }

  // For a Constant, check for a multiple of the given factor.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    // 0/x == 0.
    if (C->isZero())
      return true;
    // Check for divisibility.
    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
      ConstantInt *CI =
        ConstantInt::get(SE.getContext(),
                         C->getValue()->getValue().sdiv(
                                                   FC->getValue()->getValue()));
      // If the quotient is zero and the remainder is non-zero, reject
      // the value at this scale. It will be considered for subsequent
      // smaller scales.
      if (!CI->isZero()) {
        const SCEV *Div = SE.getConstant(CI);
        S = Div;
        Remainder =
          SE.getAddExpr(Remainder,
                        SE.getConstant(C->getValue()->getValue().srem(
                                                  FC->getValue()->getValue())));
        return true;
      }
    }
  }

  // In a Mul, check if there is a constant operand which is a multiple
  // of the given factor.
  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    if (TD) {
      // With DataLayout, the size is known. Check if there is a constant
      // operand which is a multiple of the given factor. If so, we can
      // factor it.
      const SCEVConstant *FC = cast<SCEVConstant>(Factor);
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
        if (!C->getValue()->getValue().srem(FC->getValue()->getValue())) {
          SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
          NewMulOps[0] =
            SE.getConstant(C->getValue()->getValue().sdiv(
                                                   FC->getValue()->getValue()));
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
    } else {
      // Without DataLayout, check if Factor can be factored out of any of the
      // Mul's operands. If so, we can just remove it.
      for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
        const SCEV *SOp = M->getOperand(i);
        const SCEV *Remainder = SE.getConstant(SOp->getType(), 0);
        if (FactorOutConstant(SOp, Remainder, Factor, SE, TD) &&
            Remainder->isZero()) {
          SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
          NewMulOps[i] = SOp;
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
      }
    }
  }

  // In an AddRec, check if both start and step are divisible.
  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    const SCEV *Step = A->getStepRecurrence(SE);
    const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
    if (!FactorOutConstant(Step, StepRem, Factor, SE, TD))
      return false;
    if (!StepRem->isZero())
      return false;
    const SCEV *Start = A->getStart();
    if (!FactorOutConstant(Start, Remainder, Factor, SE, TD))
      return false;
    S = SE.getAddRecExpr(Start, Step, A->getLoop(),
                         A->getNoWrapFlags(SCEV::FlagNW));
    return true;
  }

  return false;
}

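// Illustration (hypothetical SCEVs): with Factor == 4, a constant S == 10
// becomes S == 2 with 2 added to Remainder (10 sdiv 4 == 2, 10 srem 4 == 2),
// and a multiply S == (8 * %x) becomes (2 * %x) with nothing left over.
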
/// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
/// is the number of SCEVAddRecExprs present, which are kept at the end of
/// the list.
///
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
                                Type *Ty,
                                ScalarEvolution &SE) {
  unsigned NumAddRecs = 0;
  for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
    ++NumAddRecs;
  // Group Ops into non-addrecs and addrecs.
  SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
  SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
  // Let ScalarEvolution sort and simplify the non-addrecs list.
  const SCEV *Sum = NoAddRecs.empty() ?
                    SE.getConstant(Ty, 0) :
                    SE.getAddExpr(NoAddRecs);
  // If it returned an add, use the operands. Otherwise it simplified
  // the sum into a single value, so just use that.
  Ops.clear();
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
    Ops.append(Add->op_begin(), Add->op_end());
  else if (!Sum->isZero())
    Ops.push_back(Sum);
  // Then append the addrecs.
  Ops.append(AddRecs.begin(), AddRecs.end());
}

/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
/// This helps expose more opportunities for folding parts of the expressions
/// into GEP indices.
///
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
                         Type *Ty,
                         ScalarEvolution &SE) {
  // Find the addrecs.
  SmallVector<const SCEV *, 8> AddRecs;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
      const SCEV *Start = A->getStart();
      if (Start->isZero()) break;
      const SCEV *Zero = SE.getConstant(Ty, 0);
      AddRecs.push_back(SE.getAddRecExpr(Zero,
                                         A->getStepRecurrence(SE),
                                         A->getLoop(),
                                         A->getNoWrapFlags(SCEV::FlagNW)));
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
        Ops[i] = Zero;
        Ops.append(Add->op_begin(), Add->op_end());
        e += Add->getNumOperands();
      } else {
        Ops[i] = Start;
      }
    }
  if (!AddRecs.empty()) {
    // Add the addrecs onto the end of the list.
    Ops.append(AddRecs.begin(), AddRecs.end());
    // Re-sort the operand list, moving any constants to the front.
    SimplifyAddOperands(Ops, Ty, SE);
  }
}

/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
/// for details.
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
///
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
                                    const SCEV *const *op_end,
                                    PointerType *PTy,
                                    Type *Ty,
                                    Value *V) {
  Type *ElTy = PTy->getElementType();
  SmallVector<Value *, 4> GepIndices;
  SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
  bool AnyNonZeroIndices = false;

  // Split AddRecs up into parts as either of the parts may be usable
  // without the other.
  SplitAddRecs(Ops, Ty, SE);

  Type *IntPtrTy = SE.TD
                 ? SE.TD->getIntPtrType(PTy)
                 : Type::getInt64Ty(PTy->getContext());

  // Descend down the pointer's type and attempt to convert the other
  // operands into GEP indices, at each level. The first index in a GEP
  // indexes into the array implied by the pointer operand; the rest of
  // the indices index into the element or field type selected by the
  // preceding index.
  for (;;) {
    // If the scale size is not 0, attempt to factor out a scale for
    // array indexing.
    SmallVector<const SCEV *, 8> ScaledOps;
    if (ElTy->isSized()) {
      const SCEV *ElSize = SE.getSizeOfExpr(IntPtrTy, ElTy);
      if (!ElSize->isZero()) {
        SmallVector<const SCEV *, 8> NewOps;
        for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
          const SCEV *Op = Ops[i];
          const SCEV *Remainder = SE.getConstant(Ty, 0);
          if (FactorOutConstant(Op, Remainder, ElSize, SE, SE.TD)) {
            // Op now has ElSize factored out.
            ScaledOps.push_back(Op);
            if (!Remainder->isZero())
              NewOps.push_back(Remainder);
            AnyNonZeroIndices = true;
          } else {
            // The operand was not divisible, so add it to the list of operands
            // we'll scan next iteration.
            NewOps.push_back(Ops[i]);
          }
        }
        // If we made any changes, update Ops.
        if (!ScaledOps.empty()) {
          Ops = NewOps;
          SimplifyAddOperands(Ops, Ty, SE);
        }
      }
    }

    // Record the scaled array index for this level of the type. If
    // we didn't find any operands that could be factored, tentatively
    // assume that element zero was selected (since the zero offset
    // would obviously be folded away).
    Value *Scaled = ScaledOps.empty() ?
                    Constant::getNullValue(Ty) :
                    expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
    GepIndices.push_back(Scaled);

    // Collect struct field index operands.
    while (StructType *STy = dyn_cast<StructType>(ElTy)) {
      bool FoundFieldNo = false;
      // An empty struct has no fields.
      if (STy->getNumElements() == 0) break;
      if (SE.TD) {
        // With DataLayout, field offsets are known. See if a constant offset
        // falls within any of the struct fields.
        if (Ops.empty()) break;
        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
          if (SE.getTypeSizeInBits(C->getType()) <= 64) {
            const StructLayout &SL = *SE.TD->getStructLayout(STy);
            uint64_t FullOffset = C->getValue()->getZExtValue();
            if (FullOffset < SL.getSizeInBytes()) {
              unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
              GepIndices.push_back(
                  ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
              ElTy = STy->getTypeAtIndex(ElIdx);
              Ops[0] =
                SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
              AnyNonZeroIndices = true;
              FoundFieldNo = true;
            }
          }
      } else {
        // Without DataLayout, just check for an offsetof expression of the
        // appropriate struct type.
        for (unsigned i = 0, e = Ops.size(); i != e; ++i)
          if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Ops[i])) {
            Type *CTy;
            Constant *FieldNo;
            if (U->isOffsetOf(CTy, FieldNo) && CTy == STy) {
              GepIndices.push_back(FieldNo);
              ElTy =
                STy->getTypeAtIndex(cast<ConstantInt>(FieldNo)->getZExtValue());
              Ops[i] = SE.getConstant(Ty, 0);
              AnyNonZeroIndices = true;
              FoundFieldNo = true;
              break;
            }
          }
      }
      // If no struct field offsets were found, tentatively assume that
      // field zero was selected (since the zero offset would obviously
      // be folded away).
      if (!FoundFieldNo) {
        ElTy = STy->getTypeAtIndex(0u);
        GepIndices.push_back(
          Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
      }
    }

    if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
      ElTy = ATy->getElementType();
    else
      break;
  }

  // If none of the operands were convertible to proper GEP indices, cast
  // the base to i8* and do an ugly getelementptr with that. It's still
  // better than ptrtoint+arithmetic+inttoptr at least.
  if (!AnyNonZeroIndices) {
    // Cast the base to i8*.
    V = InsertNoopCastOfTo(V,
       Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));

    assert(!isa<Instruction>(V) ||
           SE.DT->dominates(cast<Instruction>(V), Builder.GetInsertPoint()));

    // Expand the operands for a plain byte offset.
    Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);

    // Fold a GEP with constant operands.
    if (Constant *CLHS = dyn_cast<Constant>(V))
      if (Constant *CRHS = dyn_cast<Constant>(Idx))
        return ConstantExpr::getGetElementPtr(CLHS, CRHS);

    // Do a quick scan to see if we have this GEP nearby.  If so, reuse it.
    unsigned ScanLimit = 6;
    BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
    // Scanning starts from the last instruction before the insertion point.
    BasicBlock::iterator IP = Builder.GetInsertPoint();
    if (IP != BlockBegin) {
      --IP;
      for (; ScanLimit; --IP, --ScanLimit) {
        // Don't count dbg.value against the ScanLimit, to avoid perturbing the
        // generated code.
        if (isa<DbgInfoIntrinsic>(IP))
          ScanLimit++;
        if (IP->getOpcode() == Instruction::GetElementPtr &&
            IP->getOperand(0) == V && IP->getOperand(1) == Idx)
          return IP;
        if (IP == BlockBegin) break;
      }
    }

    // Save the original insertion point so we can restore it when we're done.
    BuilderType::InsertPointGuard Guard(Builder);

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
    }

    // Emit a GEP.
    Value *GEP = Builder.CreateGEP(V, Idx, "uglygep");
    rememberInstruction(GEP);

    return GEP;
  }

  // Save the original insertion point so we can restore it when we're done.
  BuilderType::InsertPoint SaveInsertPt = Builder.saveIP();

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(V)) break;

    bool AnyIndexNotLoopInvariant = false;
    for (SmallVectorImpl<Value *>::const_iterator I = GepIndices.begin(),
         E = GepIndices.end(); I != E; ++I)
      if (!L->isLoopInvariant(*I)) {
        AnyIndexNotLoopInvariant = true;
        break;
      }
    if (AnyIndexNotLoopInvariant)
      break;

    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
  }

  // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
  // because ScalarEvolution may have changed the address arithmetic to
  // compute a value which is beyond the end of the allocated object.
  Value *Casted = V;
  if (V->getType() != PTy)
    Casted = InsertNoopCastOfTo(Casted, PTy);
  Value *GEP = Builder.CreateGEP(Casted,
                                 GepIndices,
                                 "scevgep");
  Ops.push_back(SE.getUnknown(GEP));
  rememberInstruction(GEP);

  // Restore the original insert point.
  Builder.restoreIP(SaveInsertPt);

  return expand(SE.getAddExpr(Ops));
}

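// Illustration (hypothetical IR): with a base %p of type i32* and a byte
// offset (4 * %i), the element size 4 is factored out above and a pretty GEP
//   %scevgep = getelementptr i32* %p, i64 %i
// is emitted; an offset that cannot be factored instead produces an i8*
//   %uglygep = getelementptr i8* %v, i64 %offset
// with the raw byte offset as the sole index.
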
/// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
/// SCEV expansion. If they are nested, this is the most nested. If they are
/// neighboring, pick the later.
static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
                                        DominatorTree &DT) {
  if (!A) return B;
  if (!B) return A;
  if (A->contains(B)) return B;
  if (B->contains(A)) return A;
  if (DT.dominates(A->getHeader(), B->getHeader())) return B;
  if (DT.dominates(B->getHeader(), A->getHeader())) return A;
  return A; // Arbitrarily break the tie.
}

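// Illustration: for a nest "for i { for j { ... } }", picking between the two
// loops returns the inner j-loop; for unnested siblings it returns whichever
// loop's header is dominated by the other, i.e. the later of the two.
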
/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop.
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
  // Test whether we've already computed the most relevant loop for this SCEV.
  std::pair<DenseMap<const SCEV *, const Loop *>::iterator, bool> Pair =
    RelevantLoops.insert(std::make_pair(S, static_cast<const Loop *>(0)));
  if (!Pair.second)
    return Pair.first->second;

  if (isa<SCEVConstant>(S))
    // A constant has no relevant loops.
    return 0;
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
      return Pair.first->second = SE.LI->getLoopFor(I->getParent());
    // A non-instruction has no relevant loops.
    return 0;
  }
  if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
    const Loop *L = 0;
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      L = AR->getLoop();
    for (SCEVNAryExpr::op_iterator I = N->op_begin(), E = N->op_end();
         I != E; ++I)
      L = PickMostRelevantLoop(L, getRelevantLoop(*I), *SE.DT);
    return RelevantLoops[N] = L;
  }
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
    const Loop *Result = getRelevantLoop(C->getOperand());
    return RelevantLoops[C] = Result;
  }
  if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
    const Loop *Result =
      PickMostRelevantLoop(getRelevantLoop(D->getLHS()),
                           getRelevantLoop(D->getRHS()),
                           *SE.DT);
    return RelevantLoops[D] = Result;
  }
  llvm_unreachable("Unexpected SCEV type!");
}

namespace {

/// LoopCompare - Compare loops by PickMostRelevantLoop.
class LoopCompare {
  DominatorTree &DT;
public:
  explicit LoopCompare(DominatorTree &dt) : DT(dt) {}

  bool operator()(std::pair<const Loop *, const SCEV *> LHS,
                  std::pair<const Loop *, const SCEV *> RHS) const {
    // Keep pointer operands sorted at the end.
    if (LHS.second->getType()->isPointerTy() !=
        RHS.second->getType()->isPointerTy())
      return LHS.second->getType()->isPointerTy();

    // Compare loops with PickMostRelevantLoop.
    if (LHS.first != RHS.first)
      return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;

    // If one operand is a non-constant negative and the other is not,
    // put the non-constant negative on the right so that a sub can
    // be used instead of a negate and add.
    if (LHS.second->isNonConstantNegative()) {
      if (!RHS.second->isNonConstantNegative())
        return false;
    } else if (RHS.second->isNonConstantNegative())
      return true;

    // Otherwise they are equivalent according to this comparison.
    return false;
  }
};

}

Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the add operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal, and
  // so that pointer operands are inserted first, which the code below relies on
  // to form more involved GEPs.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants and
  // pointer operands precede non-pointer operands.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));

  // Emit instructions to add all the operands. Hoist as much as possible
  // out of loops, and form meaningful getelementptrs where possible.
  Value *Sum = 0;
  for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
       I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
    const Loop *CurLoop = I->first;
    const SCEV *Op = I->second;
    if (!Sum) {
      // This is the first operand. Just expand it.
      Sum = expand(Op);
      ++I;
    } else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
      // The running sum expression is a pointer. Try to form a getelementptr
      // at this level with that as the base.
      SmallVector<const SCEV *, 4> NewOps;
      for (; I != E && I->first == CurLoop; ++I) {
        // If the operand is a SCEVUnknown and not an instruction, peek through
        // it, to enable more of it to be folded into the GEP.
        const SCEV *X = I->second;
        if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
          if (!isa<Instruction>(U->getValue()))
            X = SE.getSCEV(U->getValue());
        NewOps.push_back(X);
      }
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
    } else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
      // The running sum is an integer, and there's a pointer at this level.
      // Try to form a getelementptr. If the running sum is instructions,
      // use a SCEVUnknown to avoid re-analyzing them.
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) :
                                               SE.getSCEV(Sum));
      for (++I; I != E && I->first == CurLoop; ++I)
        NewOps.push_back(I->second);
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
    } else if (Op->isNonConstantNegative()) {
      // Instead of doing a negate and add, just do a subtract.
      Value *W = expandCodeFor(SE.getNegativeSCEV(Op), Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      Sum = InsertBinop(Instruction::Sub, Sum, W);
      ++I;
    } else {
      // A simple add.
      Value *W = expandCodeFor(Op, Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Sum)) std::swap(Sum, W);
      Sum = InsertBinop(Instruction::Add, Sum, W);
      ++I;
    }
  }

  return Sum;
}

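// Illustration (hypothetical SCEV): when expanding (%p + %i + 42), the stable
// sort above puts the pointer %p first and the constant last, so the running
// sum starts as a pointer and the remaining operands are handed to
// expandAddToGEP to become GEP indices instead of integer arithmetic.
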
Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the mul operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));

  // Emit instructions to mul all the operands. Hoist as much as possible
  // out of loops.
  Value *Prod = 0;
  for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
       I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
    const SCEV *Op = I->second;
    if (!Prod) {
      // This is the first operand. Just expand it.
      Prod = expand(Op);
      ++I;
    } else if (Op->isAllOnesValue()) {
      // Instead of doing a multiply by negative one, just do a negate.
      Prod = InsertNoopCastOfTo(Prod, Ty);
      Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod);
      ++I;
    } else {
      // A simple mul.
      Value *W = expandCodeFor(Op, Ty);
      Prod = InsertNoopCastOfTo(Prod, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Prod)) std::swap(Prod, W);
      Prod = InsertBinop(Instruction::Mul, Prod, W);
      ++I;
    }
  }

  return Prod;
}

Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  Value *LHS = expandCodeFor(S->getLHS(), Ty);
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
    const APInt &RHS = SC->getValue()->getValue();
    if (RHS.isPowerOf2())
      return InsertBinop(Instruction::LShr, LHS,
                         ConstantInt::get(Ty, RHS.logBase2()));
  }

  Value *RHS = expandCodeFor(S->getRHS(), Ty);
  return InsertBinop(Instruction::UDiv, LHS, RHS);
}

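// Illustration: a udiv by a constant power of two is strength-reduced above,
// so (%x /u 8) expands to a right shift
//   %t = lshr i64 %x, 3
// rather than a udiv instruction.
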
/// Move parts of Base into Rest to leave Base with the minimal
/// expression that provides a pointer operand suitable for a
/// GEP expansion.
static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
                              ScalarEvolution &SE) {
  while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
    Base = A->getStart();
    Rest = SE.getAddExpr(Rest,
                         SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
                                          A->getStepRecurrence(SE),
                                          A->getLoop(),
                                          A->getNoWrapFlags(SCEV::FlagNW)));
  }
  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
    Base = A->getOperand(A->getNumOperands()-1);
    SmallVector<const SCEV *, 8> NewAddOps(A->op_begin(), A->op_end());
    NewAddOps.back() = Rest;
    Rest = SE.getAddExpr(NewAddOps);
    ExposePointerBase(Base, Rest, SE);
  }
}

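// Illustration (hypothetical SCEVs): given Base == {(16 + %p),+,4}, the loop
// above peels the recurrence so Base becomes (16 + %p), then the add is split
// so Base becomes the pointer %p and Rest accumulates (16 + {0,+,4}), leaving
// a bare pointer suitable as a GEP base.
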
/// Determine if this is a well-behaved chain of instructions leading back to
/// the PHI. If so, it may be reused by expanded expressions.
bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                         const Loop *L) {
  if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
      (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
    return false;
  // If any of the operands don't dominate the insert position, bail.
  // Addrec operands are always loop-invariant, so this can only happen
  // if there are instructions which haven't been hoisted.
  if (L == IVIncInsertLoop) {
    for (User::op_iterator OI = IncV->op_begin()+1,
           OE = IncV->op_end(); OI != OE; ++OI)
      if (Instruction *OInst = dyn_cast<Instruction>(OI))
        if (!SE.DT->dominates(OInst, IVIncInsertPos))
          return false;
  }
  // Advance to the next instruction.
  IncV = dyn_cast<Instruction>(IncV->getOperand(0));
  if (!IncV)
    return false;

  if (IncV->mayHaveSideEffects())
    return false;

  if (IncV != PN)
    return true;

  return isNormalAddRecExprPHI(PN, IncV, L);
}

/// getIVIncOperand returns an induction variable increment's induction
/// variable operand.
///
/// If allowScale is set, any type of GEP is allowed as long as the nonIV
/// operands dominate InsertPos.
///
/// If allowScale is not set, ensure that a GEP increment conforms to one of the
/// simple patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP. If the pattern isn't recognized, return NULL.
Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
                                           Instruction *InsertPos,
                                           bool allowScale) {
  if (IncV == InsertPos)
    return NULL;

  switch (IncV->getOpcode()) {
  default:
    return NULL;
  // Check for a simple Add/Sub or GEP of a loop invariant step.
  case Instruction::Add:
  case Instruction::Sub: {
    Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
    if (!OInst || SE.DT->dominates(OInst, InsertPos))
      return dyn_cast<Instruction>(IncV->getOperand(0));
    return NULL;
  }
  case Instruction::BitCast:
    return dyn_cast<Instruction>(IncV->getOperand(0));
  case Instruction::GetElementPtr:
    for (Instruction::op_iterator I = IncV->op_begin()+1, E = IncV->op_end();
         I != E; ++I) {
      if (isa<Constant>(*I))
        continue;
      if (Instruction *OInst = dyn_cast<Instruction>(*I)) {
        if (!SE.DT->dominates(OInst, InsertPos))
          return NULL;
      }
      if (allowScale) {
        // Allow any kind of GEP as long as it can be hoisted.
        continue;
      }
      // This must be a pointer addition of constants (pretty), which is already
      // handled, or some number of address-size elements (ugly). Ugly geps
      // have 2 operands. i1* is used by the expander to represent an
      // address-size element.
      if (IncV->getNumOperands() != 2)
        return NULL;
      unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
      if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
          && IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
        return NULL;
      break;
    }
    return dyn_cast<Instruction>(IncV->getOperand(0));
  }
}

/// hoistIVInc - Attempt to hoist a simple IV increment above InsertPos to make
/// it available to other uses in this loop. Recursively hoist any operands,
/// until we reach a value that dominates InsertPos.
bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
  if (SE.DT->dominates(IncV, InsertPos))
    return true;

  // InsertPos must itself dominate IncV so that IncV's new position satisfies
  // its existing users.
  if (isa<PHINode>(InsertPos)
      || !SE.DT->dominates(InsertPos->getParent(), IncV->getParent()))
    return false;

  // Check that the chain of IV operands leading back to Phi can be hoisted.
  SmallVector<Instruction*, 4> IVIncs;
  for (;;) {
    Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
    if (!Oper)
      return false;
    // IncV is safe to hoist.
    IVIncs.push_back(IncV);
    IncV = Oper;
    if (SE.DT->dominates(IncV, InsertPos))
      break;
  }
  for (SmallVectorImpl<Instruction*>::reverse_iterator I = IVIncs.rbegin(),
         E = IVIncs.rend(); I != E; ++I) {
    (*I)->moveBefore(InsertPos);
  }
  return true;
}

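// Illustration: if the increment is %iv.next = add %iv, %step and %step is
// already defined above InsertPos, the chain collected above is just
// {%iv.next}; in general the reverse-order moveBefore walk re-emits operands
// before their users, so dominance is preserved after hoisting.
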
/// Determine if this cyclic phi is in a form that would have been generated by
/// LSR. We don't care if the phi was actually expanded in this pass, as long
/// as it is in a low-cost form, for example, no implied multiplication. This
/// should match any patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP.
bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                           const Loop *L) {
  for (Instruction *IVOper = IncV;
       (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
                                 /*allowScale=*/false));) {
    if (IVOper == PN)
      return true;
  }
  return false;
}

/// expandIVInc - Expand an IV increment at Builder's current InsertPos.
/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
/// need to materialize IV increments elsewhere to handle difficult situations.
Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
                                 Type *ExpandTy, Type *IntTy,
                                 bool useSubtract) {
  Value *IncV;
  // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
  if (ExpandTy->isPointerTy()) {
    PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
    // If the step isn't constant, don't use an implicitly scaled GEP, because
    // that would require a multiply inside the loop.
    if (!isa<ConstantInt>(StepV))
      GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
                                  GEPPtrTy->getAddressSpace());
    const SCEV *const StepArray[1] = { SE.getSCEV(StepV) };
    IncV = expandAddToGEP(StepArray, StepArray+1, GEPPtrTy, IntTy, PN);
    if (IncV->getType() != PN->getType()) {
      IncV = Builder.CreateBitCast(IncV, PN->getType());
      rememberInstruction(IncV);
    }
  } else {
    IncV = useSubtract ?
      Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
      Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
    rememberInstruction(IncV);
  }
  return IncV;
}

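// Illustration (hypothetical names): for an integer IV this emits
//   %x.iv.next = add i64 %x.iv, %step
// (or a sub when useSubtract is set); for a pointer IV it emits a GEP, and a
// non-constant step forces an i1* element type so no multiply is implied
// inside the loop.
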
/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
                                        const Loop *L,
                                        Type *ExpandTy,
                                        Type *IntTy) {
  assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position");

  // Reuse a previously-inserted PHI, if present.
  BasicBlock *LatchBlock = L->getLoopLatch();
  if (LatchBlock) {
    for (BasicBlock::iterator I = L->getHeader()->begin();
         PHINode *PN = dyn_cast<PHINode>(I); ++I) {
      if (!SE.isSCEVable(PN->getType()) ||
          (SE.getEffectiveSCEVType(PN->getType()) !=
           SE.getEffectiveSCEVType(Normalized->getType())) ||
          SE.getSCEV(PN) != Normalized)
        continue;

      Instruction *IncV =
        cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock));

      if (LSRMode) {
        if (!isExpandedAddRecExprPHI(PN, IncV, L))
          continue;
        if (L == IVIncInsertLoop && !hoistIVInc(IncV, IVIncInsertPos))
          continue;
      }
      else {
        if (!isNormalAddRecExprPHI(PN, IncV, L))
          continue;
        if (L == IVIncInsertLoop)
          do {
            if (SE.DT->dominates(IncV, IVIncInsertPos))
              break;
            // Make sure the increment is where we want it. But don't move it
            // down past a potential existing post-inc user.
            IncV->moveBefore(IVIncInsertPos);
            IVIncInsertPos = IncV;
            IncV = cast<Instruction>(IncV->getOperand(0));
          } while (IncV != PN);
      }
      // Ok, the add recurrence looks usable.
      // Remember this PHI, even in post-inc mode.
      InsertedValues.insert(PN);
      // Remember the increment.
      rememberInstruction(IncV);
      return PN;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  BuilderType::InsertPointGuard Guard(Builder);

  // Another AddRec may need to be recursively expanded below. For example, if
  // this AddRec is quadratic, the StepV may itself be an AddRec in this
  // loop. Remove this loop from the PostIncLoops set before expanding such
  // AddRecs. Otherwise, we cannot find a valid position for the step
  // (i.e. StepV can never dominate its loop header).  Ideally, we could do
  // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
  // so it's not worth implementing SmallPtrSet::swap.
  PostIncLoopSet SavedPostIncLoops = PostIncLoops;
  PostIncLoops.clear();

  // Expand code for the start value.
  Value *StartV = expandCodeFor(Normalized->getStart(), ExpandTy,
                                L->getHeader()->begin());

  // StartV must be hoisted into L's preheader to dominate the new phi.
  assert(!isa<Instruction>(StartV) ||
         SE.DT->properlyDominates(cast<Instruction>(StartV)->getParent(),
                                  L->getHeader()));

  // Expand code for the step value. Do this before creating the PHI so that PHI
  // reuse code doesn't see an incomplete PHI.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  // If the stride is negative, insert a sub instead of an add for the increment
  // (unless it's a constant, because subtracts of constants are canonicalized
  // to adds).
  bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
  if (useSubtract)
    Step = SE.getNegativeSCEV(Step);
  // Expand the step somewhere that dominates the loop header.
  Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());

  // Create the PHI.
  BasicBlock *Header = L->getHeader();
  Builder.SetInsertPoint(Header, Header->begin());
  pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
  PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
                                  Twine(IVName) + ".iv");
  rememberInstruction(PN);

  // Create the step instructions and populate the PHI.
  for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
    BasicBlock *Pred = *HPI;

    // Add a start value.
    if (!L->contains(Pred)) {
      PN->addIncoming(StartV, Pred);
      continue;
    }

    // Create a step value and add it to the PHI.
    // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the
    // instructions at IVIncInsertPos.
    Instruction *InsertPos = L == IVIncInsertLoop ?
      IVIncInsertPos : Pred->getTerminator();
    Builder.SetInsertPoint(InsertPos);
    Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
    if (isa<OverflowingBinaryOperator>(IncV)) {
      if (Normalized->getNoWrapFlags(SCEV::FlagNUW))
        cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap();
      if (Normalized->getNoWrapFlags(SCEV::FlagNSW))
        cast<BinaryOperator>(IncV)->setHasNoSignedWrap();
    }
    PN->addIncoming(IncV, Pred);
  }

  // After expanding subexpressions, restore the PostIncLoops set so the caller
  // can ensure that IVIncrement dominates the current uses.
  PostIncLoops = SavedPostIncLoops;

  // Remember this PHI, even in post-inc mode.
  InsertedValues.insert(PN);

  return PN;
}

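// Illustration (hypothetical IR): for the addrec {0,+,4}<%loop> this helper
// produces a header phi along the lines of
//   %x.iv = phi i64 [ 0, %preheader ], [ %x.iv.next, %latch ]
// creating one increment per in-loop predecessor, after first trying to
// reuse any existing phi whose SCEV matches the normalized addrec.
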
Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
  Type *STy = S->getType();
  Type *IntTy = SE.getEffectiveSCEVType(STy);
  const Loop *L = S->getLoop();

  // Determine a normalized form of this expression, which is the expression
  // before any post-inc adjustment is made.
  const SCEVAddRecExpr *Normalized = S;
  if (PostIncLoops.count(L)) {
    PostIncLoopSet Loops;
    Loops.insert(L);
    Normalized =
      cast<SCEVAddRecExpr>(TransformForPostIncUse(Normalize, S, 0, 0,
                                                  Loops, SE, *SE.DT));
  }

  // Strip off any non-loop-dominating component from the addrec start.
  const SCEV *Start = Normalized->getStart();
  const SCEV *PostLoopOffset = 0;
  if (!SE.properlyDominates(Start, L->getHeader())) {
    PostLoopOffset = Start;
    Start = SE.getConstant(Normalized->getType(), 0);
    Normalized = cast<SCEVAddRecExpr>(
      SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
                       Normalized->getLoop(),
                       Normalized->getNoWrapFlags(SCEV::FlagNW)));
  }

  // Strip off any non-loop-dominating component from the addrec step.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  const SCEV *PostLoopScale = 0;
  if (!SE.dominates(Step, L->getHeader())) {
    PostLoopScale = Step;
    Step = SE.getConstant(Normalized->getType(), 1);
    Normalized =
      cast<SCEVAddRecExpr>(SE.getAddRecExpr(
                             Start, Step, Normalized->getLoop(),
                             Normalized->getNoWrapFlags(SCEV::FlagNW)));
  }

  // Expand the core addrec. If we need post-loop scaling, force it to
  // expand to an integer type to avoid the need for additional casting.
  Type *ExpandTy = PostLoopScale ? IntTy : STy;
  PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy);

  // Accommodate post-inc mode, if necessary.
  Value *Result;
  if (!PostIncLoops.count(L))
    Result = PN;
  else {
    // In PostInc mode, use the post-incremented value.
    BasicBlock *LatchBlock = L->getLoopLatch();
    assert(LatchBlock && "PostInc mode requires a unique loop latch!");
    Result = PN->getIncomingValueForBlock(LatchBlock);

    // For an expansion to use the postinc form, the client must call
    // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
    // or dominated by IVIncInsertPos.
    if (isa<Instruction>(Result)
        && !SE.DT->dominates(cast<Instruction>(Result),
                             Builder.GetInsertPoint())) {
      // The induction variable's postinc expansion does not dominate this use.
      // IVUsers tries to prevent this case, so it is rare. However, it can
      // happen when an IVUser outside the loop is not dominated by the latch
      // block. Adjusting IVIncInsertPos before expansion begins cannot handle
      // all cases. Consider a phi outside the loop whose operand is replaced
      // during expansion with the value of the postinc user. Without
      // fundamentally changing the way postinc users are tracked, the only
      // remedy is inserting an extra IV increment. StepV might fold into
      // PostLoopOffset, but hopefully expandCodeFor handles that.
      bool useSubtract =
        !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
      if (useSubtract)
        Step = SE.getNegativeSCEV(Step);
      Value *StepV;
      {
        // Expand the step somewhere that dominates the loop header.
        BuilderType::InsertPointGuard Guard(Builder);
        StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());
      }
      Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
    }
  }

  // Re-apply any non-loop-dominating scale.
  if (PostLoopScale) {
    assert(S->isAffine() && "Can't linearly scale non-affine recurrences.");
    Result = InsertNoopCastOfTo(Result, IntTy);
    Result = Builder.CreateMul(Result,
                               expandCodeFor(PostLoopScale, IntTy));
    rememberInstruction(Result);
  }

  // Re-apply any non-loop-dominating offset.
  if (PostLoopOffset) {
    if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
      const SCEV *const OffsetArray[1] = { PostLoopOffset };
      Result = expandAddToGEP(OffsetArray, OffsetArray+1, PTy, IntTy, Result);
    } else {
      Result = InsertNoopCastOfTo(Result, IntTy);
      Result = Builder.CreateAdd(Result,
                                 expandCodeFor(PostLoopOffset, IntTy));
      rememberInstruction(Result);
    }
  }

  return Result;
}

1259Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
1260  if (!CanonicalMode) return expandAddRecExprLiterally(S);
1261
1262  Type *Ty = SE.getEffectiveSCEVType(S->getType());
1263  const Loop *L = S->getLoop();
1264
1265  // First check for an existing canonical IV in a suitable type.
1266  PHINode *CanonicalIV = 0;
1267  if (PHINode *PN = L->getCanonicalInductionVariable())
1268    if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
1269      CanonicalIV = PN;
1270
1271  // Rewrite an AddRec in terms of the canonical induction variable, if
1272  // its type is more narrow.
1273  if (CanonicalIV &&
1274      SE.getTypeSizeInBits(CanonicalIV->getType()) >
1275      SE.getTypeSizeInBits(Ty)) {
1276    SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
1277    for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
1278      NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
1279    Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
1280                                       S->getNoWrapFlags(SCEV::FlagNW)));
1281    BasicBlock::iterator NewInsertPt =
1282      llvm::next(BasicBlock::iterator(cast<Instruction>(V)));
1283    BuilderType::InsertPointGuard Guard(Builder);
1284    while (isa<PHINode>(NewInsertPt) || isa<DbgInfoIntrinsic>(NewInsertPt) ||
1285           isa<LandingPadInst>(NewInsertPt))
1286      ++NewInsertPt;
1287    V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), 0,
1288                      NewInsertPt);
1289    return V;
1290  }
1291
1292  // {X,+,F} --> X + {0,+,F}
1293  if (!S->getStart()->isZero()) {
1294    SmallVector<const SCEV *, 4> NewOps(S->op_begin(), S->op_end());
1295    NewOps[0] = SE.getConstant(Ty, 0);
1296    const SCEV *Rest = SE.getAddRecExpr(NewOps, L,
1297                                        S->getNoWrapFlags(SCEV::FlagNW));
1298
1299    // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
1300    // comments on expandAddToGEP for details.
1301    const SCEV *Base = S->getStart();
1302    const SCEV *RestArray[1] = { Rest };
1303    // Dig into the expression to find the pointer base for a GEP.
1304    ExposePointerBase(Base, RestArray[0], SE);
1305    // If we found a pointer, expand the AddRec with a GEP.
1306    if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
1307      // Make sure the Base isn't something exotic, such as a multiplied
1308      // or divided pointer value. In those cases, the result type isn't
1309      // actually a pointer type.
1310      if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
1311        Value *StartV = expand(Base);
1312        assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
1313        return expandAddToGEP(RestArray, RestArray+1, PTy, Ty, StartV);
1314      }
1315    }
1316
1317    // Just do a normal add. Pre-expand the operands to suppress folding.
1318    return expand(SE.getAddExpr(SE.getUnknown(expand(S->getStart())),
1319                                SE.getUnknown(expand(Rest))));
1320  }
1321
1322  // If we don't yet have a canonical IV, create one.
1323  if (!CanonicalIV) {
1324    // Create and insert the PHI node for the induction variable in the
1325    // specified loop.
1326    BasicBlock *Header = L->getHeader();
1327    pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
1328    CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
1329                                  Header->begin());
1330    rememberInstruction(CanonicalIV);
1331
1332    SmallSet<BasicBlock *, 4> PredSeen;
1333    Constant *One = ConstantInt::get(Ty, 1);
1334    for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
1335      BasicBlock *HP = *HPI;
1336      if (!PredSeen.insert(HP))
1337        continue;
1338
1339      if (L->contains(HP)) {
1340        // Insert a unit add instruction right before the terminator
1341        // corresponding to the back-edge.
1342        Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
1343                                                     "indvar.next",
1344                                                     HP->getTerminator());
1345        Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
1346        rememberInstruction(Add);
1347        CanonicalIV->addIncoming(Add, HP);
1348      } else {
1349        CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
1350      }
1351    }
1352  }
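  // The phi built above follows the usual canonical-IV shape (illustrative
  // IR; actual block and value names depend on the function):
  //   header:
  //     %indvar = phi i32 [ 0, %preheader ], [ %indvar.next, %backedge ]
  //   backedge:
  //     %indvar.next = add i32 %indvar, 1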
1353
1354  // {0,+,1} --> Insert a canonical induction variable into the loop!
1355  if (S->isAffine() && S->getOperand(1)->isOne()) {
1356    assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
1357           "IVs with types different from the canonical IV should "
1358           "already have been handled!");
1359    return CanonicalIV;
1360  }
1361
1362  // {0,+,F} --> {0,+,1} * F
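  // For example (illustrative): {0,+,4}<L> becomes 4 * %indvar, computed in
  // the canonical IV's type and truncated to the requested type if needed.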
1363
1364  // If this is a simple linear addrec, emit it now as a special case.
1365  if (S->isAffine())    // {0,+,F} --> i*F
1366    return
1367      expand(SE.getTruncateOrNoop(
1368        SE.getMulExpr(SE.getUnknown(CanonicalIV),
1369                      SE.getNoopOrAnyExtend(S->getOperand(1),
1370                                            CanonicalIV->getType())),
1371        Ty));
1372
1373  // If this is a chain of recurrences, turn it into a closed form, using the
1374  // folders, then expandCodeFor the closed form.  This allows the folders to
1375  // simplify the expression without having to build a bunch of special code
1376  // into this expander.
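  // For instance (illustrative): evaluating {0,+,1,+,1}<L> at iteration i
  // folds to i + i*(i-1)/2, the binomial expansion that evaluateAtIteration
  // performs below.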
1377  const SCEV *IH = SE.getUnknown(CanonicalIV);   // Get I as a "symbolic" SCEV.
1378
1379  // Promote S up to the canonical IV type, if the cast is foldable.
1380  const SCEV *NewS = S;
1381  const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
1382  if (isa<SCEVAddRecExpr>(Ext))
1383    NewS = Ext;
1384
1385  const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
1387
1388  // Truncate the result down to the original type, if needed.
1389  const SCEV *T = SE.getTruncateOrNoop(V, Ty);
1390  return expand(T);
1391}
1392
1393Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
1394  Type *Ty = SE.getEffectiveSCEVType(S->getType());
1395  Value *V = expandCodeFor(S->getOperand(),
1396                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
1397  Value *I = Builder.CreateTrunc(V, Ty);
1398  rememberInstruction(I);
1399  return I;
1400}
1401
1402Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
1403  Type *Ty = SE.getEffectiveSCEVType(S->getType());
1404  Value *V = expandCodeFor(S->getOperand(),
1405                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
1406  Value *I = Builder.CreateZExt(V, Ty);
1407  rememberInstruction(I);
1408  return I;
1409}
1410
1411Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
1412  Type *Ty = SE.getEffectiveSCEVType(S->getType());
1413  Value *V = expandCodeFor(S->getOperand(),
1414                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
1415  Value *I = Builder.CreateSExt(V, Ty);
1416  rememberInstruction(I);
1417  return I;
1418}
1419
1420Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
1421  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
1422  Type *Ty = LHS->getType();
1423  for (int i = S->getNumOperands()-2; i >= 0; --i) {
1424    // In the case of mixed integer and pointer types, do the
1425    // rest of the comparisons as integer.
1426    if (S->getOperand(i)->getType() != Ty) {
1427      Ty = SE.getEffectiveSCEVType(Ty);
1428      LHS = InsertNoopCastOfTo(LHS, Ty);
1429    }
1430    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
1431    Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
1432    rememberInstruction(ICmp);
1433    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
1434    rememberInstruction(Sel);
1435    LHS = Sel;
1436  }
1437  // In the case of mixed integer and pointer types, cast the
1438  // final result back to the pointer type.
1439  if (LHS->getType() != S->getType())
1440    LHS = InsertNoopCastOfTo(LHS, S->getType());
1441  return LHS;
1442}
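// A two-operand smax therefore expands to (illustrative IR):
//   %cmp  = icmp sgt i32 %lhs, %rhs
//   %smax = select i1 %cmp, i32 %lhs, i32 %rhs
// visitUMaxExpr below is identical except that it compares with icmp ugt.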
1443
1444Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
1445  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
1446  Type *Ty = LHS->getType();
1447  for (int i = S->getNumOperands()-2; i >= 0; --i) {
1448    // In the case of mixed integer and pointer types, do the
1449    // rest of the comparisons as integer.
1450    if (S->getOperand(i)->getType() != Ty) {
1451      Ty = SE.getEffectiveSCEVType(Ty);
1452      LHS = InsertNoopCastOfTo(LHS, Ty);
1453    }
1454    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
1455    Value *ICmp = Builder.CreateICmpUGT(LHS, RHS);
1456    rememberInstruction(ICmp);
1457    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
1458    rememberInstruction(Sel);
1459    LHS = Sel;
1460  }
1461  // In the case of mixed integer and pointer types, cast the
1462  // final result back to the pointer type.
1463  if (LHS->getType() != S->getType())
1464    LHS = InsertNoopCastOfTo(LHS, S->getType());
1465  return LHS;
1466}
1467
1468Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
1469                                   Instruction *IP) {
1470  Builder.SetInsertPoint(IP->getParent(), IP);
1471  return expandCodeFor(SH, Ty);
1472}
1473
1474Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
1475  // Expand the code for this SCEV.
1476  Value *V = expand(SH);
1477  if (Ty) {
1478    assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
1479           "non-trivial casts should be done with the SCEVs directly!");
1480    V = InsertNoopCastOfTo(V, Ty);
1481  }
1482  return V;
1483}
1484
1485Value *SCEVExpander::expand(const SCEV *S) {
1486  // Compute an insertion point for this SCEV object. Hoist the instructions
1487  // as far out in the loop nest as possible.
1488  Instruction *InsertPt = Builder.GetInsertPoint();
1489  for (Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock()); ;
1490       L = L->getParentLoop())
1491    if (SE.isLoopInvariant(S, L)) {
1492      if (!L) break;
1493      if (BasicBlock *Preheader = L->getLoopPreheader())
1494        InsertPt = Preheader->getTerminator();
1495      else {
1496        // LSR sets the insertion point for AddRec start/step values to the
1497        // block start to simplify value reuse, even though it's an invalid
1498        // position. SCEVExpander must correct for this in all cases.
1499        InsertPt = L->getHeader()->getFirstInsertionPt();
1500      }
1501    } else {
1502      // If the SCEV is computable at this level, insert it into the header
1503      // after the PHIs (and after any other instructions that we've inserted
1504      // there) so that it is guaranteed to dominate any user inside the loop.
1505      if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
1506        InsertPt = L->getHeader()->getFirstInsertionPt();
1507      while (InsertPt != Builder.GetInsertPoint()
1508             && (isInsertedInstruction(InsertPt)
1509                 || isa<DbgInfoIntrinsic>(InsertPt))) {
1510        InsertPt = llvm::next(BasicBlock::iterator(InsertPt));
1511      }
1512      break;
1513    }
1514
1515  // Check to see if we already expanded this here.
1516  std::map<std::pair<const SCEV *, Instruction *>, TrackingVH<Value> >::iterator
1517    I = InsertedExpressions.find(std::make_pair(S, InsertPt));
1518  if (I != InsertedExpressions.end())
1519    return I->second;
1520
1521  BuilderType::InsertPointGuard Guard(Builder);
1522  Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);
1523
1524  // Expand the expression into instructions.
1525  Value *V = visit(S);
1526
1527  // Remember the expanded value for this SCEV at this location.
1528  //
1529  // This is independent of PostIncLoops. The mapped value simply materializes
1530  // the expression at this insertion point. If the mapped value happened to be
1531  // a postinc expansion, it could be reused by a non-postinc user, but only if
1532  // its insertion point was already at the head of the loop.
1533  InsertedExpressions[std::make_pair(S, InsertPt)] = V;
1534  return V;
1535}
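// An illustrative consequence of the memoization above: expanding the same
// loop-invariant SCEV twice from inside a loop with a preheader resolves to a
// single instruction, since both requests hoist to the same InsertPt and hit
// the same (S, InsertPt) entry in InsertedExpressions.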
1536
1537void SCEVExpander::rememberInstruction(Value *I) {
1538  if (!PostIncLoops.empty())
1539    InsertedPostIncValues.insert(I);
1540  else
1541    InsertedValues.insert(I);
1542}
1543
1544/// getOrInsertCanonicalInductionVariable - This method returns the
1545/// canonical induction variable of the specified type for the specified
1546/// loop (inserting one if there is none).  A canonical induction variable
1547/// starts at zero and steps by one on each iteration.
1548PHINode *
1549SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
1550                                                    Type *Ty) {
1551  assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");
1552
1553  // Build a SCEV for {0,+,1}<L>.
1554  // Conservatively use FlagAnyWrap for now.
1555  const SCEV *H = SE.getAddRecExpr(SE.getConstant(Ty, 0),
1556                                   SE.getConstant(Ty, 1), L, SCEV::FlagAnyWrap);
1557
1558  // Emit code for it.
1559  BuilderType::InsertPointGuard Guard(Builder);
1560  PHINode *V = cast<PHINode>(expandCodeFor(H, 0, L->getHeader()->begin()));
1561
1562  return V;
1563}
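// A sketch of typical use (hypothetical caller; Expander, L, and Ctx are
// assumed to be in scope):
//   PHINode *IV =
//     Expander.getOrInsertCanonicalInductionVariable(L, Type::getInt32Ty(Ctx));
//   // IV is {0,+,1}<L>: zero on loop entry, incremented by one per iteration.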
1564
1565/// Sort values by integer width for replaceCongruentIVs.
1566static bool width_descending(Value *lhs, Value *rhs) {
1567  // Put pointers at the back, and make sure "pointer < pointer" yields false.
1568  if (!lhs->getType()->isIntegerTy() || !rhs->getType()->isIntegerTy())
1569    return rhs->getType()->isIntegerTy() && !lhs->getType()->isIntegerTy();
1570  return rhs->getType()->getPrimitiveSizeInBits()
1571    < lhs->getType()->getPrimitiveSizeInBits();
1572}
1573
1574/// replaceCongruentIVs - Check for congruent phis in this loop header and
1575/// replace them with their most canonical representative. Return the number of
1576/// phis eliminated.
1577///
1578/// This does not depend on any SCEVExpander state but should be used in
1579/// the same context that SCEVExpander is used.
1580unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
1581                                           SmallVectorImpl<WeakVH> &DeadInsts,
1582                                           const TargetTransformInfo *TTI) {
1583  // Collect the loop-header phis and, with TTI, sort them widest first.
1584  SmallVector<PHINode*, 8> Phis;
1585  for (BasicBlock::iterator I = L->getHeader()->begin();
1586       PHINode *Phi = dyn_cast<PHINode>(I); ++I) {
1587    Phis.push_back(Phi);
1588  }
1589  if (TTI)
1590    std::sort(Phis.begin(), Phis.end(), width_descending);
1591
1592  unsigned NumElim = 0;
1593  DenseMap<const SCEV *, PHINode *> ExprToIVMap;
1594  // Process phis from wide to narrow, mapping wide phis to their
1595  // truncation so narrow phis can reuse them.
1596  for (SmallVectorImpl<PHINode*>::const_iterator PIter = Phis.begin(),
1597         PEnd = Phis.end(); PIter != PEnd; ++PIter) {
1598    PHINode *Phi = *PIter;
1599
1600    // Fold constant phis. They may be congruent to other constant phis and
1601    // would confuse the logic below that expects proper IVs.
1602    if (Value *V = Phi->hasConstantValue()) {
1603      Phi->replaceAllUsesWith(V);
1604      DeadInsts.push_back(Phi);
1605      ++NumElim;
1606      DEBUG_WITH_TYPE(DebugType, dbgs()
1607                      << "INDVARS: Eliminated constant iv: " << *Phi << '\n');
1608      continue;
1609    }
1610
1611    if (!SE.isSCEVable(Phi->getType()))
1612      continue;
1613
1614    PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
1615    if (!OrigPhiRef) {
1616      OrigPhiRef = Phi;
1617      if (Phi->getType()->isIntegerTy() && TTI
1618          && TTI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
1619        // This phi can be freely truncated to the narrowest phi type. Map the
1620        // truncated expression to it so it will be reused for narrow types.
1621        const SCEV *TruncExpr =
1622          SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType());
1623        ExprToIVMap[TruncExpr] = Phi;
1624      }
1625      continue;
1626    }
1627
1628    // Replacing a pointer phi with an integer phi or vice-versa doesn't make
1629    // sense.
1630    if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
1631      continue;
1632
1633    if (BasicBlock *LatchBlock = L->getLoopLatch()) {
1634      Instruction *OrigInc =
1635        cast<Instruction>(OrigPhiRef->getIncomingValueForBlock(LatchBlock));
1636      Instruction *IsomorphicInc =
1637        cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));
1638
1639      // If this phi has the same width but is more canonical, replace the
1640      // original with it. As part of the "more canonical" determination,
1641      // respect a prior decision to use an IV chain.
1642      if (OrigPhiRef->getType() == Phi->getType()
1643          && !(ChainedPhis.count(Phi)
1644               || isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L))
1645          && (ChainedPhis.count(Phi)
1646              || isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
1647        std::swap(OrigPhiRef, Phi);
1648        std::swap(OrigInc, IsomorphicInc);
1649      }
1650      // Replacing the congruent phi is sufficient because acyclic redundancy
1651      // elimination (CSE/GVN) should handle the rest. However, once SCEV proves
1652      // that a phi is congruent, it's often the head of an IV user cycle that
1653      // is isomorphic with the original phi. It's worth eagerly cleaning up the
1654      // common case of a single IV increment so that DeleteDeadPHIs can remove
1655      // cycles that had postinc uses.
1656      const SCEV *TruncExpr = SE.getTruncateOrNoop(SE.getSCEV(OrigInc),
1657                                                   IsomorphicInc->getType());
1658      if (OrigInc != IsomorphicInc
1659          && TruncExpr == SE.getSCEV(IsomorphicInc)
1660          && ((isa<PHINode>(OrigInc) && isa<PHINode>(IsomorphicInc))
1661              || hoistIVInc(OrigInc, IsomorphicInc))) {
1662        DEBUG_WITH_TYPE(DebugType, dbgs()
1663                        << "INDVARS: Eliminated congruent iv.inc: "
1664                        << *IsomorphicInc << '\n');
1665        Value *NewInc = OrigInc;
1666        if (OrigInc->getType() != IsomorphicInc->getType()) {
1667          Instruction *IP = isa<PHINode>(OrigInc)
1668            ? (Instruction*)L->getHeader()->getFirstInsertionPt()
1669            : OrigInc->getNextNode();
1670          IRBuilder<> Builder(IP);
1671          Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
1672          NewInc = Builder.
1673            CreateTruncOrBitCast(OrigInc, IsomorphicInc->getType(), IVName);
1674        }
1675        IsomorphicInc->replaceAllUsesWith(NewInc);
1676        DeadInsts.push_back(IsomorphicInc);
1677      }
1678    }
1679    DEBUG_WITH_TYPE(DebugType, dbgs()
1680                    << "INDVARS: Eliminated congruent iv: " << *Phi << '\n');
1681    ++NumElim;
1682    Value *NewIV = OrigPhiRef;
1683    if (OrigPhiRef->getType() != Phi->getType()) {
1684      IRBuilder<> Builder(L->getHeader()->getFirstInsertionPt());
1685      Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
1686      NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
1687    }
1688    Phi->replaceAllUsesWith(NewIV);
1689    DeadInsts.push_back(Phi);
1690  }
1691  return NumElim;
1692}
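// Illustrative effect: if a loop header carries both %iv64 = {0,+,1} (i64) and
// %iv32 = {0,+,1} (i32), and TTI reports the i64-to-i32 truncation as free,
// the pass above rewrites users of %iv32 to "trunc i64 %iv64 to i32" and
// queues %iv32 and its increment on DeadInsts.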
1693
1694namespace {
1695// Search for a SCEV subexpression that is not safe to expand.  Any expression
1696// that may expand to a !isSafeToSpeculativelyExecute value is unsafe; in
1697// practice that means UDiv expressions. We don't know whether the UDiv is
1698// derived from an IR divide instruction, but the important thing is that we
1699// prove the denominator is nonzero before expansion.
1700//
1701// IVUsers already checks that IV-derived expressions are safe. So this check is
1702// only needed when the expression includes some subexpression that is not IV
1703// derived.
1704//
1705// Currently, we only allow division by a nonzero constant here. If this is
1706// inadequate, we could easily allow division by SCEVUnknown by using
1707// ValueTracking to check isKnownNonZero().
1708//
1709// We cannot generally expand recurrences unless the step dominates the loop
1710// header. The expander handles the special case of affine recurrences by
1711// scaling the recurrence outside the loop, but this technique isn't generally
1712// applicable. Expanding a nested recurrence outside a loop requires computing
1713// binomial coefficients. This could be done, but the recurrence has to be in a
1714// perfectly reduced form, which can't be guaranteed.
1715struct SCEVFindUnsafe {
1716  ScalarEvolution &SE;
1717  bool IsUnsafe;
1718
1719  SCEVFindUnsafe(ScalarEvolution &se): SE(se), IsUnsafe(false) {}
1720
1721  bool follow(const SCEV *S) {
1722    if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
1723      const SCEVConstant *SC = dyn_cast<SCEVConstant>(D->getRHS());
1724      if (!SC || SC->getValue()->isZero()) {
1725        IsUnsafe = true;
1726        return false;
1727      }
1728    }
1729    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
1730      const SCEV *Step = AR->getStepRecurrence(SE);
1731      if (!AR->isAffine() && !SE.dominates(Step, AR->getLoop()->getHeader())) {
1732        IsUnsafe = true;
1733        return false;
1734      }
1735    }
1736    return true;
1737  }
1738  bool isDone() const { return IsUnsafe; }
1739};
1740}
1741
1742namespace llvm {
1743bool isSafeToExpand(const SCEV *S, ScalarEvolution &SE) {
1744  SCEVFindUnsafe Search(SE);
1745  visitAll(S, Search);
1746  return !Search.IsUnsafe;
1747}
1748}
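// A sketch of typical use (hypothetical caller; SE, Expander, and InsertPt are
// assumed to be in scope):
//   if (isSafeToExpand(S, SE))
//     Value *V = Expander.expandCodeFor(S, S->getType(), InsertPt);
//   // Otherwise S contains a possibly-trapping UDiv or a non-affine step that
//   // does not dominate the header, and the caller must not expand it.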
1749