1193323Sed//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis --*- C++ -*-===// 2193323Sed// 3193323Sed// The LLVM Compiler Infrastructure 4193323Sed// 5193323Sed// This file is distributed under the University of Illinois Open Source 6193323Sed// License. See LICENSE.TXT for details. 7193323Sed// 8193323Sed//===----------------------------------------------------------------------===// 9193323Sed// 10193323Sed// This file contains the implementation of the scalar evolution expander, 11193323Sed// which is used to generate the code corresponding to a given scalar evolution 12193323Sed// expression. 13193323Sed// 14193323Sed//===----------------------------------------------------------------------===// 15193323Sed 16193323Sed#include "llvm/Analysis/ScalarEvolutionExpander.h" 17263509Sdim#include "llvm/ADT/SmallSet.h" 18252723Sdim#include "llvm/ADT/STLExtras.h" 19193323Sed#include "llvm/Analysis/LoopInfo.h" 20252723Sdim#include "llvm/Analysis/TargetTransformInfo.h" 21252723Sdim#include "llvm/IR/DataLayout.h" 22252723Sdim#include "llvm/IR/IntrinsicInst.h" 23252723Sdim#include "llvm/IR/LLVMContext.h" 24226890Sdim#include "llvm/Support/Debug.h" 25224145Sdim 26193323Sedusing namespace llvm; 27193323Sed 28210299Sed/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP, 29210299Sed/// reusing an existing cast if a suitable one exists, moving an existing 30210299Sed/// cast if a suitable one exists but isn't in the right place, or 31210299Sed/// creating a new one. 32226890SdimValue *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty, 33210299Sed Instruction::CastOps Op, 34210299Sed BasicBlock::iterator IP) { 35235633Sdim // This function must be called with the builder having a valid insertion 36235633Sdim // point. It doesn't need to be the actual IP where the uses of the returned 37235633Sdim // cast will be added, but it must dominate such IP. 
38235633Sdim // We use this precondition to produce a cast that will dominate all its 39235633Sdim // uses. In particular, this is crucial for the case where the builder's 40235633Sdim // insertion point *is* the point where we were asked to put the cast. 41245431Sdim // Since we don't know the builder's insertion point is actually 42235633Sdim // where the uses will be added (only that it dominates it), we are 43235633Sdim // not allowed to move it. 44235633Sdim BasicBlock::iterator BIP = Builder.GetInsertPoint(); 45235633Sdim 46235633Sdim Instruction *Ret = NULL; 47235633Sdim 48210299Sed // Check to see if there is already a cast! 49210299Sed for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); 50210299Sed UI != E; ++UI) { 51210299Sed User *U = *UI; 52210299Sed if (U->getType() == Ty) 53210299Sed if (CastInst *CI = dyn_cast<CastInst>(U)) 54210299Sed if (CI->getOpcode() == Op) { 55235633Sdim // If the cast isn't where we want it, create a new cast at IP. 56235633Sdim // Likewise, do not reuse a cast at BIP because it must dominate 57235633Sdim // instructions that might be inserted before BIP. 58235633Sdim if (BasicBlock::iterator(CI) != IP || BIP == IP) { 59210299Sed // Create a new cast, and leave the old cast in place in case 60210299Sed // it is being used as an insert point. Clear its operand 61210299Sed // so that it doesn't hold anything live. 62235633Sdim Ret = CastInst::Create(Op, V, Ty, "", IP); 63235633Sdim Ret->takeName(CI); 64235633Sdim CI->replaceAllUsesWith(Ret); 65210299Sed CI->setOperand(0, UndefValue::get(V->getType())); 66235633Sdim break; 67210299Sed } 68235633Sdim Ret = CI; 69235633Sdim break; 70210299Sed } 71210299Sed } 72210299Sed 73210299Sed // Create a new cast. 
74235633Sdim if (!Ret) 75235633Sdim Ret = CastInst::Create(Op, V, Ty, V->getName(), IP); 76235633Sdim 77235633Sdim // We assert at the end of the function since IP might point to an 78235633Sdim // instruction with different dominance properties than a cast 79235633Sdim // (an invoke for example) and not dominate BIP (but the cast does). 80235633Sdim assert(SE.DT->dominates(Ret, BIP)); 81235633Sdim 82235633Sdim rememberInstruction(Ret); 83235633Sdim return Ret; 84210299Sed} 85210299Sed 86195340Sed/// InsertNoopCastOfTo - Insert a cast of V to the specified type, 87195340Sed/// which must be possible with a noop cast, doing what we can to share 88195340Sed/// the casts. 89226890SdimValue *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) { 90195340Sed Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false); 91195340Sed assert((Op == Instruction::BitCast || 92195340Sed Op == Instruction::PtrToInt || 93195340Sed Op == Instruction::IntToPtr) && 94195340Sed "InsertNoopCastOfTo cannot perform non-noop casts!"); 95195340Sed assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) && 96195340Sed "InsertNoopCastOfTo cannot change sizes!"); 97195340Sed 98193323Sed // Short-circuit unnecessary bitcasts. 99235633Sdim if (Op == Instruction::BitCast) { 100235633Sdim if (V->getType() == Ty) 101235633Sdim return V; 102235633Sdim if (CastInst *CI = dyn_cast<CastInst>(V)) { 103235633Sdim if (CI->getOperand(0)->getType() == Ty) 104235633Sdim return CI->getOperand(0); 105235633Sdim } 106235633Sdim } 107193323Sed // Short-circuit unnecessary inttoptr<->ptrtoint casts. 
108195340Sed if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) && 109193323Sed SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) { 110193323Sed if (CastInst *CI = dyn_cast<CastInst>(V)) 111193323Sed if ((CI->getOpcode() == Instruction::PtrToInt || 112193323Sed CI->getOpcode() == Instruction::IntToPtr) && 113193323Sed SE.getTypeSizeInBits(CI->getType()) == 114193323Sed SE.getTypeSizeInBits(CI->getOperand(0)->getType())) 115193323Sed return CI->getOperand(0); 116193323Sed if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) 117193323Sed if ((CE->getOpcode() == Instruction::PtrToInt || 118193323Sed CE->getOpcode() == Instruction::IntToPtr) && 119193323Sed SE.getTypeSizeInBits(CE->getType()) == 120193323Sed SE.getTypeSizeInBits(CE->getOperand(0)->getType())) 121193323Sed return CE->getOperand(0); 122193323Sed } 123193323Sed 124210299Sed // Fold a cast of a constant. 125193323Sed if (Constant *C = dyn_cast<Constant>(V)) 126195340Sed return ConstantExpr::getCast(Op, C, Ty); 127198090Srdivacky 128210299Sed // Cast the argument at the beginning of the entry block, after 129210299Sed // any bitcasts of other arguments. 130193323Sed if (Argument *A = dyn_cast<Argument>(V)) { 131210299Sed BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin(); 132210299Sed while ((isa<BitCastInst>(IP) && 133210299Sed isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) && 134210299Sed cast<BitCastInst>(IP)->getOperand(0) != A) || 135226890Sdim isa<DbgInfoIntrinsic>(IP) || 136226890Sdim isa<LandingPadInst>(IP)) 137210299Sed ++IP; 138210299Sed return ReuseOrCreateCast(A, Ty, Op, IP); 139193323Sed } 140193323Sed 141210299Sed // Cast the instruction immediately after the instruction. 
142193323Sed Instruction *I = cast<Instruction>(V); 143193323Sed BasicBlock::iterator IP = I; ++IP; 144193323Sed if (InvokeInst *II = dyn_cast<InvokeInst>(I)) 145193323Sed IP = II->getNormalDest()->begin(); 146235633Sdim while (isa<PHINode>(IP) || isa<LandingPadInst>(IP)) 147226890Sdim ++IP; 148210299Sed return ReuseOrCreateCast(I, Ty, Op, IP); 149193323Sed} 150193323Sed 151193323Sed/// InsertBinop - Insert the specified binary operator, doing a small amount 152193323Sed/// of work to avoid inserting an obviously redundant operation. 153195340SedValue *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode, 154195340Sed Value *LHS, Value *RHS) { 155193323Sed // Fold a binop with constant operands. 156193323Sed if (Constant *CLHS = dyn_cast<Constant>(LHS)) 157193323Sed if (Constant *CRHS = dyn_cast<Constant>(RHS)) 158193323Sed return ConstantExpr::get(Opcode, CLHS, CRHS); 159193323Sed 160193323Sed // Do a quick scan to see if we have this binop nearby. If so, reuse it. 161193323Sed unsigned ScanLimit = 6; 162195340Sed BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin(); 163195340Sed // Scanning starts from the last instruction before the insertion point. 164195340Sed BasicBlock::iterator IP = Builder.GetInsertPoint(); 165195340Sed if (IP != BlockBegin) { 166193323Sed --IP; 167193323Sed for (; ScanLimit; --IP, --ScanLimit) { 168204792Srdivacky // Don't count dbg.value against the ScanLimit, to avoid perturbing the 169204792Srdivacky // generated code. 170204792Srdivacky if (isa<DbgInfoIntrinsic>(IP)) 171204792Srdivacky ScanLimit++; 172193323Sed if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS && 173193323Sed IP->getOperand(1) == RHS) 174193323Sed return IP; 175193323Sed if (IP == BlockBegin) break; 176193323Sed } 177193323Sed } 178195340Sed 179204642Srdivacky // Save the original insertion point so we can restore it when we're done. 
180263509Sdim DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc(); 181263509Sdim BuilderType::InsertPointGuard Guard(Builder); 182204642Srdivacky 183204642Srdivacky // Move the insertion point out of as many loops as we can. 184204642Srdivacky while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) { 185204642Srdivacky if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break; 186204642Srdivacky BasicBlock *Preheader = L->getLoopPreheader(); 187204642Srdivacky if (!Preheader) break; 188204642Srdivacky 189204642Srdivacky // Ok, move up a level. 190204642Srdivacky Builder.SetInsertPoint(Preheader, Preheader->getTerminator()); 191204642Srdivacky } 192204642Srdivacky 193193323Sed // If we haven't found this binop, insert it. 194226890Sdim Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS)); 195263509Sdim BO->setDebugLoc(Loc); 196202878Srdivacky rememberInstruction(BO); 197204642Srdivacky 198193323Sed return BO; 199193323Sed} 200193323Sed 201193323Sed/// FactorOutConstant - Test if S is divisible by Factor, using signed 202193323Sed/// division. If so, update S with Factor divided out and return true. 203204642Srdivacky/// S need not be evenly divisible if a reasonable remainder can be 204193323Sed/// computed. 205193323Sed/// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made 206193323Sed/// unnecessary; in its place, just signed-divide Ops[i] by the scale and 207193323Sed/// check to see if the divide was folded. 208198090Srdivackystatic bool FactorOutConstant(const SCEV *&S, 209198090Srdivacky const SCEV *&Remainder, 210198090Srdivacky const SCEV *Factor, 211198090Srdivacky ScalarEvolution &SE, 212245431Sdim const DataLayout *TD) { 213193323Sed // Everything is divisible by one. 214198090Srdivacky if (Factor->isOne()) 215193323Sed return true; 216193323Sed 217198090Srdivacky // x/x == 1. 
218198090Srdivacky if (S == Factor) { 219207618Srdivacky S = SE.getConstant(S->getType(), 1); 220198090Srdivacky return true; 221198090Srdivacky } 222198090Srdivacky 223193323Sed // For a Constant, check for a multiple of the given factor. 224193323Sed if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) { 225198090Srdivacky // 0/x == 0. 226198090Srdivacky if (C->isZero()) 227193323Sed return true; 228198090Srdivacky // Check for divisibility. 229198090Srdivacky if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) { 230198090Srdivacky ConstantInt *CI = 231198090Srdivacky ConstantInt::get(SE.getContext(), 232198090Srdivacky C->getValue()->getValue().sdiv( 233198090Srdivacky FC->getValue()->getValue())); 234198090Srdivacky // If the quotient is zero and the remainder is non-zero, reject 235198090Srdivacky // the value at this scale. It will be considered for subsequent 236198090Srdivacky // smaller scales. 237198090Srdivacky if (!CI->isZero()) { 238198090Srdivacky const SCEV *Div = SE.getConstant(CI); 239198090Srdivacky S = Div; 240198090Srdivacky Remainder = 241198090Srdivacky SE.getAddExpr(Remainder, 242198090Srdivacky SE.getConstant(C->getValue()->getValue().srem( 243198090Srdivacky FC->getValue()->getValue()))); 244198090Srdivacky return true; 245198090Srdivacky } 246193323Sed } 247193323Sed } 248193323Sed 249193323Sed // In a Mul, check if there is a constant operand which is a multiple 250193323Sed // of the given factor. 251198090Srdivacky if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) { 252198090Srdivacky if (TD) { 253245431Sdim // With DataLayout, the size is known. Check if there is a constant 254198090Srdivacky // operand which is a multiple of the given factor. If so, we can 255198090Srdivacky // factor it. 
256198090Srdivacky const SCEVConstant *FC = cast<SCEVConstant>(Factor); 257198090Srdivacky if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0))) 258198090Srdivacky if (!C->getValue()->getValue().srem(FC->getValue()->getValue())) { 259205407Srdivacky SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end()); 260198090Srdivacky NewMulOps[0] = 261198090Srdivacky SE.getConstant(C->getValue()->getValue().sdiv( 262198090Srdivacky FC->getValue()->getValue())); 263198090Srdivacky S = SE.getMulExpr(NewMulOps); 264198090Srdivacky return true; 265198090Srdivacky } 266198090Srdivacky } else { 267245431Sdim // Without DataLayout, check if Factor can be factored out of any of the 268198090Srdivacky // Mul's operands. If so, we can just remove it. 269198090Srdivacky for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { 270198090Srdivacky const SCEV *SOp = M->getOperand(i); 271207618Srdivacky const SCEV *Remainder = SE.getConstant(SOp->getType(), 0); 272198090Srdivacky if (FactorOutConstant(SOp, Remainder, Factor, SE, TD) && 273198090Srdivacky Remainder->isZero()) { 274205407Srdivacky SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end()); 275198090Srdivacky NewMulOps[i] = SOp; 276198090Srdivacky S = SE.getMulExpr(NewMulOps); 277198090Srdivacky return true; 278198090Srdivacky } 279193323Sed } 280198090Srdivacky } 281198090Srdivacky } 282193323Sed 283193323Sed // In an AddRec, check if both start and step are divisible. 
284193323Sed if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) { 285198090Srdivacky const SCEV *Step = A->getStepRecurrence(SE); 286207618Srdivacky const SCEV *StepRem = SE.getConstant(Step->getType(), 0); 287198090Srdivacky if (!FactorOutConstant(Step, StepRem, Factor, SE, TD)) 288193323Sed return false; 289193323Sed if (!StepRem->isZero()) 290193323Sed return false; 291198090Srdivacky const SCEV *Start = A->getStart(); 292198090Srdivacky if (!FactorOutConstant(Start, Remainder, Factor, SE, TD)) 293193323Sed return false; 294263509Sdim S = SE.getAddRecExpr(Start, Step, A->getLoop(), 295263509Sdim A->getNoWrapFlags(SCEV::FlagNW)); 296193323Sed return true; 297193323Sed } 298193323Sed 299193323Sed return false; 300193323Sed} 301193323Sed 302198090Srdivacky/// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs 303198090Srdivacky/// is the number of SCEVAddRecExprs present, which are kept at the end of 304198090Srdivacky/// the list. 305193323Sed/// 306198090Srdivackystatic void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops, 307226890Sdim Type *Ty, 308198090Srdivacky ScalarEvolution &SE) { 309198090Srdivacky unsigned NumAddRecs = 0; 310198090Srdivacky for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i) 311198090Srdivacky ++NumAddRecs; 312198090Srdivacky // Group Ops into non-addrecs and addrecs. 313198090Srdivacky SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs); 314198090Srdivacky SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end()); 315198090Srdivacky // Let ScalarEvolution sort and simplify the non-addrecs list. 316198090Srdivacky const SCEV *Sum = NoAddRecs.empty() ? 317207618Srdivacky SE.getConstant(Ty, 0) : 318198090Srdivacky SE.getAddExpr(NoAddRecs); 319198090Srdivacky // If it returned an add, use the operands. Otherwise it simplified 320198090Srdivacky // the sum into a single value, so just use that. 
321205407Srdivacky Ops.clear(); 322198090Srdivacky if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum)) 323210299Sed Ops.append(Add->op_begin(), Add->op_end()); 324205407Srdivacky else if (!Sum->isZero()) 325205407Srdivacky Ops.push_back(Sum); 326198090Srdivacky // Then append the addrecs. 327210299Sed Ops.append(AddRecs.begin(), AddRecs.end()); 328198090Srdivacky} 329198090Srdivacky 330198090Srdivacky/// SplitAddRecs - Flatten a list of add operands, moving addrec start values 331198090Srdivacky/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,d}. 332198090Srdivacky/// This helps expose more opportunities for folding parts of the expressions 333198090Srdivacky/// into GEP indices. 334198090Srdivacky/// 335198090Srdivackystatic void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops, 336226890Sdim Type *Ty, 337198090Srdivacky ScalarEvolution &SE) { 338198090Srdivacky // Find the addrecs. 339198090Srdivacky SmallVector<const SCEV *, 8> AddRecs; 340198090Srdivacky for (unsigned i = 0, e = Ops.size(); i != e; ++i) 341198090Srdivacky while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) { 342198090Srdivacky const SCEV *Start = A->getStart(); 343198090Srdivacky if (Start->isZero()) break; 344207618Srdivacky const SCEV *Zero = SE.getConstant(Ty, 0); 345198090Srdivacky AddRecs.push_back(SE.getAddRecExpr(Zero, 346198090Srdivacky A->getStepRecurrence(SE), 347221345Sdim A->getLoop(), 348263509Sdim A->getNoWrapFlags(SCEV::FlagNW))); 349198090Srdivacky if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) { 350198090Srdivacky Ops[i] = Zero; 351210299Sed Ops.append(Add->op_begin(), Add->op_end()); 352198090Srdivacky e += Add->getNumOperands(); 353198090Srdivacky } else { 354198090Srdivacky Ops[i] = Start; 355198090Srdivacky } 356198090Srdivacky } 357198090Srdivacky if (!AddRecs.empty()) { 358198090Srdivacky // Add the addrecs onto the end of the list. 
359210299Sed Ops.append(AddRecs.begin(), AddRecs.end()); 360198090Srdivacky // Resort the operand list, moving any constants to the front. 361198090Srdivacky SimplifyAddOperands(Ops, Ty, SE); 362198090Srdivacky } 363198090Srdivacky} 364198090Srdivacky 365198090Srdivacky/// expandAddToGEP - Expand an addition expression with a pointer type into 366198090Srdivacky/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps 367198090Srdivacky/// BasicAliasAnalysis and other passes analyze the result. See the rules 368198090Srdivacky/// for getelementptr vs. inttoptr in 369198090Srdivacky/// http://llvm.org/docs/LangRef.html#pointeraliasing 370198090Srdivacky/// for details. 371198090Srdivacky/// 372202878Srdivacky/// Design note: The correctness of using getelementptr here depends on 373198090Srdivacky/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as 374198090Srdivacky/// they may introduce pointer arithmetic which may not be safely converted 375198090Srdivacky/// into getelementptr. 376198090Srdivacky/// 377193323Sed/// Design note: It might seem desirable for this function to be more 378193323Sed/// loop-aware. If some of the indices are loop-invariant while others 379193323Sed/// aren't, it might seem desirable to emit multiple GEPs, keeping the 380193323Sed/// loop-invariant portions of the overall computation outside the loop. 381193323Sed/// However, there are a few reasons this is not done here. Hoisting simple 382193323Sed/// arithmetic is a low-level optimization that often isn't very 383193323Sed/// important until late in the optimization process. In fact, passes 384193323Sed/// like InstructionCombining will combine GEPs, even if it means 385193323Sed/// pushing loop-invariant computation down into loops, so even if the 386193323Sed/// GEPs were split here, the work would quickly be undone. 
The 387193323Sed/// LoopStrengthReduction pass, which is usually run quite late (and 388193323Sed/// after the last InstructionCombining pass), takes care of hoisting 389193323Sed/// loop-invariant portions of expressions, after considering what 390193323Sed/// can be folded using target addressing modes. 391193323Sed/// 392198090SrdivackyValue *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin, 393198090Srdivacky const SCEV *const *op_end, 394226890Sdim PointerType *PTy, 395226890Sdim Type *Ty, 396193323Sed Value *V) { 397226890Sdim Type *ElTy = PTy->getElementType(); 398193323Sed SmallVector<Value *, 4> GepIndices; 399198090Srdivacky SmallVector<const SCEV *, 8> Ops(op_begin, op_end); 400193323Sed bool AnyNonZeroIndices = false; 401193323Sed 402198090Srdivacky // Split AddRecs up into parts as either of the parts may be usable 403198090Srdivacky // without the other. 404198090Srdivacky SplitAddRecs(Ops, Ty, SE); 405198090Srdivacky 406263509Sdim Type *IntPtrTy = SE.TD 407263509Sdim ? SE.TD->getIntPtrType(PTy) 408263509Sdim : Type::getInt64Ty(PTy->getContext()); 409263509Sdim 410200581Srdivacky // Descend down the pointer's type and attempt to convert the other 411193323Sed // operands into GEP indices, at each level. The first index in a GEP 412193323Sed // indexes into the array implied by the pointer operand; the rest of 413193323Sed // the indices index into the element or field type selected by the 414193323Sed // preceding index. 415193323Sed for (;;) { 416198090Srdivacky // If the scale size is not 0, attempt to factor out a scale for 417198090Srdivacky // array indexing. 
418198090Srdivacky SmallVector<const SCEV *, 8> ScaledOps; 419203954Srdivacky if (ElTy->isSized()) { 420263509Sdim const SCEV *ElSize = SE.getSizeOfExpr(IntPtrTy, ElTy); 421203954Srdivacky if (!ElSize->isZero()) { 422203954Srdivacky SmallVector<const SCEV *, 8> NewOps; 423203954Srdivacky for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 424203954Srdivacky const SCEV *Op = Ops[i]; 425207618Srdivacky const SCEV *Remainder = SE.getConstant(Ty, 0); 426203954Srdivacky if (FactorOutConstant(Op, Remainder, ElSize, SE, SE.TD)) { 427203954Srdivacky // Op now has ElSize factored out. 428203954Srdivacky ScaledOps.push_back(Op); 429203954Srdivacky if (!Remainder->isZero()) 430203954Srdivacky NewOps.push_back(Remainder); 431203954Srdivacky AnyNonZeroIndices = true; 432203954Srdivacky } else { 433203954Srdivacky // The operand was not divisible, so add it to the list of operands 434203954Srdivacky // we'll scan next iteration. 435203954Srdivacky NewOps.push_back(Ops[i]); 436203954Srdivacky } 437193323Sed } 438203954Srdivacky // If we made any changes, update Ops. 439203954Srdivacky if (!ScaledOps.empty()) { 440203954Srdivacky Ops = NewOps; 441203954Srdivacky SimplifyAddOperands(Ops, Ty, SE); 442203954Srdivacky } 443193323Sed } 444193323Sed } 445198090Srdivacky 446198090Srdivacky // Record the scaled array index for this level of the type. If 447198090Srdivacky // we didn't find any operands that could be factored, tentatively 448198090Srdivacky // assume that element zero was selected (since the zero offset 449198090Srdivacky // would obviously be folded away). 450193323Sed Value *Scaled = ScaledOps.empty() ? 451193323Sed Constant::getNullValue(Ty) : 452193323Sed expandCodeFor(SE.getAddExpr(ScaledOps), Ty); 453193323Sed GepIndices.push_back(Scaled); 454193323Sed 455193323Sed // Collect struct field index operands. 
456226890Sdim while (StructType *STy = dyn_cast<StructType>(ElTy)) { 457198090Srdivacky bool FoundFieldNo = false; 458198090Srdivacky // An empty struct has no fields. 459198090Srdivacky if (STy->getNumElements() == 0) break; 460198090Srdivacky if (SE.TD) { 461245431Sdim // With DataLayout, field offsets are known. See if a constant offset 462198090Srdivacky // falls within any of the struct fields. 463198090Srdivacky if (Ops.empty()) break; 464193323Sed if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0])) 465193323Sed if (SE.getTypeSizeInBits(C->getType()) <= 64) { 466193323Sed const StructLayout &SL = *SE.TD->getStructLayout(STy); 467193323Sed uint64_t FullOffset = C->getValue()->getZExtValue(); 468193323Sed if (FullOffset < SL.getSizeInBytes()) { 469193323Sed unsigned ElIdx = SL.getElementContainingOffset(FullOffset); 470198090Srdivacky GepIndices.push_back( 471198090Srdivacky ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx)); 472193323Sed ElTy = STy->getTypeAtIndex(ElIdx); 473193323Sed Ops[0] = 474194612Sed SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx)); 475193323Sed AnyNonZeroIndices = true; 476198090Srdivacky FoundFieldNo = true; 477193323Sed } 478193323Sed } 479198090Srdivacky } else { 480245431Sdim // Without DataLayout, just check for an offsetof expression of the 481198090Srdivacky // appropriate struct type. 
482198090Srdivacky for (unsigned i = 0, e = Ops.size(); i != e; ++i) 483203954Srdivacky if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Ops[i])) { 484226890Sdim Type *CTy; 485203954Srdivacky Constant *FieldNo; 486203954Srdivacky if (U->isOffsetOf(CTy, FieldNo) && CTy == STy) { 487203954Srdivacky GepIndices.push_back(FieldNo); 488203954Srdivacky ElTy = 489203954Srdivacky STy->getTypeAtIndex(cast<ConstantInt>(FieldNo)->getZExtValue()); 490198090Srdivacky Ops[i] = SE.getConstant(Ty, 0); 491198090Srdivacky AnyNonZeroIndices = true; 492198090Srdivacky FoundFieldNo = true; 493198090Srdivacky break; 494198090Srdivacky } 495203954Srdivacky } 496193323Sed } 497198090Srdivacky // If no struct field offsets were found, tentatively assume that 498198090Srdivacky // field zero was selected (since the zero offset would obviously 499198090Srdivacky // be folded away). 500198090Srdivacky if (!FoundFieldNo) { 501198090Srdivacky ElTy = STy->getTypeAtIndex(0u); 502198090Srdivacky GepIndices.push_back( 503198090Srdivacky Constant::getNullValue(Type::getInt32Ty(Ty->getContext()))); 504198090Srdivacky } 505198090Srdivacky } 506193323Sed 507226890Sdim if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy)) 508193323Sed ElTy = ATy->getElementType(); 509198090Srdivacky else 510198090Srdivacky break; 511193323Sed } 512193323Sed 513204642Srdivacky // If none of the operands were convertible to proper GEP indices, cast 514193323Sed // the base to i8* and do an ugly getelementptr with that. It's still 515193323Sed // better than ptrtoint+arithmetic+inttoptr at least. 516193323Sed if (!AnyNonZeroIndices) { 517198090Srdivacky // Cast the base to i8*. 518193323Sed V = InsertNoopCastOfTo(V, 519198090Srdivacky Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace())); 520198090Srdivacky 521235633Sdim assert(!isa<Instruction>(V) || 522235633Sdim SE.DT->dominates(cast<Instruction>(V), Builder.GetInsertPoint())); 523235633Sdim 524198090Srdivacky // Expand the operands for a plain byte offset. 
525194178Sed Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty); 526193323Sed 527193323Sed // Fold a GEP with constant operands. 528193323Sed if (Constant *CLHS = dyn_cast<Constant>(V)) 529193323Sed if (Constant *CRHS = dyn_cast<Constant>(Idx)) 530226890Sdim return ConstantExpr::getGetElementPtr(CLHS, CRHS); 531193323Sed 532193323Sed // Do a quick scan to see if we have this GEP nearby. If so, reuse it. 533193323Sed unsigned ScanLimit = 6; 534195340Sed BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin(); 535195340Sed // Scanning starts from the last instruction before the insertion point. 536195340Sed BasicBlock::iterator IP = Builder.GetInsertPoint(); 537195340Sed if (IP != BlockBegin) { 538193323Sed --IP; 539193323Sed for (; ScanLimit; --IP, --ScanLimit) { 540204792Srdivacky // Don't count dbg.value against the ScanLimit, to avoid perturbing the 541204792Srdivacky // generated code. 542204792Srdivacky if (isa<DbgInfoIntrinsic>(IP)) 543204792Srdivacky ScanLimit++; 544193323Sed if (IP->getOpcode() == Instruction::GetElementPtr && 545193323Sed IP->getOperand(0) == V && IP->getOperand(1) == Idx) 546193323Sed return IP; 547193323Sed if (IP == BlockBegin) break; 548193323Sed } 549193323Sed } 550193323Sed 551204642Srdivacky // Save the original insertion point so we can restore it when we're done. 552263509Sdim BuilderType::InsertPointGuard Guard(Builder); 553204642Srdivacky 554204642Srdivacky // Move the insertion point out of as many loops as we can. 555204642Srdivacky while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) { 556204642Srdivacky if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break; 557204642Srdivacky BasicBlock *Preheader = L->getLoopPreheader(); 558204642Srdivacky if (!Preheader) break; 559204642Srdivacky 560204642Srdivacky // Ok, move up a level. 561204642Srdivacky Builder.SetInsertPoint(Preheader, Preheader->getTerminator()); 562204642Srdivacky } 563204642Srdivacky 564198090Srdivacky // Emit a GEP. 
565198090Srdivacky Value *GEP = Builder.CreateGEP(V, Idx, "uglygep"); 566202878Srdivacky rememberInstruction(GEP); 567204642Srdivacky 568193323Sed return GEP; 569193323Sed } 570193323Sed 571204642Srdivacky // Save the original insertion point so we can restore it when we're done. 572263509Sdim BuilderType::InsertPoint SaveInsertPt = Builder.saveIP(); 573204642Srdivacky 574204642Srdivacky // Move the insertion point out of as many loops as we can. 575204642Srdivacky while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) { 576204642Srdivacky if (!L->isLoopInvariant(V)) break; 577204642Srdivacky 578204642Srdivacky bool AnyIndexNotLoopInvariant = false; 579204642Srdivacky for (SmallVectorImpl<Value *>::const_iterator I = GepIndices.begin(), 580204642Srdivacky E = GepIndices.end(); I != E; ++I) 581204642Srdivacky if (!L->isLoopInvariant(*I)) { 582204642Srdivacky AnyIndexNotLoopInvariant = true; 583204642Srdivacky break; 584204642Srdivacky } 585204642Srdivacky if (AnyIndexNotLoopInvariant) 586204642Srdivacky break; 587204642Srdivacky 588204642Srdivacky BasicBlock *Preheader = L->getLoopPreheader(); 589204642Srdivacky if (!Preheader) break; 590204642Srdivacky 591204642Srdivacky // Ok, move up a level. 592204642Srdivacky Builder.SetInsertPoint(Preheader, Preheader->getTerminator()); 593204642Srdivacky } 594204642Srdivacky 595198090Srdivacky // Insert a pretty getelementptr. Note that this GEP is not marked inbounds, 596198090Srdivacky // because ScalarEvolution may have changed the address arithmetic to 597198090Srdivacky // compute a value which is beyond the end of the allocated object. 
598202878Srdivacky Value *Casted = V; 599202878Srdivacky if (V->getType() != PTy) 600202878Srdivacky Casted = InsertNoopCastOfTo(Casted, PTy); 601202878Srdivacky Value *GEP = Builder.CreateGEP(Casted, 602226890Sdim GepIndices, 603195340Sed "scevgep"); 604193323Sed Ops.push_back(SE.getUnknown(GEP)); 605202878Srdivacky rememberInstruction(GEP); 606204642Srdivacky 607204642Srdivacky // Restore the original insert point. 608263509Sdim Builder.restoreIP(SaveInsertPt); 609204642Srdivacky 610193323Sed return expand(SE.getAddExpr(Ops)); 611193323Sed} 612193323Sed 613204642Srdivacky/// PickMostRelevantLoop - Given two loops pick the one that's most relevant for 614204642Srdivacky/// SCEV expansion. If they are nested, this is the most nested. If they are 615204642Srdivacky/// neighboring, pick the later. 616204642Srdivackystatic const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B, 617204642Srdivacky DominatorTree &DT) { 618204642Srdivacky if (!A) return B; 619204642Srdivacky if (!B) return A; 620204642Srdivacky if (A->contains(B)) return B; 621204642Srdivacky if (B->contains(A)) return A; 622204642Srdivacky if (DT.dominates(A->getHeader(), B->getHeader())) return B; 623204642Srdivacky if (DT.dominates(B->getHeader(), A->getHeader())) return A; 624204642Srdivacky return A; // Arbitrarily break the tie. 625204642Srdivacky} 626193323Sed 627218893Sdim/// getRelevantLoop - Get the most relevant loop associated with the given 628204642Srdivacky/// expression, according to PickMostRelevantLoop. 629218893Sdimconst Loop *SCEVExpander::getRelevantLoop(const SCEV *S) { 630218893Sdim // Test whether we've already computed the most relevant loop for this SCEV. 
631218893Sdim std::pair<DenseMap<const SCEV *, const Loop *>::iterator, bool> Pair = 632218893Sdim RelevantLoops.insert(std::make_pair(S, static_cast<const Loop *>(0))); 633218893Sdim if (!Pair.second) 634218893Sdim return Pair.first->second; 635218893Sdim 636204642Srdivacky if (isa<SCEVConstant>(S)) 637218893Sdim // A constant has no relevant loops. 638204642Srdivacky return 0; 639204642Srdivacky if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 640204642Srdivacky if (const Instruction *I = dyn_cast<Instruction>(U->getValue())) 641218893Sdim return Pair.first->second = SE.LI->getLoopFor(I->getParent()); 642218893Sdim // A non-instruction has no relevant loops. 643204642Srdivacky return 0; 644204642Srdivacky } 645204642Srdivacky if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) { 646204642Srdivacky const Loop *L = 0; 647204642Srdivacky if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) 648204642Srdivacky L = AR->getLoop(); 649204642Srdivacky for (SCEVNAryExpr::op_iterator I = N->op_begin(), E = N->op_end(); 650204642Srdivacky I != E; ++I) 651218893Sdim L = PickMostRelevantLoop(L, getRelevantLoop(*I), *SE.DT); 652218893Sdim return RelevantLoops[N] = L; 653204642Srdivacky } 654218893Sdim if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) { 655218893Sdim const Loop *Result = getRelevantLoop(C->getOperand()); 656218893Sdim return RelevantLoops[C] = Result; 657218893Sdim } 658218893Sdim if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) { 659218893Sdim const Loop *Result = 660218893Sdim PickMostRelevantLoop(getRelevantLoop(D->getLHS()), 661218893Sdim getRelevantLoop(D->getRHS()), 662218893Sdim *SE.DT); 663218893Sdim return RelevantLoops[D] = Result; 664218893Sdim } 665204642Srdivacky llvm_unreachable("Unexpected SCEV type!"); 666204642Srdivacky} 667198090Srdivacky 668207618Srdivackynamespace { 669207618Srdivacky 670204642Srdivacky/// LoopCompare - Compare loops by PickMostRelevantLoop. 
671204642Srdivackyclass LoopCompare { 672204642Srdivacky DominatorTree &DT; 673204642Srdivackypublic: 674204642Srdivacky explicit LoopCompare(DominatorTree &dt) : DT(dt) {} 675198090Srdivacky 676204642Srdivacky bool operator()(std::pair<const Loop *, const SCEV *> LHS, 677204642Srdivacky std::pair<const Loop *, const SCEV *> RHS) const { 678212904Sdim // Keep pointer operands sorted at the end. 679212904Sdim if (LHS.second->getType()->isPointerTy() != 680212904Sdim RHS.second->getType()->isPointerTy()) 681212904Sdim return LHS.second->getType()->isPointerTy(); 682212904Sdim 683204642Srdivacky // Compare loops with PickMostRelevantLoop. 684204642Srdivacky if (LHS.first != RHS.first) 685204642Srdivacky return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first; 686204642Srdivacky 687204642Srdivacky // If one operand is a non-constant negative and the other is not, 688204642Srdivacky // put the non-constant negative on the right so that a sub can 689204642Srdivacky // be used instead of a negate and add. 690235633Sdim if (LHS.second->isNonConstantNegative()) { 691235633Sdim if (!RHS.second->isNonConstantNegative()) 692204642Srdivacky return false; 693235633Sdim } else if (RHS.second->isNonConstantNegative()) 694204642Srdivacky return true; 695204642Srdivacky 696204642Srdivacky // Otherwise they are equivalent according to this comparison. 697204642Srdivacky return false; 698198090Srdivacky } 699204642Srdivacky}; 700193323Sed 701207618Srdivacky} 702207618Srdivacky 703204642SrdivackyValue *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) { 704226890Sdim Type *Ty = SE.getEffectiveSCEVType(S->getType()); 705193323Sed 706204642Srdivacky // Collect all the add operands in a loop, along with their associated loops. 707204642Srdivacky // Iterate in reverse so that constants are emitted last, all else equal, and 708204642Srdivacky // so that pointer operands are inserted first, which the code below relies on 709204642Srdivacky // to form more involved GEPs. 
710204642Srdivacky SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops; 711204642Srdivacky for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()), 712204642Srdivacky E(S->op_begin()); I != E; ++I) 713218893Sdim OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I)); 714204642Srdivacky 715204642Srdivacky // Sort by loop. Use a stable sort so that constants follow non-constants and 716204642Srdivacky // pointer operands precede non-pointer operands. 717204642Srdivacky std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT)); 718204642Srdivacky 719204642Srdivacky // Emit instructions to add all the operands. Hoist as much as possible 720204642Srdivacky // out of loops, and form meaningful getelementptrs where possible. 721204642Srdivacky Value *Sum = 0; 722204642Srdivacky for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator 723204642Srdivacky I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) { 724204642Srdivacky const Loop *CurLoop = I->first; 725204642Srdivacky const SCEV *Op = I->second; 726204642Srdivacky if (!Sum) { 727204642Srdivacky // This is the first operand. Just expand it. 728204642Srdivacky Sum = expand(Op); 729204642Srdivacky ++I; 730226890Sdim } else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) { 731204642Srdivacky // The running sum expression is a pointer. Try to form a getelementptr 732204642Srdivacky // at this level with that as the base. 733204642Srdivacky SmallVector<const SCEV *, 4> NewOps; 734212904Sdim for (; I != E && I->first == CurLoop; ++I) { 735212904Sdim // If the operand is SCEVUnknown and not instructions, peek through 736212904Sdim // it, to enable more of it to be folded into the GEP. 
737212904Sdim const SCEV *X = I->second; 738212904Sdim if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X)) 739212904Sdim if (!isa<Instruction>(U->getValue())) 740212904Sdim X = SE.getSCEV(U->getValue()); 741212904Sdim NewOps.push_back(X); 742212904Sdim } 743204642Srdivacky Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum); 744226890Sdim } else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) { 745204642Srdivacky // The running sum is an integer, and there's a pointer at this level. 746207618Srdivacky // Try to form a getelementptr. If the running sum is instructions, 747207618Srdivacky // use a SCEVUnknown to avoid re-analyzing them. 748204642Srdivacky SmallVector<const SCEV *, 4> NewOps; 749207618Srdivacky NewOps.push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) : 750207618Srdivacky SE.getSCEV(Sum)); 751204642Srdivacky for (++I; I != E && I->first == CurLoop; ++I) 752204642Srdivacky NewOps.push_back(I->second); 753204642Srdivacky Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op)); 754235633Sdim } else if (Op->isNonConstantNegative()) { 755204642Srdivacky // Instead of doing a negate and add, just do a subtract. 756202878Srdivacky Value *W = expandCodeFor(SE.getNegativeSCEV(Op), Ty); 757204642Srdivacky Sum = InsertNoopCastOfTo(Sum, Ty); 758204642Srdivacky Sum = InsertBinop(Instruction::Sub, Sum, W); 759204642Srdivacky ++I; 760202878Srdivacky } else { 761204642Srdivacky // A simple add. 762202878Srdivacky Value *W = expandCodeFor(Op, Ty); 763204642Srdivacky Sum = InsertNoopCastOfTo(Sum, Ty); 764204642Srdivacky // Canonicalize a constant to the RHS. 
765204642Srdivacky if (isa<Constant>(Sum)) std::swap(Sum, W); 766204642Srdivacky Sum = InsertBinop(Instruction::Add, Sum, W); 767204642Srdivacky ++I; 768202878Srdivacky } 769193323Sed } 770204642Srdivacky 771204642Srdivacky return Sum; 772193323Sed} 773193323Sed 774193323SedValue *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) { 775226890Sdim Type *Ty = SE.getEffectiveSCEVType(S->getType()); 776193323Sed 777204642Srdivacky // Collect all the mul operands in a loop, along with their associated loops. 778204642Srdivacky // Iterate in reverse so that constants are emitted last, all else equal. 779204642Srdivacky SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops; 780204642Srdivacky for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()), 781204642Srdivacky E(S->op_begin()); I != E; ++I) 782218893Sdim OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I)); 783193323Sed 784204642Srdivacky // Sort by loop. Use a stable sort so that constants follow non-constants. 785204642Srdivacky std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT)); 786204642Srdivacky 787204642Srdivacky // Emit instructions to mul all the operands. Hoist as much as possible 788204642Srdivacky // out of loops. 789204642Srdivacky Value *Prod = 0; 790204642Srdivacky for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator 791204642Srdivacky I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) { 792204642Srdivacky const SCEV *Op = I->second; 793204642Srdivacky if (!Prod) { 794204642Srdivacky // This is the first operand. Just expand it. 795204642Srdivacky Prod = expand(Op); 796204642Srdivacky ++I; 797204642Srdivacky } else if (Op->isAllOnesValue()) { 798204642Srdivacky // Instead of doing a multiply by negative one, just do a negate. 
799204642Srdivacky Prod = InsertNoopCastOfTo(Prod, Ty); 800204642Srdivacky Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod); 801204642Srdivacky ++I; 802204642Srdivacky } else { 803204642Srdivacky // A simple mul. 804204642Srdivacky Value *W = expandCodeFor(Op, Ty); 805204642Srdivacky Prod = InsertNoopCastOfTo(Prod, Ty); 806204642Srdivacky // Canonicalize a constant to the RHS. 807204642Srdivacky if (isa<Constant>(Prod)) std::swap(Prod, W); 808204642Srdivacky Prod = InsertBinop(Instruction::Mul, Prod, W); 809204642Srdivacky ++I; 810204642Srdivacky } 811193323Sed } 812193323Sed 813204642Srdivacky return Prod; 814193323Sed} 815193323Sed 816193323SedValue *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) { 817226890Sdim Type *Ty = SE.getEffectiveSCEVType(S->getType()); 818193323Sed 819194178Sed Value *LHS = expandCodeFor(S->getLHS(), Ty); 820193323Sed if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) { 821193323Sed const APInt &RHS = SC->getValue()->getValue(); 822193323Sed if (RHS.isPowerOf2()) 823193323Sed return InsertBinop(Instruction::LShr, LHS, 824195340Sed ConstantInt::get(Ty, RHS.logBase2())); 825193323Sed } 826193323Sed 827194178Sed Value *RHS = expandCodeFor(S->getRHS(), Ty); 828195340Sed return InsertBinop(Instruction::UDiv, LHS, RHS); 829193323Sed} 830193323Sed 831193323Sed/// Move parts of Base into Rest to leave Base with the minimal 832193323Sed/// expression that provides a pointer operand suitable for a 833193323Sed/// GEP expansion. 
834198090Srdivackystatic void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest, 835193323Sed ScalarEvolution &SE) { 836193323Sed while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) { 837193323Sed Base = A->getStart(); 838193323Sed Rest = SE.getAddExpr(Rest, 839207618Srdivacky SE.getAddRecExpr(SE.getConstant(A->getType(), 0), 840193323Sed A->getStepRecurrence(SE), 841221345Sdim A->getLoop(), 842263509Sdim A->getNoWrapFlags(SCEV::FlagNW))); 843193323Sed } 844193323Sed if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) { 845193323Sed Base = A->getOperand(A->getNumOperands()-1); 846198090Srdivacky SmallVector<const SCEV *, 8> NewAddOps(A->op_begin(), A->op_end()); 847193323Sed NewAddOps.back() = Rest; 848193323Sed Rest = SE.getAddExpr(NewAddOps); 849193323Sed ExposePointerBase(Base, Rest, SE); 850193323Sed } 851193323Sed} 852193323Sed 853226890Sdim/// Determine if this is a well-behaved chain of instructions leading back to 854226890Sdim/// the PHI. If so, it may be reused by expanded expressions. 855226890Sdimbool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV, 856226890Sdim const Loop *L) { 857226890Sdim if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) || 858226890Sdim (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV))) 859226890Sdim return false; 860226890Sdim // If any of the operands don't dominate the insert position, bail. 861226890Sdim // Addrec operands are always loop-invariant, so this can only happen 862226890Sdim // if there are instructions which haven't been hoisted. 863226890Sdim if (L == IVIncInsertLoop) { 864226890Sdim for (User::op_iterator OI = IncV->op_begin()+1, 865226890Sdim OE = IncV->op_end(); OI != OE; ++OI) 866226890Sdim if (Instruction *OInst = dyn_cast<Instruction>(OI)) 867226890Sdim if (!SE.DT->dominates(OInst, IVIncInsertPos)) 868226890Sdim return false; 869226890Sdim } 870226890Sdim // Advance to the next instruction. 
871226890Sdim IncV = dyn_cast<Instruction>(IncV->getOperand(0)); 872226890Sdim if (!IncV) 873226890Sdim return false; 874226890Sdim 875226890Sdim if (IncV->mayHaveSideEffects()) 876226890Sdim return false; 877226890Sdim 878226890Sdim if (IncV != PN) 879226890Sdim return true; 880226890Sdim 881226890Sdim return isNormalAddRecExprPHI(PN, IncV, L); 882226890Sdim} 883226890Sdim 884235633Sdim/// getIVIncOperand returns an induction variable increment's induction 885235633Sdim/// variable operand. 886235633Sdim/// 887235633Sdim/// If allowScale is set, any type of GEP is allowed as long as the nonIV 888235633Sdim/// operands dominate InsertPos. 889235633Sdim/// 890235633Sdim/// If allowScale is not set, ensure that a GEP increment conforms to one of the 891235633Sdim/// simple patterns generated by getAddRecExprPHILiterally and 892235633Sdim/// expandAddtoGEP. If the pattern isn't recognized, return NULL. 893235633SdimInstruction *SCEVExpander::getIVIncOperand(Instruction *IncV, 894235633Sdim Instruction *InsertPos, 895235633Sdim bool allowScale) { 896235633Sdim if (IncV == InsertPos) 897235633Sdim return NULL; 898235633Sdim 899226890Sdim switch (IncV->getOpcode()) { 900235633Sdim default: 901235633Sdim return NULL; 902226890Sdim // Check for a simple Add/Sub or GEP of a loop invariant step. 
903226890Sdim case Instruction::Add: 904235633Sdim case Instruction::Sub: { 905235633Sdim Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1)); 906235633Sdim if (!OInst || SE.DT->dominates(OInst, InsertPos)) 907235633Sdim return dyn_cast<Instruction>(IncV->getOperand(0)); 908235633Sdim return NULL; 909235633Sdim } 910226890Sdim case Instruction::BitCast: 911235633Sdim return dyn_cast<Instruction>(IncV->getOperand(0)); 912235633Sdim case Instruction::GetElementPtr: 913226890Sdim for (Instruction::op_iterator I = IncV->op_begin()+1, E = IncV->op_end(); 914226890Sdim I != E; ++I) { 915226890Sdim if (isa<Constant>(*I)) 916226890Sdim continue; 917235633Sdim if (Instruction *OInst = dyn_cast<Instruction>(*I)) { 918235633Sdim if (!SE.DT->dominates(OInst, InsertPos)) 919235633Sdim return NULL; 920235633Sdim } 921235633Sdim if (allowScale) { 922235633Sdim // allow any kind of GEP as long as it can be hoisted. 923235633Sdim continue; 924235633Sdim } 925235633Sdim // This must be a pointer addition of constants (pretty), which is already 926235633Sdim // handled, or some number of address-size elements (ugly). Ugly geps 927235633Sdim // have 2 operands. i1* is used by the expander to represent an 928235633Sdim // address-size element. 929226890Sdim if (IncV->getNumOperands() != 2) 930235633Sdim return NULL; 931226890Sdim unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace(); 932226890Sdim if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS) 933226890Sdim && IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS)) 934235633Sdim return NULL; 935226890Sdim break; 936226890Sdim } 937235633Sdim return dyn_cast<Instruction>(IncV->getOperand(0)); 938226890Sdim } 939235633Sdim} 940235633Sdim 941235633Sdim/// hoistStep - Attempt to hoist a simple IV increment above InsertPos to make 942235633Sdim/// it available to other uses in this loop. Recursively hoist any operands, 943235633Sdim/// until we reach a value that dominates InsertPos. 
944235633Sdimbool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) { 945235633Sdim if (SE.DT->dominates(IncV, InsertPos)) 946235633Sdim return true; 947235633Sdim 948235633Sdim // InsertPos must itself dominate IncV so that IncV's new position satisfies 949235633Sdim // its existing users. 950245431Sdim if (isa<PHINode>(InsertPos) 951245431Sdim || !SE.DT->dominates(InsertPos->getParent(), IncV->getParent())) 952226890Sdim return false; 953235633Sdim 954235633Sdim // Check that the chain of IV operands leading back to Phi can be hoisted. 955235633Sdim SmallVector<Instruction*, 4> IVIncs; 956235633Sdim for(;;) { 957235633Sdim Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true); 958235633Sdim if (!Oper) 959235633Sdim return false; 960235633Sdim // IncV is safe to hoist. 961235633Sdim IVIncs.push_back(IncV); 962235633Sdim IncV = Oper; 963235633Sdim if (SE.DT->dominates(IncV, InsertPos)) 964235633Sdim break; 965226890Sdim } 966235633Sdim for (SmallVectorImpl<Instruction*>::reverse_iterator I = IVIncs.rbegin(), 967235633Sdim E = IVIncs.rend(); I != E; ++I) { 968235633Sdim (*I)->moveBefore(InsertPos); 969235633Sdim } 970235633Sdim return true; 971226890Sdim} 972226890Sdim 973235633Sdim/// Determine if this cyclic phi is in a form that would have been generated by 974235633Sdim/// LSR. We don't care if the phi was actually expanded in this pass, as long 975235633Sdim/// as it is in a low-cost form, for example, no implied multiplication. This 976235633Sdim/// should match any patterns generated by getAddRecExprPHILiterally and 977235633Sdim/// expandAddtoGEP. 
978235633Sdimbool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV, 979235633Sdim const Loop *L) { 980235633Sdim for(Instruction *IVOper = IncV; 981235633Sdim (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(), 982235633Sdim /*allowScale=*/false));) { 983235633Sdim if (IVOper == PN) 984235633Sdim return true; 985235633Sdim } 986235633Sdim return false; 987235633Sdim} 988235633Sdim 989235633Sdim/// expandIVInc - Expand an IV increment at Builder's current InsertPos. 990235633Sdim/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may 991235633Sdim/// need to materialize IV increments elsewhere to handle difficult situations. 992235633SdimValue *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L, 993235633Sdim Type *ExpandTy, Type *IntTy, 994235633Sdim bool useSubtract) { 995235633Sdim Value *IncV; 996235633Sdim // If the PHI is a pointer, use a GEP, otherwise use an add or sub. 997235633Sdim if (ExpandTy->isPointerTy()) { 998235633Sdim PointerType *GEPPtrTy = cast<PointerType>(ExpandTy); 999235633Sdim // If the step isn't constant, don't use an implicitly scaled GEP, because 1000235633Sdim // that would require a multiply inside the loop. 1001235633Sdim if (!isa<ConstantInt>(StepV)) 1002235633Sdim GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()), 1003235633Sdim GEPPtrTy->getAddressSpace()); 1004235633Sdim const SCEV *const StepArray[1] = { SE.getSCEV(StepV) }; 1005235633Sdim IncV = expandAddToGEP(StepArray, StepArray+1, GEPPtrTy, IntTy, PN); 1006235633Sdim if (IncV->getType() != PN->getType()) { 1007235633Sdim IncV = Builder.CreateBitCast(IncV, PN->getType()); 1008235633Sdim rememberInstruction(IncV); 1009235633Sdim } 1010235633Sdim } else { 1011235633Sdim IncV = useSubtract ? 
1012235633Sdim Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") : 1013235633Sdim Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next"); 1014235633Sdim rememberInstruction(IncV); 1015235633Sdim } 1016235633Sdim return IncV; 1017235633Sdim} 1018235633Sdim 1019202878Srdivacky/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand 1020202878Srdivacky/// the base addrec, which is the addrec without any non-loop-dominating 1021202878Srdivacky/// values, and return the PHI. 1022202878SrdivackyPHINode * 1023202878SrdivackySCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized, 1024202878Srdivacky const Loop *L, 1025226890Sdim Type *ExpandTy, 1026226890Sdim Type *IntTy) { 1027224145Sdim assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position"); 1028224145Sdim 1029202878Srdivacky // Reuse a previously-inserted PHI, if present. 1030226890Sdim BasicBlock *LatchBlock = L->getLoopLatch(); 1031226890Sdim if (LatchBlock) { 1032226890Sdim for (BasicBlock::iterator I = L->getHeader()->begin(); 1033226890Sdim PHINode *PN = dyn_cast<PHINode>(I); ++I) { 1034226890Sdim if (!SE.isSCEVable(PN->getType()) || 1035226890Sdim (SE.getEffectiveSCEVType(PN->getType()) != 1036226890Sdim SE.getEffectiveSCEVType(Normalized->getType())) || 1037226890Sdim SE.getSCEV(PN) != Normalized) 1038226890Sdim continue; 1039202878Srdivacky 1040226890Sdim Instruction *IncV = 1041226890Sdim cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock)); 1042226890Sdim 1043226890Sdim if (LSRMode) { 1044226890Sdim if (!isExpandedAddRecExprPHI(PN, IncV, L)) 1045226890Sdim continue; 1046235633Sdim if (L == IVIncInsertLoop && !hoistIVInc(IncV, IVIncInsertPos)) 1047235633Sdim continue; 1048226890Sdim } 1049226890Sdim else { 1050226890Sdim if (!isNormalAddRecExprPHI(PN, IncV, L)) 1051226890Sdim continue; 1052235633Sdim if (L == IVIncInsertLoop) 1053235633Sdim do { 1054235633Sdim if (SE.DT->dominates(IncV, IVIncInsertPos)) 1055235633Sdim break; 
1056235633Sdim // Make sure the increment is where we want it. But don't move it 1057235633Sdim // down past a potential existing post-inc user. 1058235633Sdim IncV->moveBefore(IVIncInsertPos); 1059235633Sdim IVIncInsertPos = IncV; 1060235633Sdim IncV = cast<Instruction>(IncV->getOperand(0)); 1061235633Sdim } while (IncV != PN); 1062226890Sdim } 1063226890Sdim // Ok, the add recurrence looks usable. 1064226890Sdim // Remember this PHI, even in post-inc mode. 1065226890Sdim InsertedValues.insert(PN); 1066226890Sdim // Remember the increment. 1067226890Sdim rememberInstruction(IncV); 1068226890Sdim return PN; 1069226890Sdim } 1070226890Sdim } 1071203954Srdivacky 1072202878Srdivacky // Save the original insertion point so we can restore it when we're done. 1073263509Sdim BuilderType::InsertPointGuard Guard(Builder); 1074202878Srdivacky 1075235633Sdim // Another AddRec may need to be recursively expanded below. For example, if 1076235633Sdim // this AddRec is quadratic, the StepV may itself be an AddRec in this 1077235633Sdim // loop. Remove this loop from the PostIncLoops set before expanding such 1078235633Sdim // AddRecs. Otherwise, we cannot find a valid position for the step 1079235633Sdim // (i.e. StepV can never dominate its loop header). Ideally, we could do 1080235633Sdim // SavedIncLoops.swap(PostIncLoops), but we generally have a single element, 1081235633Sdim // so it's not worth implementing SmallPtrSet::swap. 1082235633Sdim PostIncLoopSet SavedPostIncLoops = PostIncLoops; 1083235633Sdim PostIncLoops.clear(); 1084235633Sdim 1085202878Srdivacky // Expand code for the start value. 1086202878Srdivacky Value *StartV = expandCodeFor(Normalized->getStart(), ExpandTy, 1087202878Srdivacky L->getHeader()->begin()); 1088202878Srdivacky 1089224145Sdim // StartV must be hoisted into L's preheader to dominate the new phi. 
1090224145Sdim assert(!isa<Instruction>(StartV) || 1091224145Sdim SE.DT->properlyDominates(cast<Instruction>(StartV)->getParent(), 1092224145Sdim L->getHeader())); 1093224145Sdim 1094235633Sdim // Expand code for the step value. Do this before creating the PHI so that PHI 1095235633Sdim // reuse code doesn't see an incomplete PHI. 1096202878Srdivacky const SCEV *Step = Normalized->getStepRecurrence(SE); 1097235633Sdim // If the stride is negative, insert a sub instead of an add for the increment 1098235633Sdim // (unless it's a constant, because subtracts of constants are canonicalized 1099235633Sdim // to adds). 1100235633Sdim bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative(); 1101235633Sdim if (useSubtract) 1102202878Srdivacky Step = SE.getNegativeSCEV(Step); 1103235633Sdim // Expand the step somewhere that dominates the loop header. 1104202878Srdivacky Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin()); 1105202878Srdivacky 1106202878Srdivacky // Create the PHI. 1107221345Sdim BasicBlock *Header = L->getHeader(); 1108221345Sdim Builder.SetInsertPoint(Header, Header->begin()); 1109221345Sdim pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header); 1110224145Sdim PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE), 1111224145Sdim Twine(IVName) + ".iv"); 1112202878Srdivacky rememberInstruction(PN); 1113202878Srdivacky 1114202878Srdivacky // Create the step instructions and populate the PHI. 1115221345Sdim for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) { 1116202878Srdivacky BasicBlock *Pred = *HPI; 1117202878Srdivacky 1118202878Srdivacky // Add a start value. 1119202878Srdivacky if (!L->contains(Pred)) { 1120202878Srdivacky PN->addIncoming(StartV, Pred); 1121202878Srdivacky continue; 1122202878Srdivacky } 1123202878Srdivacky 1124235633Sdim // Create a step value and add it to the PHI. 
1125235633Sdim // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the 1126235633Sdim // instructions at IVIncInsertPos. 1127202878Srdivacky Instruction *InsertPos = L == IVIncInsertLoop ? 1128202878Srdivacky IVIncInsertPos : Pred->getTerminator(); 1129224145Sdim Builder.SetInsertPoint(InsertPos); 1130235633Sdim Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract); 1131263509Sdim if (isa<OverflowingBinaryOperator>(IncV)) { 1132263509Sdim if (Normalized->getNoWrapFlags(SCEV::FlagNUW)) 1133263509Sdim cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap(); 1134263509Sdim if (Normalized->getNoWrapFlags(SCEV::FlagNSW)) 1135263509Sdim cast<BinaryOperator>(IncV)->setHasNoSignedWrap(); 1136263509Sdim } 1137202878Srdivacky PN->addIncoming(IncV, Pred); 1138202878Srdivacky } 1139202878Srdivacky 1140235633Sdim // After expanding subexpressions, restore the PostIncLoops set so the caller 1141235633Sdim // can ensure that IVIncrement dominates the current uses. 1142235633Sdim PostIncLoops = SavedPostIncLoops; 1143235633Sdim 1144202878Srdivacky // Remember this PHI, even in post-inc mode. 1145202878Srdivacky InsertedValues.insert(PN); 1146202878Srdivacky 1147202878Srdivacky return PN; 1148202878Srdivacky} 1149202878Srdivacky 1150202878SrdivackyValue *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) { 1151226890Sdim Type *STy = S->getType(); 1152226890Sdim Type *IntTy = SE.getEffectiveSCEVType(STy); 1153202878Srdivacky const Loop *L = S->getLoop(); 1154202878Srdivacky 1155202878Srdivacky // Determine a normalized form of this expression, which is the expression 1156202878Srdivacky // before any post-inc adjustment is made. 
1157202878Srdivacky const SCEVAddRecExpr *Normalized = S; 1158207618Srdivacky if (PostIncLoops.count(L)) { 1159207618Srdivacky PostIncLoopSet Loops; 1160207618Srdivacky Loops.insert(L); 1161207618Srdivacky Normalized = 1162207618Srdivacky cast<SCEVAddRecExpr>(TransformForPostIncUse(Normalize, S, 0, 0, 1163207618Srdivacky Loops, SE, *SE.DT)); 1164202878Srdivacky } 1165202878Srdivacky 1166202878Srdivacky // Strip off any non-loop-dominating component from the addrec start. 1167202878Srdivacky const SCEV *Start = Normalized->getStart(); 1168202878Srdivacky const SCEV *PostLoopOffset = 0; 1169218893Sdim if (!SE.properlyDominates(Start, L->getHeader())) { 1170202878Srdivacky PostLoopOffset = Start; 1171207618Srdivacky Start = SE.getConstant(Normalized->getType(), 0); 1172221345Sdim Normalized = cast<SCEVAddRecExpr>( 1173221345Sdim SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE), 1174221345Sdim Normalized->getLoop(), 1175263509Sdim Normalized->getNoWrapFlags(SCEV::FlagNW))); 1176202878Srdivacky } 1177202878Srdivacky 1178202878Srdivacky // Strip off any non-loop-dominating component from the addrec step. 1179202878Srdivacky const SCEV *Step = Normalized->getStepRecurrence(SE); 1180202878Srdivacky const SCEV *PostLoopScale = 0; 1181218893Sdim if (!SE.dominates(Step, L->getHeader())) { 1182202878Srdivacky PostLoopScale = Step; 1183207618Srdivacky Step = SE.getConstant(Normalized->getType(), 1); 1184202878Srdivacky Normalized = 1185263509Sdim cast<SCEVAddRecExpr>(SE.getAddRecExpr( 1186263509Sdim Start, Step, Normalized->getLoop(), 1187263509Sdim Normalized->getNoWrapFlags(SCEV::FlagNW))); 1188202878Srdivacky } 1189202878Srdivacky 1190202878Srdivacky // Expand the core addrec. If we need post-loop scaling, force it to 1191202878Srdivacky // expand to an integer type to avoid the need for additional casting. 1192226890Sdim Type *ExpandTy = PostLoopScale ? 
IntTy : STy; 1193202878Srdivacky PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy); 1194202878Srdivacky 1195204642Srdivacky // Accommodate post-inc mode, if necessary. 1196202878Srdivacky Value *Result; 1197207618Srdivacky if (!PostIncLoops.count(L)) 1198202878Srdivacky Result = PN; 1199202878Srdivacky else { 1200202878Srdivacky // In PostInc mode, use the post-incremented value. 1201202878Srdivacky BasicBlock *LatchBlock = L->getLoopLatch(); 1202202878Srdivacky assert(LatchBlock && "PostInc mode requires a unique loop latch!"); 1203202878Srdivacky Result = PN->getIncomingValueForBlock(LatchBlock); 1204226890Sdim 1205226890Sdim // For an expansion to use the postinc form, the client must call 1206226890Sdim // expandCodeFor with an InsertPoint that is either outside the PostIncLoop 1207226890Sdim // or dominated by IVIncInsertPos. 1208235633Sdim if (isa<Instruction>(Result) 1209235633Sdim && !SE.DT->dominates(cast<Instruction>(Result), 1210235633Sdim Builder.GetInsertPoint())) { 1211235633Sdim // The induction variable's postinc expansion does not dominate this use. 1212235633Sdim // IVUsers tries to prevent this case, so it is rare. However, it can 1213235633Sdim // happen when an IVUser outside the loop is not dominated by the latch 1214235633Sdim // block. Adjusting IVIncInsertPos before expansion begins cannot handle 1215235633Sdim // all cases. Consider a phi outide whose operand is replaced during 1216235633Sdim // expansion with the value of the postinc user. Without fundamentally 1217235633Sdim // changing the way postinc users are tracked, the only remedy is 1218235633Sdim // inserting an extra IV increment. StepV might fold into PostLoopOffset, 1219235633Sdim // but hopefully expandCodeFor handles that. 
1220235633Sdim bool useSubtract = 1221235633Sdim !ExpandTy->isPointerTy() && Step->isNonConstantNegative(); 1222235633Sdim if (useSubtract) 1223235633Sdim Step = SE.getNegativeSCEV(Step); 1224263509Sdim Value *StepV; 1225263509Sdim { 1226263509Sdim // Expand the step somewhere that dominates the loop header. 1227263509Sdim BuilderType::InsertPointGuard Guard(Builder); 1228263509Sdim StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin()); 1229263509Sdim } 1230235633Sdim Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract); 1231235633Sdim } 1232202878Srdivacky } 1233202878Srdivacky 1234202878Srdivacky // Re-apply any non-loop-dominating scale. 1235202878Srdivacky if (PostLoopScale) { 1236263509Sdim assert(S->isAffine() && "Can't linearly scale non-affine recurrences."); 1237203954Srdivacky Result = InsertNoopCastOfTo(Result, IntTy); 1238202878Srdivacky Result = Builder.CreateMul(Result, 1239202878Srdivacky expandCodeFor(PostLoopScale, IntTy)); 1240202878Srdivacky rememberInstruction(Result); 1241202878Srdivacky } 1242202878Srdivacky 1243202878Srdivacky // Re-apply any non-loop-dominating offset. 
  // Tail of expandAddRecExprLiterally: re-apply any offset that was split off
  // before expanding the recurrence itself.
  if (PostLoopOffset) {
    if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
      // Pointer-typed result: fold the offset back in as a GEP.
      const SCEV *const OffsetArray[1] = { PostLoopOffset };
      Result = expandAddToGEP(OffsetArray, OffsetArray+1, PTy, IntTy, Result);
    } else {
      // Integer result: cast (if needed) and add the offset.
      Result = InsertNoopCastOfTo(Result, IntTy);
      Result = Builder.CreateAdd(Result,
                                 expandCodeFor(PostLoopOffset, IntTy));
      rememberInstruction(Result);
    }
  }

  return Result;
}

/// visitAddRecExpr - Expand an add recurrence {Start,+,Step,...}<L>.
/// In canonical mode this is done by rewriting the recurrence in terms of
/// the loop's canonical induction variable (creating one if necessary);
/// otherwise the recurrence is expanded literally.
Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
  if (!CanonicalMode) return expandAddRecExprLiterally(S);

  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  const Loop *L = S->getLoop();

  // First check for an existing canonical IV in a suitable type.
  PHINode *CanonicalIV = 0;
  if (PHINode *PN = L->getCanonicalInductionVariable())
    if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
      CanonicalIV = PN;

  // Rewrite an AddRec in terms of the canonical induction variable, if
  // its type is more narrow.
  if (CanonicalIV &&
      SE.getTypeSizeInBits(CanonicalIV->getType()) >
      SE.getTypeSizeInBits(Ty)) {
    // Any-extend every operand up to the canonical IV's type, expand the
    // widened recurrence, then truncate the result back down to Ty.
    SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
    for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
      NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
    Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
                                       S->getNoWrapFlags(SCEV::FlagNW)));
    BasicBlock::iterator NewInsertPt =
      llvm::next(BasicBlock::iterator(cast<Instruction>(V)));
    BuilderType::InsertPointGuard Guard(Builder);
    // The truncate must be inserted at a legal position: after any PHIs,
    // debug intrinsics, and landing pads at the block head.
    while (isa<PHINode>(NewInsertPt) || isa<DbgInfoIntrinsic>(NewInsertPt) ||
           isa<LandingPadInst>(NewInsertPt))
      ++NewInsertPt;
    V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), 0,
                      NewInsertPt);
    return V;
  }

  // {X,+,F} --> X + {0,+,F}
  if (!S->getStart()->isZero()) {
    SmallVector<const SCEV *, 4> NewOps(S->op_begin(), S->op_end());
    NewOps[0] = SE.getConstant(Ty, 0);
    const SCEV *Rest = SE.getAddRecExpr(NewOps, L,
                                        S->getNoWrapFlags(SCEV::FlagNW));

    // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
    // comments on expandAddToGEP for details.
    const SCEV *Base = S->getStart();
    const SCEV *RestArray[1] = { Rest };
    // Dig into the expression to find the pointer base for a GEP.
    ExposePointerBase(Base, RestArray[0], SE);
    // If we found a pointer, expand the AddRec with a GEP.
    if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
      // Make sure the Base isn't something exotic, such as a multiplied
      // or divided pointer value. In those cases, the result type isn't
      // actually a pointer type.
      if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
        Value *StartV = expand(Base);
        assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
        return expandAddToGEP(RestArray, RestArray+1, PTy, Ty, StartV);
      }
    }

    // Just do a normal add. Pre-expand the operands to suppress folding.
    return expand(SE.getAddExpr(SE.getUnknown(expand(S->getStart())),
                                SE.getUnknown(expand(Rest))));
  }

  // If we don't yet have a canonical IV, create one.
  if (!CanonicalIV) {
    // Create and insert the PHI node for the induction variable in the
    // specified loop.
    BasicBlock *Header = L->getHeader();
    pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
    CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
                                  Header->begin());
    rememberInstruction(CanonicalIV);

    // A header may list the same predecessor more than once; only add one
    // incoming value per distinct predecessor block.
    SmallSet<BasicBlock *, 4> PredSeen;
    Constant *One = ConstantInt::get(Ty, 1);
    for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
      BasicBlock *HP = *HPI;
      if (!PredSeen.insert(HP))
        continue;

      if (L->contains(HP)) {
        // Insert a unit add instruction right before the terminator
        // corresponding to the back-edge.
        Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
                                                     "indvar.next",
                                                     HP->getTerminator());
        Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
        rememberInstruction(Add);
        CanonicalIV->addIncoming(Add, HP);
      } else {
        // Edges entering the loop start the IV at zero.
        CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
      }
    }
  }

  // {0,+,1} --> Insert a canonical induction variable into the loop!
  if (S->isAffine() && S->getOperand(1)->isOne()) {
    assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
           "IVs with types different from the canonical IV should "
           "already have been handled!");
    return CanonicalIV;
  }

  // {0,+,F} --> {0,+,1} * F

  // If this is a simple linear addrec, emit it now as a special case.
  if (S->isAffine())    // {0,+,F} --> i*F
    return
      expand(SE.getTruncateOrNoop(
        SE.getMulExpr(SE.getUnknown(CanonicalIV),
                      SE.getNoopOrAnyExtend(S->getOperand(1),
                                            CanonicalIV->getType())),
        Ty));

  // If this is a chain of recurrences, turn it into a closed form, using the
  // folders, then expandCodeFor the closed form. This allows the folders to
  // simplify the expression without having to build a bunch of special code
  // into this folder.
  const SCEV *IH = SE.getUnknown(CanonicalIV);   // Get I as a "symbolic" SCEV.

  // Promote S up to the canonical IV type, if the cast is foldable.
  const SCEV *NewS = S;
  const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
  if (isa<SCEVAddRecExpr>(Ext))
    NewS = Ext;

  const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
  //cerr << "Evaluated: " << *this << "\n to: " << *V << "\n";

  // Truncate the result down to the original type, if needed.
  const SCEV *T = SE.getTruncateOrNoop(V, Ty);
  return expand(T);
}

/// visitTruncateExpr - Expand a truncation: expand the operand in its
/// effective type, then emit a trunc instruction.
Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateTrunc(V, Ty);
  rememberInstruction(I);
  return I;
}

/// visitZeroExtendExpr - Expand a zero-extension: expand the operand in its
/// effective type, then emit a zext instruction.
Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateZExt(V, Ty);
  rememberInstruction(I);
  return I;
}

/// visitSignExtendExpr - Expand a sign-extension: expand the operand in its
/// effective type, then emit a sext instruction.
Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateSExt(V, Ty);
  rememberInstruction(I);
  return I;
}

/// visitSMaxExpr - Expand a signed-max expression as a right-to-left chain
/// of icmp sgt + select over the operands.
Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
  Value *LHS =
      expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

/// visitUMaxExpr - Expand an unsigned-max expression as a right-to-left
/// chain of icmp ugt + select over the operands.
Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpUGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

/// expandCodeFor - Move the builder to IP, then expand SH there (see the
/// other overload). The optional Ty requests a bit-width-preserving cast
/// of the result.
Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
                                   Instruction *IP) {
  Builder.SetInsertPoint(IP->getParent(), IP);
  return expandCodeFor(SH, Ty);
}

/// expandCodeFor - Expand SH at the builder's current insertion point,
/// optionally casting the result to Ty. Ty may be null; if given, it must
/// have the same bit width as SH's type (no truncation/extension here).
Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
  // Expand the code for this SCEV.
  Value *V = expand(SH);
  if (Ty) {
    assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
           "non-trivial casts should be done with the SCEVs directly!");
    V = InsertNoopCastOfTo(V, Ty);
  }
  return V;
}

/// expand - Expand a SCEV expression into IR, memoizing the result per
/// (expression, insertion point) pair.
Value *SCEVExpander::expand(const SCEV *S) {
  // Compute an insertion point for this SCEV object. Hoist the instructions
  // as far out in the loop nest as possible.
  Instruction *InsertPt = Builder.GetInsertPoint();
  // Walk outward from the innermost loop containing the insertion block
  // until the expression is no longer loop-invariant.
  for (Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock()); ;
       L = L->getParentLoop())
    if (SE.isLoopInvariant(S, L)) {
      if (!L) break;
      if (BasicBlock *Preheader = L->getLoopPreheader())
        InsertPt = Preheader->getTerminator();
      else {
        // LSR sets the insertion point for AddRec start/step values to the
        // block start to simplify value reuse, even though it's an invalid
        // position. SCEVExpander must correct for this in all cases.
        InsertPt = L->getHeader()->getFirstInsertionPt();
      }
    } else {
      // If the SCEV is computable at this level, insert it into the header
      // after the PHIs (and after any other instructions that we've inserted
      // there) so that it is guaranteed to dominate any user inside the loop.
      if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
        InsertPt = L->getHeader()->getFirstInsertionPt();
      // Step past instructions this expander already emitted (and debug
      // intrinsics), but never past the builder's own insertion point.
      while (InsertPt != Builder.GetInsertPoint()
             && (isInsertedInstruction(InsertPt)
                 || isa<DbgInfoIntrinsic>(InsertPt))) {
        InsertPt = llvm::next(BasicBlock::iterator(InsertPt));
      }
      break;
    }

  // Check to see if we already expanded this here.
  std::map<std::pair<const SCEV *, Instruction *>, TrackingVH<Value> >::iterator
    I = InsertedExpressions.find(std::make_pair(S, InsertPt));
  if (I != InsertedExpressions.end())
    return I->second;

  // The guard restores the original insertion point when we return.
  BuilderType::InsertPointGuard Guard(Builder);
  Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);

  // Expand the expression into instructions.
  Value *V = visit(S);

  // Remember the expanded value for this SCEV at this location.
  //
  // This is independent of PostIncLoops. The mapped value simply materializes
  // the expression at this insertion point. If the mapped value happened to be
  // a postinc expansion, it could be reused by a non postinc user, but only if
  // its insertion point was already at the head of the loop.
  InsertedExpressions[std::make_pair(S, InsertPt)] = V;
  return V;
}

/// rememberInstruction - Record a value created by this expander so it can
/// be told apart from pre-existing IR; post-increment expansions are kept
/// in a separate set.
void SCEVExpander::rememberInstruction(Value *I) {
  if (!PostIncLoops.empty())
    InsertedPostIncValues.insert(I);
  else
    InsertedValues.insert(I);
}

/// getOrInsertCanonicalInductionVariable - This method returns the
/// canonical induction variable of the specified type for the specified
/// loop (inserting one if there is none). A canonical induction variable
/// starts at zero and steps by one on each iteration.
PHINode *
SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
                                                    Type *Ty) {
  assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");

  // Build a SCEV for {0,+,1}<L>.
  // Conservatively use FlagAnyWrap for now.
  const SCEV *H = SE.getAddRecExpr(SE.getConstant(Ty, 0),
                                   SE.getConstant(Ty, 1), L, SCEV::FlagAnyWrap);

  // Emit code for it.
  // Preserve the caller's insertion point across the expansion.
  BuilderType::InsertPointGuard Guard(Builder);
  PHINode *V = cast<PHINode>(expandCodeFor(H, 0, L->getHeader()->begin()));

  return V;
}

/// Sort values by integer width for replaceCongruentIVs: widest integers
/// first, with all pointers at the back (pointer < pointer is false, so the
/// comparator remains a strict weak ordering).
static bool width_descending(Value *lhs, Value *rhs) {
  // Put pointers at the back and make sure pointer < pointer = false.
  if (!lhs->getType()->isIntegerTy() || !rhs->getType()->isIntegerTy())
    return rhs->getType()->isIntegerTy() && !lhs->getType()->isIntegerTy();
  return rhs->getType()->getPrimitiveSizeInBits()
    < lhs->getType()->getPrimitiveSizeInBits();
}

/// replaceCongruentIVs - Check for congruent phis in this loop header and
/// replace them with their most canonical representative. Return the number of
/// phis eliminated.
///
/// This does not depend on any SCEVExpander state but should be used in
/// the same context that SCEVExpander is used.
unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
                                           SmallVectorImpl<WeakVH> &DeadInsts,
                                           const TargetTransformInfo *TTI) {
  // Find integer phis in order of increasing width.
  SmallVector<PHINode*, 8> Phis;
  for (BasicBlock::iterator I = L->getHeader()->begin();
       PHINode *Phi = dyn_cast<PHINode>(I); ++I) {
    Phis.push_back(Phi);
  }
  // Sorting widest-first only pays off when the target can tell us which
  // truncates are free; otherwise keep header order.
  if (TTI)
    std::sort(Phis.begin(), Phis.end(), width_descending);

  unsigned NumElim = 0;
  DenseMap<const SCEV *, PHINode *> ExprToIVMap;
  // Process phis from wide to narrow, mapping wide phis to their truncation
  // so narrow phis can reuse them.
  for (SmallVectorImpl<PHINode*>::const_iterator PIter = Phis.begin(),
         PEnd = Phis.end(); PIter != PEnd; ++PIter) {
    PHINode *Phi = *PIter;

    // Fold constant phis. They may be congruent to other constant phis and
    // would confuse the logic below that expects proper IVs.
    if (Value *V = Phi->hasConstantValue()) {
      Phi->replaceAllUsesWith(V);
      DeadInsts.push_back(Phi);
      ++NumElim;
      DEBUG_WITH_TYPE(DebugType, dbgs()
                      << "INDVARS: Eliminated constant iv: " << *Phi << '\n');
      continue;
    }

    if (!SE.isSCEVable(Phi->getType()))
      continue;

    PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
    if (!OrigPhiRef) {
      // First phi seen for this expression becomes the representative.
      OrigPhiRef = Phi;
      if (Phi->getType()->isIntegerTy() && TTI
          && TTI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
        // This phi can be freely truncated to the narrowest phi type. Map the
        // truncated expression to it so it will be reused for narrow types.
        const SCEV *TruncExpr =
          SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType());
        ExprToIVMap[TruncExpr] = Phi;
      }
      continue;
    }

    // Replacing a pointer phi with an integer phi or vice-versa doesn't make
    // sense.
    if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
      continue;

    if (BasicBlock *LatchBlock = L->getLoopLatch()) {
      Instruction *OrigInc =
        cast<Instruction>(OrigPhiRef->getIncomingValueForBlock(LatchBlock));
      Instruction *IsomorphicInc =
        cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));

      // If this phi has the same width but is more canonical, replace the
      // original with it. As part of the "more canonical" determination,
      // respect a prior decision to use an IV chain.
      if (OrigPhiRef->getType() == Phi->getType()
          && !(ChainedPhis.count(Phi)
               || isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L))
          && (ChainedPhis.count(Phi)
              || isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
        std::swap(OrigPhiRef, Phi);
        std::swap(OrigInc, IsomorphicInc);
      }
      // Replacing the congruent phi is sufficient because acyclic redundancy
      // elimination, CSE/GVN, should handle the rest. However, once SCEV proves
      // that a phi is congruent, it's often the head of an IV user cycle that
      // is isomorphic with the original phi. It's worth eagerly cleaning up the
      // common case of a single IV increment so that DeleteDeadPHIs can remove
      // cycles that had postinc uses.
      const SCEV *TruncExpr = SE.getTruncateOrNoop(SE.getSCEV(OrigInc),
                                                   IsomorphicInc->getType());
      if (OrigInc != IsomorphicInc
          && TruncExpr == SE.getSCEV(IsomorphicInc)
          && ((isa<PHINode>(OrigInc) && isa<PHINode>(IsomorphicInc))
              || hoistIVInc(OrigInc, IsomorphicInc))) {
        DEBUG_WITH_TYPE(DebugType, dbgs()
                        << "INDVARS: Eliminated congruent iv.inc: "
                        << *IsomorphicInc << '\n');
        Value *NewInc = OrigInc;
        if (OrigInc->getType() != IsomorphicInc->getType()) {
          // Narrow the increment: insert the trunc/bitcast right after
          // OrigInc, or at the header's first insertion point if OrigInc is
          // itself a phi.
          Instruction *IP = isa<PHINode>(OrigInc)
            ? (Instruction*)L->getHeader()->getFirstInsertionPt()
            : OrigInc->getNextNode();
          IRBuilder<> Builder(IP);
          Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
          NewInc = Builder.
            CreateTruncOrBitCast(OrigInc, IsomorphicInc->getType(), IVName);
        }
        IsomorphicInc->replaceAllUsesWith(NewInc);
        DeadInsts.push_back(IsomorphicInc);
      }
    }
    DEBUG_WITH_TYPE(DebugType, dbgs()
                    << "INDVARS: Eliminated congruent iv: " << *Phi << '\n');
    ++NumElim;
    Value *NewIV = OrigPhiRef;
    if (OrigPhiRef->getType() != Phi->getType()) {
      // Widths differ: materialize a truncated copy of the representative.
      IRBuilder<> Builder(L->getHeader()->getFirstInsertionPt());
      Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
      NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
    }
    Phi->replaceAllUsesWith(NewIV);
    DeadInsts.push_back(Phi);
  }
  return NumElim;
}

namespace {
// Search for a SCEV subexpression that is not safe to expand.
Any expression 1696245431Sdim// that may expand to a !isSafeToSpeculativelyExecute value is unsafe, namely 1697245431Sdim// UDiv expressions. We don't know if the UDiv is derived from an IR divide 1698245431Sdim// instruction, but the important thing is that we prove the denominator is 1699245431Sdim// nonzero before expansion. 1700245431Sdim// 1701245431Sdim// IVUsers already checks that IV-derived expressions are safe. So this check is 1702245431Sdim// only needed when the expression includes some subexpression that is not IV 1703245431Sdim// derived. 1704245431Sdim// 1705245431Sdim// Currently, we only allow division by a nonzero constant here. If this is 1706245431Sdim// inadequate, we could easily allow division by SCEVUnknown by using 1707245431Sdim// ValueTracking to check isKnownNonZero(). 1708263509Sdim// 1709263509Sdim// We cannot generally expand recurrences unless the step dominates the loop 1710263509Sdim// header. The expander handles the special case of affine recurrences by 1711263509Sdim// scaling the recurrence outside the loop, but this technique isn't generally 1712263509Sdim// applicable. Expanding a nested recurrence outside a loop requires computing 1713263509Sdim// binomial coefficients. This could be done, but the recurrence has to be in a 1714263509Sdim// perfectly reduced form, which can't be guaranteed. 
1715245431Sdimstruct SCEVFindUnsafe { 1716263509Sdim ScalarEvolution &SE; 1717245431Sdim bool IsUnsafe; 1718245431Sdim 1719263509Sdim SCEVFindUnsafe(ScalarEvolution &se): SE(se), IsUnsafe(false) {} 1720245431Sdim 1721245431Sdim bool follow(const SCEV *S) { 1722263509Sdim if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) { 1723263509Sdim const SCEVConstant *SC = dyn_cast<SCEVConstant>(D->getRHS()); 1724263509Sdim if (!SC || SC->getValue()->isZero()) { 1725263509Sdim IsUnsafe = true; 1726263509Sdim return false; 1727263509Sdim } 1728263509Sdim } 1729263509Sdim if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) { 1730263509Sdim const SCEV *Step = AR->getStepRecurrence(SE); 1731263509Sdim if (!AR->isAffine() && !SE.dominates(Step, AR->getLoop()->getHeader())) { 1732263509Sdim IsUnsafe = true; 1733263509Sdim return false; 1734263509Sdim } 1735263509Sdim } 1736263509Sdim return true; 1737245431Sdim } 1738245431Sdim bool isDone() const { return IsUnsafe; } 1739245431Sdim}; 1740245431Sdim} 1741245431Sdim 1742245431Sdimnamespace llvm { 1743263509Sdimbool isSafeToExpand(const SCEV *S, ScalarEvolution &SE) { 1744263509Sdim SCEVFindUnsafe Search(SE); 1745245431Sdim visitAll(S, Search); 1746245431Sdim return !Search.IsUnsafe; 1747245431Sdim} 1748245431Sdim} 1749