ScalarEvolutionExpander.cpp revision 239462
//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/STLExtras.h"

using namespace llvm;

/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
/// reusing an existing cast if a suitable one exists, moving an existing
/// cast if a suitable one exists but isn't in the right place, or
/// creating a new one.
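///
/// For example, if V already has a user "%c = ptrtoint i8* %V to i64" with the
/// requested opcode and type, that cast is reused (or recreated at IP when it
/// sits in the wrong place) instead of emitting a duplicate cast.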
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
                                       Instruction::CastOps Op,
                                       BasicBlock::iterator IP) {
  // This function must be called with the builder having a valid insertion
  // point. It doesn't need to be the actual IP where the uses of the returned
  // cast will be added, but it must dominate such IP.
  // We use this precondition to produce a cast that will dominate all its
  // uses. In particular, this is crucial for the case where the builder's
  // insertion point *is* the point where we were asked to put the cast.
  // Since we don't know the builder's insertion point is actually
  // where the uses will be added (only that it dominates it), we are
  // not allowed to move it.
  BasicBlock::iterator BIP = Builder.GetInsertPoint();

  Instruction *Ret = NULL;

  // Check to see if there is already a cast!
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
       UI != E; ++UI) {
    User *U = *UI;
    if (U->getType() == Ty)
      if (CastInst *CI = dyn_cast<CastInst>(U))
        if (CI->getOpcode() == Op) {
          // If the cast isn't where we want it, create a new cast at IP.
          // Likewise, do not reuse a cast at BIP because it must dominate
          // instructions that might be inserted before BIP.
          if (BasicBlock::iterator(CI) != IP || BIP == IP) {
            // Create a new cast, and leave the old cast in place in case
            // it is being used as an insert point. Clear its operand
            // so that it doesn't hold anything live.
            Ret = CastInst::Create(Op, V, Ty, "", IP);
            Ret->takeName(CI);
            CI->replaceAllUsesWith(Ret);
            CI->setOperand(0, UndefValue::get(V->getType()));
            break;
          }
          Ret = CI;
          break;
        }
  }

  // Create a new cast.
  if (!Ret)
    Ret = CastInst::Create(Op, V, Ty, V->getName(), IP);

  // We assert at the end of the function since IP might point to an
  // instruction with different dominance properties than a cast
  // (an invoke for example) and not dominate BIP (but the cast does).
  assert(SE.DT->dominates(Ret, BIP));

  rememberInstruction(Ret);
  return Ret;
}

/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
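///
/// For example, a cast whose operand already has the requested type is folded
/// away, and a ptrtoint/inttoptr round trip at the same bit width returns the
/// original pointer rather than emitting a new pair of casts.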
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
  assert((Op == Instruction::BitCast ||
          Op == Instruction::PtrToInt ||
          Op == Instruction::IntToPtr) &&
         "InsertNoopCastOfTo cannot perform non-noop casts!");
  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
         "InsertNoopCastOfTo cannot change sizes!");

  // Short-circuit unnecessary bitcasts.
  if (Op == Instruction::BitCast) {
    if (V->getType() == Ty)
      return V;
    if (CastInst *CI = dyn_cast<CastInst>(V)) {
      if (CI->getOperand(0)->getType() == Ty)
        return CI->getOperand(0);
    }
  }
  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
  if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
    if (CastInst *CI = dyn_cast<CastInst>(V))
      if ((CI->getOpcode() == Instruction::PtrToInt ||
           CI->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CI->getType()) ==
          SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
        return CI->getOperand(0);
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      if ((CE->getOpcode() == Instruction::PtrToInt ||
           CE->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CE->getType()) ==
          SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
        return CE->getOperand(0);
  }

  // Fold a cast of a constant.
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(Op, C, Ty);

  // Cast the argument at the beginning of the entry block, after
  // any bitcasts of other arguments.
  if (Argument *A = dyn_cast<Argument>(V)) {
    BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
    while ((isa<BitCastInst>(IP) &&
            isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
            cast<BitCastInst>(IP)->getOperand(0) != A) ||
           isa<DbgInfoIntrinsic>(IP) ||
           isa<LandingPadInst>(IP))
      ++IP;
    return ReuseOrCreateCast(A, Ty, Op, IP);
  }

  // Cast the instruction immediately after the instruction.
  Instruction *I = cast<Instruction>(V);
  BasicBlock::iterator IP = I; ++IP;
  if (InvokeInst *II = dyn_cast<InvokeInst>(I))
    IP = II->getNormalDest()->begin();
  while (isa<PHINode>(IP) || isa<LandingPadInst>(IP))
    ++IP;
  return ReuseOrCreateCast(I, Ty, Op, IP);
}

/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation.
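///
/// For example, if the instruction just before the insertion point is already
/// "%t = add i64 %a, %b", a request for (add %a, %b) simply returns %t, and
/// when both operands are loop-invariant the new instruction is emitted in
/// the loop preheader instead of inside the loop.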
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
                                 Value *LHS, Value *RHS) {
  // Fold a binop with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      return ConstantExpr::get(Opcode, CLHS, CRHS);

  // Do a quick scan to see if we have this binop nearby. If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      // Don't count dbg.value against the ScanLimit, to avoid perturbing the
      // generated code.
      if (isa<DbgInfoIntrinsic>(IP))
        ScanLimit++;
      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
          IP->getOperand(1) == RHS)
        return IP;
      if (IP == BlockBegin) break;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
  }

  // If we haven't found this binop, insert it.
  Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
  BO->setDebugLoc(SaveInsertPt->getDebugLoc());
  rememberInstruction(BO);

  // Restore the original insert point.
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  return BO;
}

/// FactorOutConstant - Test if S is divisible by Factor, using signed
/// division. If so, update S with Factor divided out and return true.
/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
/// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
/// unnecessary; in its place, just signed-divide Ops[i] by the scale and
/// check to see if the divide was folded.
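///
/// For example, with S = (8 * %x) and Factor = 4, S becomes (2 * %x); with
/// S = 10 and Factor = 4, S becomes 2 and the leftover 2 is added to
/// Remainder.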
static bool FactorOutConstant(const SCEV *&S,
                              const SCEV *&Remainder,
                              const SCEV *Factor,
                              ScalarEvolution &SE,
                              const TargetData *TD) {
  // Everything is divisible by one.
  if (Factor->isOne())
    return true;

  // x/x == 1.
  if (S == Factor) {
    S = SE.getConstant(S->getType(), 1);
    return true;
  }

  // For a Constant, check for a multiple of the given factor.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    // 0/x == 0.
    if (C->isZero())
      return true;
    // Check for divisibility.
    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
      ConstantInt *CI =
        ConstantInt::get(SE.getContext(),
                         C->getValue()->getValue().sdiv(
                             FC->getValue()->getValue()));
      // If the quotient is zero and the remainder is non-zero, reject
      // the value at this scale. It will be considered for subsequent
      // smaller scales.
      if (!CI->isZero()) {
        const SCEV *Div = SE.getConstant(CI);
        S = Div;
        Remainder =
          SE.getAddExpr(Remainder,
                        SE.getConstant(C->getValue()->getValue().srem(
                            FC->getValue()->getValue())));
        return true;
      }
    }
  }

  // In a Mul, check if there is a constant operand which is a multiple
  // of the given factor.
  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    if (TD) {
      // With TargetData, the size is known. Check if there is a constant
      // operand which is a multiple of the given factor. If so, we can
      // factor it.
      const SCEVConstant *FC = cast<SCEVConstant>(Factor);
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
        if (!C->getValue()->getValue().srem(FC->getValue()->getValue())) {
          SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
          NewMulOps[0] =
            SE.getConstant(C->getValue()->getValue().sdiv(
                               FC->getValue()->getValue()));
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
    } else {
      // Without TargetData, check if Factor can be factored out of any of the
      // Mul's operands. If so, we can just remove it.
      for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
        const SCEV *SOp = M->getOperand(i);
        const SCEV *Remainder = SE.getConstant(SOp->getType(), 0);
        if (FactorOutConstant(SOp, Remainder, Factor, SE, TD) &&
            Remainder->isZero()) {
          SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
          NewMulOps[i] = SOp;
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
      }
    }
  }

  // In an AddRec, check if both start and step are divisible.
  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    const SCEV *Step = A->getStepRecurrence(SE);
    const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
    if (!FactorOutConstant(Step, StepRem, Factor, SE, TD))
      return false;
    if (!StepRem->isZero())
      return false;
    const SCEV *Start = A->getStart();
    if (!FactorOutConstant(Start, Remainder, Factor, SE, TD))
      return false;
    // FIXME: can use A->getNoWrapFlags(FlagNW)
    S = SE.getAddRecExpr(Start, Step, A->getLoop(), SCEV::FlagAnyWrap);
    return true;
  }

  return false;
}

/// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
/// is the number of SCEVAddRecExprs present, which are kept at the end of
/// the list.
///
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
                                Type *Ty,
                                ScalarEvolution &SE) {
  unsigned NumAddRecs = 0;
  for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
    ++NumAddRecs;
  // Group Ops into non-addrecs and addrecs.
  SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
  SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
  // Let ScalarEvolution sort and simplify the non-addrecs list.
  const SCEV *Sum = NoAddRecs.empty() ?
                    SE.getConstant(Ty, 0) :
                    SE.getAddExpr(NoAddRecs);
  // If it returned an add, use the operands. Otherwise it simplified
  // the sum into a single value, so just use that.
  Ops.clear();
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
    Ops.append(Add->op_begin(), Add->op_end());
  else if (!Sum->isZero())
    Ops.push_back(Sum);
  // Then append the addrecs.
  Ops.append(AddRecs.begin(), AddRecs.end());
}

/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
/// This helps expose more opportunities for folding parts of the expressions
/// into GEP indices.
///
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
                         Type *Ty,
                         ScalarEvolution &SE) {
  // Find the addrecs.
  SmallVector<const SCEV *, 8> AddRecs;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
      const SCEV *Start = A->getStart();
      if (Start->isZero()) break;
      const SCEV *Zero = SE.getConstant(Ty, 0);
      AddRecs.push_back(SE.getAddRecExpr(Zero,
                                         A->getStepRecurrence(SE),
                                         A->getLoop(),
                                         // FIXME: A->getNoWrapFlags(FlagNW)
                                         SCEV::FlagAnyWrap));
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
        Ops[i] = Zero;
        Ops.append(Add->op_begin(), Add->op_end());
        e += Add->getNumOperands();
      } else {
        Ops[i] = Start;
      }
    }
  if (!AddRecs.empty()) {
    // Add the addrecs onto the end of the list.
    Ops.append(AddRecs.begin(), AddRecs.end());
    // Resort the operand list, moving any constants to the front.
    SimplifyAddOperands(Ops, Ty, SE);
  }
}

/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
/// for details.
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
///
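/// For example, when expanding (%p + 4 * %i) where %p has type i32* and i32
/// occupies 4 bytes, the 4 is factored out as the element size and the sum
/// is emitted as "getelementptr i32* %p, i64 %i" rather than as a
/// ptrtoint/add/inttoptr sequence.
///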
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
                                    const SCEV *const *op_end,
                                    PointerType *PTy,
                                    Type *Ty,
                                    Value *V) {
  Type *ElTy = PTy->getElementType();
  SmallVector<Value *, 4> GepIndices;
  SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
  bool AnyNonZeroIndices = false;

  // Split AddRecs up into parts as either of the parts may be usable
  // without the other.
  SplitAddRecs(Ops, Ty, SE);

  // Descend down the pointer's type and attempt to convert the other
  // operands into GEP indices, at each level. The first index in a GEP
  // indexes into the array implied by the pointer operand; the rest of
  // the indices index into the element or field type selected by the
  // preceding index.
  for (;;) {
    // If the scale size is not 0, attempt to factor out a scale for
    // array indexing.
    SmallVector<const SCEV *, 8> ScaledOps;
    if (ElTy->isSized()) {
      const SCEV *ElSize = SE.getSizeOfExpr(ElTy);
      if (!ElSize->isZero()) {
        SmallVector<const SCEV *, 8> NewOps;
        for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
          const SCEV *Op = Ops[i];
          const SCEV *Remainder = SE.getConstant(Ty, 0);
          if (FactorOutConstant(Op, Remainder, ElSize, SE, SE.TD)) {
            // Op now has ElSize factored out.
            ScaledOps.push_back(Op);
            if (!Remainder->isZero())
              NewOps.push_back(Remainder);
            AnyNonZeroIndices = true;
          } else {
            // The operand was not divisible, so add it to the list of operands
            // we'll scan next iteration.
            NewOps.push_back(Ops[i]);
          }
        }
        // If we made any changes, update Ops.
        if (!ScaledOps.empty()) {
          Ops = NewOps;
          SimplifyAddOperands(Ops, Ty, SE);
        }
      }
    }

    // Record the scaled array index for this level of the type. If
    // we didn't find any operands that could be factored, tentatively
    // assume that element zero was selected (since the zero offset
    // would obviously be folded away).
    Value *Scaled = ScaledOps.empty() ?
                    Constant::getNullValue(Ty) :
                    expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
    GepIndices.push_back(Scaled);

    // Collect struct field index operands.
    while (StructType *STy = dyn_cast<StructType>(ElTy)) {
      bool FoundFieldNo = false;
      // An empty struct has no fields.
      if (STy->getNumElements() == 0) break;
      if (SE.TD) {
        // With TargetData, field offsets are known. See if a constant offset
        // falls within any of the struct fields.
        if (Ops.empty()) break;
        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
          if (SE.getTypeSizeInBits(C->getType()) <= 64) {
            const StructLayout &SL = *SE.TD->getStructLayout(STy);
            uint64_t FullOffset = C->getValue()->getZExtValue();
            if (FullOffset < SL.getSizeInBytes()) {
              unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
              GepIndices.push_back(
                ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
              ElTy = STy->getTypeAtIndex(ElIdx);
              Ops[0] =
                SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
              AnyNonZeroIndices = true;
              FoundFieldNo = true;
            }
          }
      } else {
        // Without TargetData, just check for an offsetof expression of the
        // appropriate struct type.
        for (unsigned i = 0, e = Ops.size(); i != e; ++i)
          if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Ops[i])) {
            Type *CTy;
            Constant *FieldNo;
            if (U->isOffsetOf(CTy, FieldNo) && CTy == STy) {
              GepIndices.push_back(FieldNo);
              ElTy =
                STy->getTypeAtIndex(cast<ConstantInt>(FieldNo)->getZExtValue());
              Ops[i] = SE.getConstant(Ty, 0);
              AnyNonZeroIndices = true;
              FoundFieldNo = true;
              break;
            }
          }
      }
      // If no struct field offsets were found, tentatively assume that
      // field zero was selected (since the zero offset would obviously
      // be folded away).
      if (!FoundFieldNo) {
        ElTy = STy->getTypeAtIndex(0u);
        GepIndices.push_back(
          Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
      }
    }

    if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
      ElTy = ATy->getElementType();
    else
      break;
  }

  // If none of the operands were convertible to proper GEP indices, cast
  // the base to i8* and do an ugly getelementptr with that. It's still
  // better than ptrtoint+arithmetic+inttoptr at least.
  if (!AnyNonZeroIndices) {
    // Cast the base to i8*.
    V = InsertNoopCastOfTo(V,
          Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));

    assert(!isa<Instruction>(V) ||
           SE.DT->dominates(cast<Instruction>(V), Builder.GetInsertPoint()));

    // Expand the operands for a plain byte offset.
    Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);

    // Fold a GEP with constant operands.
    if (Constant *CLHS = dyn_cast<Constant>(V))
      if (Constant *CRHS = dyn_cast<Constant>(Idx))
        return ConstantExpr::getGetElementPtr(CLHS, CRHS);

    // Do a quick scan to see if we have this GEP nearby. If so, reuse it.
    unsigned ScanLimit = 6;
    BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
    // Scanning starts from the last instruction before the insertion point.
    BasicBlock::iterator IP = Builder.GetInsertPoint();
    if (IP != BlockBegin) {
      --IP;
      for (; ScanLimit; --IP, --ScanLimit) {
        // Don't count dbg.value against the ScanLimit, to avoid perturbing the
        // generated code.
        if (isa<DbgInfoIntrinsic>(IP))
          ScanLimit++;
        if (IP->getOpcode() == Instruction::GetElementPtr &&
            IP->getOperand(0) == V && IP->getOperand(1) == Idx)
          return IP;
        if (IP == BlockBegin) break;
      }
    }

    // Save the original insertion point so we can restore it when we're done.
    BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
    BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
    }

    // Emit a GEP.
    Value *GEP = Builder.CreateGEP(V, Idx, "uglygep");
    rememberInstruction(GEP);

    // Restore the original insert point.
    if (SaveInsertBB)
      restoreInsertPoint(SaveInsertBB, SaveInsertPt);

    return GEP;
  }

  // Save the original insertion point so we can restore it when we're done.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(V)) break;

    bool AnyIndexNotLoopInvariant = false;
    for (SmallVectorImpl<Value *>::const_iterator I = GepIndices.begin(),
         E = GepIndices.end(); I != E; ++I)
      if (!L->isLoopInvariant(*I)) {
        AnyIndexNotLoopInvariant = true;
        break;
      }
    if (AnyIndexNotLoopInvariant)
      break;

    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
  }

  // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
  // because ScalarEvolution may have changed the address arithmetic to
  // compute a value which is beyond the end of the allocated object.
  Value *Casted = V;
  if (V->getType() != PTy)
    Casted = InsertNoopCastOfTo(Casted, PTy);
  Value *GEP = Builder.CreateGEP(Casted,
                                 GepIndices,
                                 "scevgep");
  Ops.push_back(SE.getUnknown(GEP));
  rememberInstruction(GEP);

  // Restore the original insert point.
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  return expand(SE.getAddExpr(Ops));
}

/// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
/// SCEV expansion. If they are nested, this is the most nested. If they are
/// neighboring, pick the later.
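///
/// For example, mixing a value from an inner loop with one from its parent
/// picks the inner loop; for two sibling loops, the one whose header is
/// dominated by the other's header is picked.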
static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
                                        DominatorTree &DT) {
  if (!A) return B;
  if (!B) return A;
  if (A->contains(B)) return B;
  if (B->contains(A)) return A;
  if (DT.dominates(A->getHeader(), B->getHeader())) return B;
  if (DT.dominates(B->getHeader(), A->getHeader())) return A;
  return A; // Arbitrarily break the tie.
}

/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop.
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
  // Test whether we've already computed the most relevant loop for this SCEV.
  std::pair<DenseMap<const SCEV *, const Loop *>::iterator, bool> Pair =
    RelevantLoops.insert(std::make_pair(S, static_cast<const Loop *>(0)));
  if (!Pair.second)
    return Pair.first->second;

  if (isa<SCEVConstant>(S))
    // A constant has no relevant loops.
    return 0;
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
      return Pair.first->second = SE.LI->getLoopFor(I->getParent());
    // A non-instruction has no relevant loops.
    return 0;
  }
  if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
    const Loop *L = 0;
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      L = AR->getLoop();
    for (SCEVNAryExpr::op_iterator I = N->op_begin(), E = N->op_end();
         I != E; ++I)
      L = PickMostRelevantLoop(L, getRelevantLoop(*I), *SE.DT);
    return RelevantLoops[N] = L;
  }
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
    const Loop *Result = getRelevantLoop(C->getOperand());
    return RelevantLoops[C] = Result;
  }
  if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
    const Loop *Result =
      PickMostRelevantLoop(getRelevantLoop(D->getLHS()),
                           getRelevantLoop(D->getRHS()),
                           *SE.DT);
    return RelevantLoops[D] = Result;
  }
  llvm_unreachable("Unexpected SCEV type!");
}

namespace {

/// LoopCompare - Compare loops by PickMostRelevantLoop.
class LoopCompare {
  DominatorTree &DT;
public:
  explicit LoopCompare(DominatorTree &dt) : DT(dt) {}

  bool operator()(std::pair<const Loop *, const SCEV *> LHS,
                  std::pair<const Loop *, const SCEV *> RHS) const {
    // Keep pointer operands sorted at the end.
    if (LHS.second->getType()->isPointerTy() !=
        RHS.second->getType()->isPointerTy())
      return LHS.second->getType()->isPointerTy();

    // Compare loops with PickMostRelevantLoop.
    if (LHS.first != RHS.first)
      return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;

    // If one operand is a non-constant negative and the other is not,
    // put the non-constant negative on the right so that a sub can
    // be used instead of a negate and add.
    if (LHS.second->isNonConstantNegative()) {
      if (!RHS.second->isNonConstantNegative())
        return false;
    } else if (RHS.second->isNonConstantNegative())
      return true;

    // Otherwise they are equivalent according to this comparison.
    return false;
  }
};

}

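// For example, for an add of (%p + %a + 5) where %p is a pointer, the sort
// used in visitAddExpr below places %p first so it can serve as a GEP base,
// and orders the remaining operands so that values from outer loops precede
// values from inner loops, keeping the loop-invariant prefix hoistable.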
Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the add operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal, and
  // so that pointer operands are inserted first, which the code below relies on
  // to form more involved GEPs.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants and
  // pointer operands precede non-pointer operands.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));

  // Emit instructions to add all the operands. Hoist as much as possible
  // out of loops, and form meaningful getelementptrs where possible.
  Value *Sum = 0;
  for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
       I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
    const Loop *CurLoop = I->first;
    const SCEV *Op = I->second;
    if (!Sum) {
      // This is the first operand. Just expand it.
      Sum = expand(Op);
      ++I;
    } else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
      // The running sum expression is a pointer. Try to form a getelementptr
      // at this level with that as the base.
      SmallVector<const SCEV *, 4> NewOps;
      for (; I != E && I->first == CurLoop; ++I) {
        // If the operand is SCEVUnknown and not instructions, peek through
        // it, to enable more of it to be folded into the GEP.
        const SCEV *X = I->second;
        if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
          if (!isa<Instruction>(U->getValue()))
            X = SE.getSCEV(U->getValue());
        NewOps.push_back(X);
      }
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
    } else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
      // The running sum is an integer, and there's a pointer at this level.
      // Try to form a getelementptr. If the running sum is instructions,
      // use a SCEVUnknown to avoid re-analyzing them.
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) :
                                               SE.getSCEV(Sum));
      for (++I; I != E && I->first == CurLoop; ++I)
        NewOps.push_back(I->second);
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
    } else if (Op->isNonConstantNegative()) {
      // Instead of doing a negate and add, just do a subtract.
      Value *W = expandCodeFor(SE.getNegativeSCEV(Op), Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      Sum = InsertBinop(Instruction::Sub, Sum, W);
      ++I;
    } else {
      // A simple add.
      Value *W = expandCodeFor(Op, Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Sum)) std::swap(Sum, W);
      Sum = InsertBinop(Instruction::Add, Sum, W);
      ++I;
    }
  }

  return Sum;
}

Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the mul operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));

  // Emit instructions to mul all the operands. Hoist as much as possible
  // out of loops.
  Value *Prod = 0;
  for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
       I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
    const SCEV *Op = I->second;
    if (!Prod) {
      // This is the first operand. Just expand it.
      Prod = expand(Op);
      ++I;
    } else if (Op->isAllOnesValue()) {
      // Instead of doing a multiply by negative one, just do a negate.
      Prod = InsertNoopCastOfTo(Prod, Ty);
      Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod);
      ++I;
    } else {
      // A simple mul.
      Value *W = expandCodeFor(Op, Ty);
      Prod = InsertNoopCastOfTo(Prod, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Prod)) std::swap(Prod, W);
      Prod = InsertBinop(Instruction::Mul, Prod, W);
      ++I;
    }
  }

  return Prod;
}

Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  Value *LHS = expandCodeFor(S->getLHS(), Ty);
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
    const APInt &RHS = SC->getValue()->getValue();
    if (RHS.isPowerOf2())
      return InsertBinop(Instruction::LShr, LHS,
                         ConstantInt::get(Ty, RHS.logBase2()));
  }

  Value *RHS = expandCodeFor(S->getRHS(), Ty);
  return InsertBinop(Instruction::UDiv, LHS, RHS);
}

/// Move parts of Base into Rest to leave Base with the minimal
/// expression that provides a pointer operand suitable for a
/// GEP expansion.
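///
/// For example, Base = {%p,+,%s} with Rest = %r becomes Base = %p and
/// Rest = (%r + {0,+,%s}), exposing the plain pointer %p as a GEP base.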
static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
                              ScalarEvolution &SE) {
  while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
    Base = A->getStart();
    Rest = SE.getAddExpr(Rest,
                         SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
                                          A->getStepRecurrence(SE),
                                          A->getLoop(),
                                          // FIXME: A->getNoWrapFlags(FlagNW)
                                          SCEV::FlagAnyWrap));
  }
  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
    Base = A->getOperand(A->getNumOperands()-1);
    SmallVector<const SCEV *, 8> NewAddOps(A->op_begin(), A->op_end());
    NewAddOps.back() = Rest;
    Rest = SE.getAddExpr(NewAddOps);
    ExposePointerBase(Base, Rest, SE);
  }
}

/// Determine if this is a well-behaved chain of instructions leading back to
/// the PHI. If so, it may be reused by expanded expressions.
bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                         const Loop *L) {
  if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
      (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
    return false;
  // If any of the operands don't dominate the insert position, bail.
  // Addrec operands are always loop-invariant, so this can only happen
  // if there are instructions which haven't been hoisted.
  if (L == IVIncInsertLoop) {
    for (User::op_iterator OI = IncV->op_begin()+1,
         OE = IncV->op_end(); OI != OE; ++OI)
      if (Instruction *OInst = dyn_cast<Instruction>(OI))
        if (!SE.DT->dominates(OInst, IVIncInsertPos))
          return false;
  }
  // Advance to the next instruction.
  IncV = dyn_cast<Instruction>(IncV->getOperand(0));
  if (!IncV)
    return false;

  if (IncV->mayHaveSideEffects())
    return false;

  if (IncV != PN)
    return true;

  return isNormalAddRecExprPHI(PN, IncV, L);
}

/// getIVIncOperand returns an induction variable increment's induction
/// variable operand.
///
/// If allowScale is set, any type of GEP is allowed as long as the nonIV
/// operands dominate InsertPos.
///
/// If allowScale is not set, ensure that a GEP increment conforms to one of the
/// simple patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP. If the pattern isn't recognized, return NULL.
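///
/// For example, for "%iv.next = add i64 %iv, %step" this returns %iv, and for
/// an address-size increment "%iv.next = getelementptr i8* %iv, i64 %step" it
/// likewise returns %iv, provided the non-IV operands dominate InsertPos.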
Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
                                           Instruction *InsertPos,
                                           bool allowScale) {
  if (IncV == InsertPos)
    return NULL;

  switch (IncV->getOpcode()) {
  default:
    return NULL;
  // Check for a simple Add/Sub or GEP of a loop invariant step.
  case Instruction::Add:
  case Instruction::Sub: {
    Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
    if (!OInst || SE.DT->dominates(OInst, InsertPos))
      return dyn_cast<Instruction>(IncV->getOperand(0));
    return NULL;
  }
  case Instruction::BitCast:
    return dyn_cast<Instruction>(IncV->getOperand(0));
  case Instruction::GetElementPtr:
    for (Instruction::op_iterator I = IncV->op_begin()+1, E = IncV->op_end();
         I != E; ++I) {
      if (isa<Constant>(*I))
        continue;
      if (Instruction *OInst = dyn_cast<Instruction>(*I)) {
        if (!SE.DT->dominates(OInst, InsertPos))
          return NULL;
      }
      if (allowScale) {
        // allow any kind of GEP as long as it can be hoisted.
        continue;
      }
      // This must be a pointer addition of constants (pretty), which is already
      // handled, or some number of address-size elements (ugly). Ugly geps
      // have 2 operands. i1* is used by the expander to represent an
      // address-size element.
      if (IncV->getNumOperands() != 2)
        return NULL;
      unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
      if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
          && IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
        return NULL;
      break;
    }
    return dyn_cast<Instruction>(IncV->getOperand(0));
  }
}

/// hoistIVInc - Attempt to hoist a simple IV increment above InsertPos to make
/// it available to other uses in this loop. Recursively hoist any operands,
/// until we reach a value that dominates InsertPos.
bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
  if (SE.DT->dominates(IncV, InsertPos))
    return true;

  // InsertPos must itself dominate IncV so that IncV's new position satisfies
  // its existing users.
  if (isa<PHINode>(InsertPos)
      || !SE.DT->dominates(InsertPos->getParent(), IncV->getParent()))
    return false;

  // Check that the chain of IV operands leading back to Phi can be hoisted.
  SmallVector<Instruction*, 4> IVIncs;
  for(;;) {
    Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
    if (!Oper)
      return false;
    // IncV is safe to hoist.
    IVIncs.push_back(IncV);
    IncV = Oper;
    if (SE.DT->dominates(IncV, InsertPos))
      break;
  }
  for (SmallVectorImpl<Instruction*>::reverse_iterator I = IVIncs.rbegin(),
       E = IVIncs.rend(); I != E; ++I) {
    (*I)->moveBefore(InsertPos);
  }
  return true;
}

/// Determine if this cyclic phi is in a form that would have been generated by
/// LSR. We don't care if the phi was actually expanded in this pass, as long
/// as it is in a low-cost form, for example, no implied multiplication.
/// This should match any patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP.
bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                           const Loop *L) {
  for(Instruction *IVOper = IncV;
      (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
                                /*allowScale=*/false));) {
    if (IVOper == PN)
      return true;
  }
  return false;
}

/// expandIVInc - Expand an IV increment at Builder's current InsertPos.
/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
/// need to materialize IV increments elsewhere to handle difficult situations.
Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
                                 Type *ExpandTy, Type *IntTy,
                                 bool useSubtract) {
  Value *IncV;
  // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
  if (ExpandTy->isPointerTy()) {
    PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
    // If the step isn't constant, don't use an implicitly scaled GEP, because
    // that would require a multiply inside the loop.
    if (!isa<ConstantInt>(StepV))
      GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
                                  GEPPtrTy->getAddressSpace());
    const SCEV *const StepArray[1] = { SE.getSCEV(StepV) };
    IncV = expandAddToGEP(StepArray, StepArray+1, GEPPtrTy, IntTy, PN);
    if (IncV->getType() != PN->getType()) {
      IncV = Builder.CreateBitCast(IncV, PN->getType());
      rememberInstruction(IncV);
    }
  } else {
    IncV = useSubtract ?
      Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
      Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
    rememberInstruction(IncV);
  }
  return IncV;
}

/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
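///
/// For example, for {%start,+,%step}<%loop> this creates (or reuses) a phi in
/// the loop header, roughly:
///   %x.iv      = phi [ %start, %preheader ], [ %x.iv.next, %latch ]
///   %x.iv.next = add %x.iv, %step   ; emitted in the latch
/// where the "x" prefix stands for this expander's IVName.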
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
                                        const Loop *L,
                                        Type *ExpandTy,
                                        Type *IntTy) {
  assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position");

  // Reuse a previously-inserted PHI, if present.
  BasicBlock *LatchBlock = L->getLoopLatch();
  if (LatchBlock) {
    for (BasicBlock::iterator I = L->getHeader()->begin();
         PHINode *PN = dyn_cast<PHINode>(I); ++I) {
      if (!SE.isSCEVable(PN->getType()) ||
          (SE.getEffectiveSCEVType(PN->getType()) !=
           SE.getEffectiveSCEVType(Normalized->getType())) ||
          SE.getSCEV(PN) != Normalized)
        continue;

      Instruction *IncV =
        cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock));

      if (LSRMode) {
        if (!isExpandedAddRecExprPHI(PN, IncV, L))
          continue;
        if (L == IVIncInsertLoop && !hoistIVInc(IncV, IVIncInsertPos))
          continue;
      }
      else {
        if (!isNormalAddRecExprPHI(PN, IncV, L))
          continue;
        if (L == IVIncInsertLoop)
          do {
            if (SE.DT->dominates(IncV, IVIncInsertPos))
              break;
            // Make sure the increment is where we want it. But don't move it
            // down past a potential existing post-inc user.
            IncV->moveBefore(IVIncInsertPos);
            IVIncInsertPos = IncV;
            IncV = cast<Instruction>(IncV->getOperand(0));
          } while (IncV != PN);
      }
      // Ok, the add recurrence looks usable.
      // Remember this PHI, even in post-inc mode.
      InsertedValues.insert(PN);
      // Remember the increment.
      rememberInstruction(IncV);
      return PN;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

  // Another AddRec may need to be recursively expanded below. For example, if
  // this AddRec is quadratic, the StepV may itself be an AddRec in this
  // loop. Remove this loop from the PostIncLoops set before expanding such
  // AddRecs. Otherwise, we cannot find a valid position for the step
  // (i.e. StepV can never dominate its loop header). Ideally, we could do
  // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
  // so it's not worth implementing SmallPtrSet::swap.
  PostIncLoopSet SavedPostIncLoops = PostIncLoops;
  PostIncLoops.clear();

  // Expand code for the start value.
  Value *StartV = expandCodeFor(Normalized->getStart(), ExpandTy,
                                L->getHeader()->begin());

  // StartV must be hoisted into L's preheader to dominate the new phi.
  assert(!isa<Instruction>(StartV) ||
         SE.DT->properlyDominates(cast<Instruction>(StartV)->getParent(),
                                  L->getHeader()));

  // Expand code for the step value. Do this before creating the PHI so that PHI
  // reuse code doesn't see an incomplete PHI.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  // If the stride is negative, insert a sub instead of an add for the increment
  // (unless it's a constant, because subtracts of constants are canonicalized
  // to adds).
  bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
  if (useSubtract)
    Step = SE.getNegativeSCEV(Step);
  // Expand the step somewhere that dominates the loop header.
  Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());

  // Create the PHI.
  BasicBlock *Header = L->getHeader();
  Builder.SetInsertPoint(Header, Header->begin());
  pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
  PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
                                  Twine(IVName) + ".iv");
  rememberInstruction(PN);

  // Create the step instructions and populate the PHI.
  for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
    BasicBlock *Pred = *HPI;

    // Add a start value.
    if (!L->contains(Pred)) {
      PN->addIncoming(StartV, Pred);
      continue;
    }

    // Create a step value and add it to the PHI.
    // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the
    // instructions at IVIncInsertPos.
    Instruction *InsertPos = L == IVIncInsertLoop ?
      IVIncInsertPos : Pred->getTerminator();
    Builder.SetInsertPoint(InsertPos);
    Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);

    PN->addIncoming(IncV, Pred);
  }

  // Restore the original insert point.
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  // After expanding subexpressions, restore the PostIncLoops set so the caller
  // can ensure that IVIncrement dominates the current uses.
  PostIncLoops = SavedPostIncLoops;

  // Remember this PHI, even in post-inc mode.
  InsertedValues.insert(PN);

  return PN;
}

Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
  Type *STy = S->getType();
  Type *IntTy = SE.getEffectiveSCEVType(STy);
  const Loop *L = S->getLoop();

  // Determine a normalized form of this expression, which is the expression
  // before any post-inc adjustment is made.
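  // (For a use in post-increment position, the value ultimately returned
  // below is the incremented value from the loop latch rather than the PHI
  // itself.)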
  const SCEVAddRecExpr *Normalized = S;
  if (PostIncLoops.count(L)) {
    PostIncLoopSet Loops;
    Loops.insert(L);
    Normalized =
      cast<SCEVAddRecExpr>(TransformForPostIncUse(Normalize, S, 0, 0,
                                                  Loops, SE, *SE.DT));
  }

  // Strip off any non-loop-dominating component from the addrec start.
  const SCEV *Start = Normalized->getStart();
  const SCEV *PostLoopOffset = 0;
  if (!SE.properlyDominates(Start, L->getHeader())) {
    PostLoopOffset = Start;
    Start = SE.getConstant(Normalized->getType(), 0);
    Normalized = cast<SCEVAddRecExpr>(
      SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
                       Normalized->getLoop(),
                       // FIXME: Normalized->getNoWrapFlags(FlagNW)
                       SCEV::FlagAnyWrap));
  }

  // Strip off any non-loop-dominating component from the addrec step.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  const SCEV *PostLoopScale = 0;
  if (!SE.dominates(Step, L->getHeader())) {
    PostLoopScale = Step;
    Step = SE.getConstant(Normalized->getType(), 1);
    Normalized =
      cast<SCEVAddRecExpr>(SE.getAddRecExpr(Start, Step,
                                            Normalized->getLoop(),
                                            // FIXME: Normalized
                                            // ->getNoWrapFlags(FlagNW)
                                            SCEV::FlagAnyWrap));
  }

  // Expand the core addrec. If we need post-loop scaling, force it to
  // expand to an integer type to avoid the need for additional casting.
  Type *ExpandTy = PostLoopScale ? IntTy : STy;
  PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy);

  // Accommodate post-inc mode, if necessary.
  Value *Result;
  if (!PostIncLoops.count(L))
    Result = PN;
  else {
    // In PostInc mode, use the post-incremented value.
    BasicBlock *LatchBlock = L->getLoopLatch();
    assert(LatchBlock && "PostInc mode requires a unique loop latch!");
    Result = PN->getIncomingValueForBlock(LatchBlock);

    // For an expansion to use the postinc form, the client must call
    // expandCodeFor with an InsertPoint that is either outside the
    // PostIncLoop or dominated by IVIncInsertPos.
    if (isa<Instruction>(Result) &&
        !SE.DT->dominates(cast<Instruction>(Result),
                          Builder.GetInsertPoint())) {
      // The induction variable's postinc expansion does not dominate this
      // use. IVUsers tries to prevent this case, so it is rare. However, it
      // can happen when an IVUser outside the loop is not dominated by the
      // latch block. Adjusting IVIncInsertPos before expansion begins cannot
      // handle all cases. Consider a phi outside the loop whose operand is
      // replaced during expansion with the value of the postinc user. Without
      // fundamentally changing the way postinc users are tracked, the only
      // remedy is inserting an extra IV increment. StepV might fold into
      // PostLoopOffset, but hopefully expandCodeFor handles that.
      bool useSubtract =
        !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
      if (useSubtract)
        Step = SE.getNegativeSCEV(Step);
      // Expand the step somewhere that dominates the loop header.
      BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
      BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
      Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());
      // Restore the insertion point to the place where the caller has
      // determined dominates all uses.
      restoreInsertPoint(SaveInsertBB, SaveInsertPt);
      Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
    }
  }

  // Re-apply any non-loop-dominating scale.
  if (PostLoopScale) {
    Result = InsertNoopCastOfTo(Result, IntTy);
    Result = Builder.CreateMul(Result,
                               expandCodeFor(PostLoopScale, IntTy));
    rememberInstruction(Result);
  }

  // Re-apply any non-loop-dominating offset.
  if (PostLoopOffset) {
    if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
      const SCEV *const OffsetArray[1] = { PostLoopOffset };
      Result = expandAddToGEP(OffsetArray, OffsetArray+1, PTy, IntTy, Result);
    } else {
      Result = InsertNoopCastOfTo(Result, IntTy);
      Result = Builder.CreateAdd(Result,
                                 expandCodeFor(PostLoopOffset, IntTy));
      rememberInstruction(Result);
    }
  }

  return Result;
}

Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
  if (!CanonicalMode) return expandAddRecExprLiterally(S);

  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  const Loop *L = S->getLoop();

  // First check for an existing canonical IV in a suitable type.
  PHINode *CanonicalIV = 0;
  if (PHINode *PN = L->getCanonicalInductionVariable())
    if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
      CanonicalIV = PN;

  // Rewrite an AddRec in terms of the canonical induction variable, if
  // its type is narrower.
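  // For example, with an i64 canonical IV and an i32 AddRec {0,+,2}<L>, the
  // operands are any-extended to i64, the wide recurrence is expanded, and
  // the result is truncated back to i32 just below.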
  if (CanonicalIV &&
      SE.getTypeSizeInBits(CanonicalIV->getType()) >
      SE.getTypeSizeInBits(Ty)) {
    SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
    for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
      NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
    Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
                                       // FIXME: S->getNoWrapFlags(FlagNW)
                                       SCEV::FlagAnyWrap));
    BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
    BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
    BasicBlock::iterator NewInsertPt =
      llvm::next(BasicBlock::iterator(cast<Instruction>(V)));
    while (isa<PHINode>(NewInsertPt) || isa<DbgInfoIntrinsic>(NewInsertPt) ||
           isa<LandingPadInst>(NewInsertPt))
      ++NewInsertPt;
    V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), 0,
                      NewInsertPt);
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);
    return V;
  }

  // {X,+,F} --> X + {0,+,F}
  if (!S->getStart()->isZero()) {
    SmallVector<const SCEV *, 4> NewOps(S->op_begin(), S->op_end());
    NewOps[0] = SE.getConstant(Ty, 0);
    // FIXME: can use S->getNoWrapFlags()
    const SCEV *Rest = SE.getAddRecExpr(NewOps, L, SCEV::FlagAnyWrap);

    // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
    // comments on expandAddToGEP for details.
    const SCEV *Base = S->getStart();
    const SCEV *RestArray[1] = { Rest };
    // Dig into the expression to find the pointer base for a GEP.
    ExposePointerBase(Base, RestArray[0], SE);
    // If we found a pointer, expand the AddRec with a GEP.
    if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
      // Make sure the Base isn't something exotic, such as a multiplied
      // or divided pointer value. In those cases, the result type isn't
      // actually a pointer type.
      if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
        Value *StartV = expand(Base);
        assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
        return expandAddToGEP(RestArray, RestArray+1, PTy, Ty, StartV);
      }
    }

    // Just do a normal add. Pre-expand the operands to suppress folding.
    return expand(SE.getAddExpr(SE.getUnknown(expand(S->getStart())),
                                SE.getUnknown(expand(Rest))));
  }

  // If we don't yet have a canonical IV, create one.
  if (!CanonicalIV) {
    // Create and insert the PHI node for the induction variable in the
    // specified loop.
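    // For a loop with a single backedge this produces, e.g. for i64:
    //   header:
    //     %indvar = phi i64 [ 0, %preheader ], [ %indvar.next, %latch ]
    //     ...
    //     %indvar.next = add i64 %indvar, 1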
    BasicBlock *Header = L->getHeader();
    pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
    CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
                                  Header->begin());
    rememberInstruction(CanonicalIV);

    Constant *One = ConstantInt::get(Ty, 1);
    for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
      BasicBlock *HP = *HPI;
      if (L->contains(HP)) {
        // Insert a unit add instruction right before the terminator
        // corresponding to the back-edge.
        Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
                                                     "indvar.next",
                                                     HP->getTerminator());
        Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
        rememberInstruction(Add);
        CanonicalIV->addIncoming(Add, HP);
      } else {
        CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
      }
    }
  }

  // {0,+,1} --> Insert a canonical induction variable into the loop!
  if (S->isAffine() && S->getOperand(1)->isOne()) {
    assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
           "IVs with types different from the canonical IV should "
           "already have been handled!");
    return CanonicalIV;
  }

  // {0,+,F} --> {0,+,1} * F

  // If this is a simple linear addrec, emit it now as a special case.
  if (S->isAffine())    // {0,+,F} --> i*F
    return
      expand(SE.getTruncateOrNoop(
        SE.getMulExpr(SE.getUnknown(CanonicalIV),
                      SE.getNoopOrAnyExtend(S->getOperand(1),
                                            CanonicalIV->getType())),
        Ty));

  // If this is a chain of recurrences, turn it into a closed form, using the
  // folders, then expandCodeFor the closed form. This allows the folders to
  // simplify the expression without having to build a bunch of special code
  // into this folder.
  const SCEV *IH = SE.getUnknown(CanonicalIV);   // Get I as a "symbolic" SCEV.

  // Promote S up to the canonical IV type, if the cast is foldable.
  const SCEV *NewS = S;
  const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
  if (isa<SCEVAddRecExpr>(Ext))
    NewS = Ext;

  const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
  //cerr << "Evaluated: " << *this << "\n to: " << *V << "\n";

  // Truncate the result down to the original type, if needed.
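  // (evaluateAtIteration folds the recurrence into a polynomial in IH built
  // from binomial coefficients; V is in the canonical IV's type, which may
  // be wider than Ty.)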
  const SCEV *T = SE.getTruncateOrNoop(V, Ty);
  return expand(T);
}

Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateTrunc(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateZExt(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateSExt(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
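    // (As in visitSMaxExpr above, the n-ary umax is lowered as a
    // right-to-left chain of icmp ugt + select instructions.)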
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpUGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
                                   Instruction *IP) {
  Builder.SetInsertPoint(IP->getParent(), IP);
  return expandCodeFor(SH, Ty);
}

Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
  // Expand the code for this SCEV.
  Value *V = expand(SH);
  if (Ty) {
    assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
           "non-trivial casts should be done with the SCEVs directly!");
    V = InsertNoopCastOfTo(V, Ty);
  }
  return V;
}

Value *SCEVExpander::expand(const SCEV *S) {
  // Compute an insertion point for this SCEV object. Hoist the instructions
  // as far out in the loop nest as possible.
  Instruction *InsertPt = Builder.GetInsertPoint();
  for (Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock()); ;
       L = L->getParentLoop())
    if (SE.isLoopInvariant(S, L)) {
      if (!L) break;
      if (BasicBlock *Preheader = L->getLoopPreheader())
        InsertPt = Preheader->getTerminator();
      else {
        // LSR sets the insertion point for AddRec start/step values to the
        // block start to simplify value reuse, even though it's an invalid
        // position. SCEVExpander must correct for this in all cases.
        InsertPt = L->getHeader()->getFirstInsertionPt();
      }
    } else {
      // If the SCEV is computable at this level, insert it into the header
      // after the PHIs (and after any other instructions that we've inserted
      // there) so that it is guaranteed to dominate any user inside the loop.
      if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
        InsertPt = L->getHeader()->getFirstInsertionPt();
      while (InsertPt != Builder.GetInsertPoint() &&
             (isInsertedInstruction(InsertPt) ||
              isa<DbgInfoIntrinsic>(InsertPt))) {
        InsertPt = llvm::next(BasicBlock::iterator(InsertPt));
      }
      break;
    }

  // Check to see if we already expanded this here.
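  // Expansions are memoized per (SCEV, insertion point) pair, so the same
  // expression may be materialized at several distinct points in a function.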
  std::map<std::pair<const SCEV *, Instruction *>,
           AssertingVH<Value> >::iterator I =
    InsertedExpressions.find(std::make_pair(S, InsertPt));
  if (I != InsertedExpressions.end())
    return I->second;

  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
  Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);

  // Expand the expression into instructions.
  Value *V = visit(S);

  // Remember the expanded value for this SCEV at this location.
  //
  // This is independent of PostIncLoops. The mapped value simply materializes
  // the expression at this insertion point. If the mapped value happened to
  // be a postinc expansion, it could be reused by a non-postinc user, but
  // only if its insertion point was already at the head of the loop.
  InsertedExpressions[std::make_pair(S, InsertPt)] = V;

  restoreInsertPoint(SaveInsertBB, SaveInsertPt);
  return V;
}

void SCEVExpander::rememberInstruction(Value *I) {
  if (!PostIncLoops.empty())
    InsertedPostIncValues.insert(I);
  else
    InsertedValues.insert(I);
}

void SCEVExpander::restoreInsertPoint(BasicBlock *BB, BasicBlock::iterator I) {
  Builder.SetInsertPoint(BB, I);
}

/// getOrInsertCanonicalInductionVariable - This method returns the
/// canonical induction variable of the specified type for the specified
/// loop (inserting one if there is none). A canonical induction variable
/// starts at zero and steps by one on each iteration.
PHINode *
SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
                                                    Type *Ty) {
  assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");

  // Build a SCEV for {0,+,1}<L>.
  // Conservatively use FlagAnyWrap for now.
  const SCEV *H = SE.getAddRecExpr(SE.getConstant(Ty, 0),
                                   SE.getConstant(Ty, 1), L, SCEV::FlagAnyWrap);

  // Emit code for it.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
  PHINode *V = cast<PHINode>(expandCodeFor(H, 0, L->getHeader()->begin()));
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  return V;
}

/// Sort values by integer width for replaceCongruentIVs.
static bool width_descending(Value *lhs, Value *rhs) {
  // Put pointers at the back and make sure pointer < pointer = false.
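  // e.g. sorting {i8*, i32, i64} with this predicate yields i64, i32, i8*.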
  if (!lhs->getType()->isIntegerTy() || !rhs->getType()->isIntegerTy())
    return rhs->getType()->isIntegerTy() && !lhs->getType()->isIntegerTy();
  return rhs->getType()->getPrimitiveSizeInBits() <
         lhs->getType()->getPrimitiveSizeInBits();
}

/// replaceCongruentIVs - Check for congruent phis in this loop header and
/// replace them with their most canonical representative. Return the number
/// of phis eliminated.
///
/// This does not depend on any SCEVExpander state but should be used in
/// the same context that SCEVExpander is used.
unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
                                           SmallVectorImpl<WeakVH> &DeadInsts,
                                           const TargetLowering *TLI) {
  // Collect the integer phis, then sort them from widest to narrowest.
  SmallVector<PHINode*, 8> Phis;
  for (BasicBlock::iterator I = L->getHeader()->begin();
       PHINode *Phi = dyn_cast<PHINode>(I); ++I) {
    Phis.push_back(Phi);
  }
  if (TLI)
    std::sort(Phis.begin(), Phis.end(), width_descending);

  unsigned NumElim = 0;
  DenseMap<const SCEV *, PHINode *> ExprToIVMap;
  // Process phis from wide to narrow, mapping wide phis to their truncation
  // so narrow phis can reuse them.
  for (SmallVectorImpl<PHINode*>::const_iterator PIter = Phis.begin(),
       PEnd = Phis.end(); PIter != PEnd; ++PIter) {
    PHINode *Phi = *PIter;

    if (!SE.isSCEVable(Phi->getType()))
      continue;

    PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
    if (!OrigPhiRef) {
      OrigPhiRef = Phi;
      if (Phi->getType()->isIntegerTy() && TLI &&
          TLI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
        // This phi can be freely truncated to the narrowest phi type. Map the
        // truncated expression to it so it will be reused for narrow types.
        const SCEV *TruncExpr =
          SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType());
        ExprToIVMap[TruncExpr] = Phi;
      }
      continue;
    }

    // Replacing a pointer phi with an integer phi or vice-versa doesn't make
    // sense.
    if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
      continue;

    if (BasicBlock *LatchBlock = L->getLoopLatch()) {
      Instruction *OrigInc =
        cast<Instruction>(OrigPhiRef->getIncomingValueForBlock(LatchBlock));
      Instruction *IsomorphicInc =
        cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));

      // If this phi has the same width but is more canonical, replace the
      // original with it. As part of the "more canonical" determination,
      // respect a prior decision to use an IV chain.
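      // Concretely: swap only when the new phi is in expanded form (its
      // increment sits in the latch) but was not chosen for an IV chain,
      // and the current representative is not in expanded form.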
      if (OrigPhiRef->getType() == Phi->getType() &&
          !(ChainedPhis.count(Phi) ||
            isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L)) &&
          (ChainedPhis.count(Phi) ||
           isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
        std::swap(OrigPhiRef, Phi);
        std::swap(OrigInc, IsomorphicInc);
      }
      // Replacing the congruent phi is sufficient because acyclic redundancy
      // elimination, CSE/GVN, should handle the rest. However, once SCEV
      // proves that a phi is congruent, it's often the head of an IV user
      // cycle that is isomorphic with the original phi. It's worth eagerly
      // cleaning up the common case of a single IV increment so that
      // DeleteDeadPHIs can remove cycles that had postinc uses.
      const SCEV *TruncExpr = SE.getTruncateOrNoop(SE.getSCEV(OrigInc),
                                                   IsomorphicInc->getType());
      if (OrigInc != IsomorphicInc &&
          TruncExpr == SE.getSCEV(IsomorphicInc) &&
          ((isa<PHINode>(OrigInc) && isa<PHINode>(IsomorphicInc)) ||
           hoistIVInc(OrigInc, IsomorphicInc))) {
        DEBUG_WITH_TYPE(DebugType, dbgs()
                        << "INDVARS: Eliminated congruent iv.inc: "
                        << *IsomorphicInc << '\n');
        Value *NewInc = OrigInc;
        if (OrigInc->getType() != IsomorphicInc->getType()) {
          Instruction *IP = isa<PHINode>(OrigInc)
            ? (Instruction*)L->getHeader()->getFirstInsertionPt()
            : OrigInc->getNextNode();
          IRBuilder<> Builder(IP);
          Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
          NewInc = Builder.CreateTruncOrBitCast(OrigInc,
                                                IsomorphicInc->getType(),
                                                IVName);
        }
        IsomorphicInc->replaceAllUsesWith(NewInc);
        DeadInsts.push_back(IsomorphicInc);
      }
    }
    DEBUG_WITH_TYPE(DebugType, dbgs()
                    << "INDVARS: Eliminated congruent iv: " << *Phi << '\n');
    ++NumElim;
    Value *NewIV = OrigPhiRef;
    if (OrigPhiRef->getType() != Phi->getType()) {
      IRBuilder<> Builder(L->getHeader()->getFirstInsertionPt());
      Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
      NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
    }
    Phi->replaceAllUsesWith(NewIV);
    DeadInsts.push_back(Phi);
  }
  return NumElim;
}

namespace {
// Search for a SCEV subexpression that is not safe to expand. Any expression
// that may expand to a !isSafeToSpeculativelyExecute value is unsafe, namely
// UDiv expressions. We don't know if the UDiv is derived from an IR divide
// instruction, but the important thing is that we prove the denominator is
// nonzero before expansion.
//
// IVUsers already checks that IV-derived expressions are safe. So this check
// is only needed when the expression includes some subexpression that is not
// IV derived.
//
// Currently, we only allow division by a nonzero constant here. If this is
// inadequate, we could easily allow division by SCEVUnknown by using
// ValueTracking to check isKnownNonZero().
struct SCEVFindUnsafe {
  bool IsUnsafe;

  SCEVFindUnsafe(): IsUnsafe(false) {}

  bool follow(const SCEV *S) {
    const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S);
    if (!D)
      return true;
    const SCEVConstant *SC = dyn_cast<SCEVConstant>(D->getRHS());
    if (SC && !SC->getValue()->isZero())
      return true;
    IsUnsafe = true;
    return false;
  }
  bool isDone() const { return IsUnsafe; }
};
}

namespace llvm {
bool isSafeToExpand(const SCEV *S) {
  SCEVFindUnsafe Search;
  visitAll(S, Search);
  return !Search.IsUnsafe;
}
}
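// Illustrative use at an expansion site (names are the caller's choice;
// Expander, Ty, and InsertPt are assumed to exist in the client pass):
//   if (isSafeToExpand(S))
//     Value *V = Expander.expandCodeFor(S, Ty, InsertPt);
//   else
//     /* bail out rather than materialize a possibly-trapping udiv */;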