// ScalarEvolutionExpander.cpp — LLVM, SVN revision 276479 (annotate/blame artifacts removed)
1193323Sed//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis --*- C++ -*-===// 2193323Sed// 3193323Sed// The LLVM Compiler Infrastructure 4193323Sed// 5193323Sed// This file is distributed under the University of Illinois Open Source 6193323Sed// License. See LICENSE.TXT for details. 7193323Sed// 8193323Sed//===----------------------------------------------------------------------===// 9193323Sed// 10193323Sed// This file contains the implementation of the scalar evolution expander, 11193323Sed// which is used to generate the code corresponding to a given scalar evolution 12193323Sed// expression. 13193323Sed// 14193323Sed//===----------------------------------------------------------------------===// 15193323Sed 16193323Sed#include "llvm/Analysis/ScalarEvolutionExpander.h" 17276479Sdim#include "llvm/ADT/STLExtras.h" 18261991Sdim#include "llvm/ADT/SmallSet.h" 19276479Sdim#include "llvm/Analysis/InstructionSimplify.h" 20193323Sed#include "llvm/Analysis/LoopInfo.h" 21249423Sdim#include "llvm/Analysis/TargetTransformInfo.h" 22249423Sdim#include "llvm/IR/DataLayout.h" 23276479Sdim#include "llvm/IR/Dominators.h" 24249423Sdim#include "llvm/IR/IntrinsicInst.h" 25249423Sdim#include "llvm/IR/LLVMContext.h" 26226633Sdim#include "llvm/Support/Debug.h" 27224145Sdim 28193323Sedusing namespace llvm; 29193323Sed 30210299Sed/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP, 31210299Sed/// reusing an existing cast if a suitable one exists, moving an existing 32210299Sed/// cast if a suitable one exists but isn't in the right place, or 33210299Sed/// creating a new one. 34226633SdimValue *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty, 35210299Sed Instruction::CastOps Op, 36210299Sed BasicBlock::iterator IP) { 37234353Sdim // This function must be called with the builder having a valid insertion 38234353Sdim // point. 
It doesn't need to be the actual IP where the uses of the returned 39234353Sdim // cast will be added, but it must dominate such IP. 40234353Sdim // We use this precondition to produce a cast that will dominate all its 41234353Sdim // uses. In particular, this is crucial for the case where the builder's 42234353Sdim // insertion point *is* the point where we were asked to put the cast. 43239462Sdim // Since we don't know the builder's insertion point is actually 44234353Sdim // where the uses will be added (only that it dominates it), we are 45234353Sdim // not allowed to move it. 46234353Sdim BasicBlock::iterator BIP = Builder.GetInsertPoint(); 47234353Sdim 48276479Sdim Instruction *Ret = nullptr; 49234353Sdim 50210299Sed // Check to see if there is already a cast! 51276479Sdim for (User *U : V->users()) 52210299Sed if (U->getType() == Ty) 53210299Sed if (CastInst *CI = dyn_cast<CastInst>(U)) 54210299Sed if (CI->getOpcode() == Op) { 55234353Sdim // If the cast isn't where we want it, create a new cast at IP. 56234353Sdim // Likewise, do not reuse a cast at BIP because it must dominate 57234353Sdim // instructions that might be inserted before BIP. 58234353Sdim if (BasicBlock::iterator(CI) != IP || BIP == IP) { 59210299Sed // Create a new cast, and leave the old cast in place in case 60210299Sed // it is being used as an insert point. Clear its operand 61210299Sed // so that it doesn't hold anything live. 62234353Sdim Ret = CastInst::Create(Op, V, Ty, "", IP); 63234353Sdim Ret->takeName(CI); 64234353Sdim CI->replaceAllUsesWith(Ret); 65210299Sed CI->setOperand(0, UndefValue::get(V->getType())); 66234353Sdim break; 67210299Sed } 68234353Sdim Ret = CI; 69234353Sdim break; 70210299Sed } 71210299Sed 72210299Sed // Create a new cast. 
73234353Sdim if (!Ret) 74234353Sdim Ret = CastInst::Create(Op, V, Ty, V->getName(), IP); 75234353Sdim 76234353Sdim // We assert at the end of the function since IP might point to an 77234353Sdim // instruction with different dominance properties than a cast 78234353Sdim // (an invoke for example) and not dominate BIP (but the cast does). 79234353Sdim assert(SE.DT->dominates(Ret, BIP)); 80234353Sdim 81234353Sdim rememberInstruction(Ret); 82234353Sdim return Ret; 83210299Sed} 84210299Sed 85195340Sed/// InsertNoopCastOfTo - Insert a cast of V to the specified type, 86195340Sed/// which must be possible with a noop cast, doing what we can to share 87195340Sed/// the casts. 88226633SdimValue *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) { 89195340Sed Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false); 90195340Sed assert((Op == Instruction::BitCast || 91195340Sed Op == Instruction::PtrToInt || 92195340Sed Op == Instruction::IntToPtr) && 93195340Sed "InsertNoopCastOfTo cannot perform non-noop casts!"); 94195340Sed assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) && 95195340Sed "InsertNoopCastOfTo cannot change sizes!"); 96195340Sed 97193323Sed // Short-circuit unnecessary bitcasts. 98234353Sdim if (Op == Instruction::BitCast) { 99234353Sdim if (V->getType() == Ty) 100234353Sdim return V; 101234353Sdim if (CastInst *CI = dyn_cast<CastInst>(V)) { 102234353Sdim if (CI->getOperand(0)->getType() == Ty) 103234353Sdim return CI->getOperand(0); 104234353Sdim } 105234353Sdim } 106193323Sed // Short-circuit unnecessary inttoptr<->ptrtoint casts. 
107195340Sed if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) && 108193323Sed SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) { 109193323Sed if (CastInst *CI = dyn_cast<CastInst>(V)) 110193323Sed if ((CI->getOpcode() == Instruction::PtrToInt || 111193323Sed CI->getOpcode() == Instruction::IntToPtr) && 112193323Sed SE.getTypeSizeInBits(CI->getType()) == 113193323Sed SE.getTypeSizeInBits(CI->getOperand(0)->getType())) 114193323Sed return CI->getOperand(0); 115193323Sed if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) 116193323Sed if ((CE->getOpcode() == Instruction::PtrToInt || 117193323Sed CE->getOpcode() == Instruction::IntToPtr) && 118193323Sed SE.getTypeSizeInBits(CE->getType()) == 119193323Sed SE.getTypeSizeInBits(CE->getOperand(0)->getType())) 120193323Sed return CE->getOperand(0); 121193323Sed } 122193323Sed 123210299Sed // Fold a cast of a constant. 124193323Sed if (Constant *C = dyn_cast<Constant>(V)) 125195340Sed return ConstantExpr::getCast(Op, C, Ty); 126198090Srdivacky 127210299Sed // Cast the argument at the beginning of the entry block, after 128210299Sed // any bitcasts of other arguments. 129193323Sed if (Argument *A = dyn_cast<Argument>(V)) { 130210299Sed BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin(); 131210299Sed while ((isa<BitCastInst>(IP) && 132210299Sed isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) && 133210299Sed cast<BitCastInst>(IP)->getOperand(0) != A) || 134226633Sdim isa<DbgInfoIntrinsic>(IP) || 135226633Sdim isa<LandingPadInst>(IP)) 136210299Sed ++IP; 137210299Sed return ReuseOrCreateCast(A, Ty, Op, IP); 138193323Sed } 139193323Sed 140210299Sed // Cast the instruction immediately after the instruction. 
141193323Sed Instruction *I = cast<Instruction>(V); 142193323Sed BasicBlock::iterator IP = I; ++IP; 143193323Sed if (InvokeInst *II = dyn_cast<InvokeInst>(I)) 144193323Sed IP = II->getNormalDest()->begin(); 145234353Sdim while (isa<PHINode>(IP) || isa<LandingPadInst>(IP)) 146226633Sdim ++IP; 147210299Sed return ReuseOrCreateCast(I, Ty, Op, IP); 148193323Sed} 149193323Sed 150193323Sed/// InsertBinop - Insert the specified binary operator, doing a small amount 151193323Sed/// of work to avoid inserting an obviously redundant operation. 152195340SedValue *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode, 153195340Sed Value *LHS, Value *RHS) { 154193323Sed // Fold a binop with constant operands. 155193323Sed if (Constant *CLHS = dyn_cast<Constant>(LHS)) 156193323Sed if (Constant *CRHS = dyn_cast<Constant>(RHS)) 157193323Sed return ConstantExpr::get(Opcode, CLHS, CRHS); 158193323Sed 159193323Sed // Do a quick scan to see if we have this binop nearby. If so, reuse it. 160193323Sed unsigned ScanLimit = 6; 161195340Sed BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin(); 162195340Sed // Scanning starts from the last instruction before the insertion point. 163195340Sed BasicBlock::iterator IP = Builder.GetInsertPoint(); 164195340Sed if (IP != BlockBegin) { 165193323Sed --IP; 166193323Sed for (; ScanLimit; --IP, --ScanLimit) { 167204792Srdivacky // Don't count dbg.value against the ScanLimit, to avoid perturbing the 168204792Srdivacky // generated code. 169204792Srdivacky if (isa<DbgInfoIntrinsic>(IP)) 170204792Srdivacky ScanLimit++; 171193323Sed if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS && 172193323Sed IP->getOperand(1) == RHS) 173193323Sed return IP; 174193323Sed if (IP == BlockBegin) break; 175193323Sed } 176193323Sed } 177195340Sed 178204642Srdivacky // Save the original insertion point so we can restore it when we're done. 
179261991Sdim DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc(); 180261991Sdim BuilderType::InsertPointGuard Guard(Builder); 181204642Srdivacky 182204642Srdivacky // Move the insertion point out of as many loops as we can. 183204642Srdivacky while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) { 184204642Srdivacky if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break; 185204642Srdivacky BasicBlock *Preheader = L->getLoopPreheader(); 186204642Srdivacky if (!Preheader) break; 187204642Srdivacky 188204642Srdivacky // Ok, move up a level. 189204642Srdivacky Builder.SetInsertPoint(Preheader, Preheader->getTerminator()); 190204642Srdivacky } 191204642Srdivacky 192193323Sed // If we haven't found this binop, insert it. 193226633Sdim Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS)); 194261991Sdim BO->setDebugLoc(Loc); 195202878Srdivacky rememberInstruction(BO); 196204642Srdivacky 197193323Sed return BO; 198193323Sed} 199193323Sed 200193323Sed/// FactorOutConstant - Test if S is divisible by Factor, using signed 201193323Sed/// division. If so, update S with Factor divided out and return true. 202204642Srdivacky/// S need not be evenly divisible if a reasonable remainder can be 203193323Sed/// computed. 204193323Sed/// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made 205193323Sed/// unnecessary; in its place, just signed-divide Ops[i] by the scale and 206193323Sed/// check to see if the divide was folded. 207198090Srdivackystatic bool FactorOutConstant(const SCEV *&S, 208198090Srdivacky const SCEV *&Remainder, 209198090Srdivacky const SCEV *Factor, 210198090Srdivacky ScalarEvolution &SE, 211276479Sdim const DataLayout *DL) { 212193323Sed // Everything is divisible by one. 213198090Srdivacky if (Factor->isOne()) 214193323Sed return true; 215193323Sed 216198090Srdivacky // x/x == 1. 
217198090Srdivacky if (S == Factor) { 218207618Srdivacky S = SE.getConstant(S->getType(), 1); 219198090Srdivacky return true; 220198090Srdivacky } 221198090Srdivacky 222193323Sed // For a Constant, check for a multiple of the given factor. 223193323Sed if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) { 224198090Srdivacky // 0/x == 0. 225198090Srdivacky if (C->isZero()) 226193323Sed return true; 227198090Srdivacky // Check for divisibility. 228198090Srdivacky if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) { 229198090Srdivacky ConstantInt *CI = 230198090Srdivacky ConstantInt::get(SE.getContext(), 231198090Srdivacky C->getValue()->getValue().sdiv( 232198090Srdivacky FC->getValue()->getValue())); 233198090Srdivacky // If the quotient is zero and the remainder is non-zero, reject 234198090Srdivacky // the value at this scale. It will be considered for subsequent 235198090Srdivacky // smaller scales. 236198090Srdivacky if (!CI->isZero()) { 237198090Srdivacky const SCEV *Div = SE.getConstant(CI); 238198090Srdivacky S = Div; 239198090Srdivacky Remainder = 240198090Srdivacky SE.getAddExpr(Remainder, 241198090Srdivacky SE.getConstant(C->getValue()->getValue().srem( 242198090Srdivacky FC->getValue()->getValue()))); 243198090Srdivacky return true; 244198090Srdivacky } 245193323Sed } 246193323Sed } 247193323Sed 248193323Sed // In a Mul, check if there is a constant operand which is a multiple 249193323Sed // of the given factor. 250198090Srdivacky if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) { 251276479Sdim if (DL) { 252243830Sdim // With DataLayout, the size is known. Check if there is a constant 253198090Srdivacky // operand which is a multiple of the given factor. If so, we can 254198090Srdivacky // factor it. 
255198090Srdivacky const SCEVConstant *FC = cast<SCEVConstant>(Factor); 256198090Srdivacky if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0))) 257198090Srdivacky if (!C->getValue()->getValue().srem(FC->getValue()->getValue())) { 258205407Srdivacky SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end()); 259198090Srdivacky NewMulOps[0] = 260198090Srdivacky SE.getConstant(C->getValue()->getValue().sdiv( 261198090Srdivacky FC->getValue()->getValue())); 262198090Srdivacky S = SE.getMulExpr(NewMulOps); 263198090Srdivacky return true; 264198090Srdivacky } 265198090Srdivacky } else { 266243830Sdim // Without DataLayout, check if Factor can be factored out of any of the 267198090Srdivacky // Mul's operands. If so, we can just remove it. 268198090Srdivacky for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { 269198090Srdivacky const SCEV *SOp = M->getOperand(i); 270207618Srdivacky const SCEV *Remainder = SE.getConstant(SOp->getType(), 0); 271276479Sdim if (FactorOutConstant(SOp, Remainder, Factor, SE, DL) && 272198090Srdivacky Remainder->isZero()) { 273205407Srdivacky SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end()); 274198090Srdivacky NewMulOps[i] = SOp; 275198090Srdivacky S = SE.getMulExpr(NewMulOps); 276198090Srdivacky return true; 277198090Srdivacky } 278193323Sed } 279198090Srdivacky } 280198090Srdivacky } 281193323Sed 282193323Sed // In an AddRec, check if both start and step are divisible. 
283193323Sed if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) { 284198090Srdivacky const SCEV *Step = A->getStepRecurrence(SE); 285207618Srdivacky const SCEV *StepRem = SE.getConstant(Step->getType(), 0); 286276479Sdim if (!FactorOutConstant(Step, StepRem, Factor, SE, DL)) 287193323Sed return false; 288193323Sed if (!StepRem->isZero()) 289193323Sed return false; 290198090Srdivacky const SCEV *Start = A->getStart(); 291276479Sdim if (!FactorOutConstant(Start, Remainder, Factor, SE, DL)) 292193323Sed return false; 293261991Sdim S = SE.getAddRecExpr(Start, Step, A->getLoop(), 294261991Sdim A->getNoWrapFlags(SCEV::FlagNW)); 295193323Sed return true; 296193323Sed } 297193323Sed 298193323Sed return false; 299193323Sed} 300193323Sed 301198090Srdivacky/// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs 302198090Srdivacky/// is the number of SCEVAddRecExprs present, which are kept at the end of 303198090Srdivacky/// the list. 304193323Sed/// 305198090Srdivackystatic void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops, 306226633Sdim Type *Ty, 307198090Srdivacky ScalarEvolution &SE) { 308198090Srdivacky unsigned NumAddRecs = 0; 309198090Srdivacky for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i) 310198090Srdivacky ++NumAddRecs; 311198090Srdivacky // Group Ops into non-addrecs and addrecs. 312198090Srdivacky SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs); 313198090Srdivacky SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end()); 314198090Srdivacky // Let ScalarEvolution sort and simplify the non-addrecs list. 315198090Srdivacky const SCEV *Sum = NoAddRecs.empty() ? 316207618Srdivacky SE.getConstant(Ty, 0) : 317198090Srdivacky SE.getAddExpr(NoAddRecs); 318198090Srdivacky // If it returned an add, use the operands. Otherwise it simplified 319198090Srdivacky // the sum into a single value, so just use that. 
320205407Srdivacky Ops.clear(); 321198090Srdivacky if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum)) 322210299Sed Ops.append(Add->op_begin(), Add->op_end()); 323205407Srdivacky else if (!Sum->isZero()) 324205407Srdivacky Ops.push_back(Sum); 325198090Srdivacky // Then append the addrecs. 326210299Sed Ops.append(AddRecs.begin(), AddRecs.end()); 327198090Srdivacky} 328198090Srdivacky 329198090Srdivacky/// SplitAddRecs - Flatten a list of add operands, moving addrec start values 330198090Srdivacky/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,d}. 331198090Srdivacky/// This helps expose more opportunities for folding parts of the expressions 332198090Srdivacky/// into GEP indices. 333198090Srdivacky/// 334198090Srdivackystatic void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops, 335226633Sdim Type *Ty, 336198090Srdivacky ScalarEvolution &SE) { 337198090Srdivacky // Find the addrecs. 338198090Srdivacky SmallVector<const SCEV *, 8> AddRecs; 339198090Srdivacky for (unsigned i = 0, e = Ops.size(); i != e; ++i) 340198090Srdivacky while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) { 341198090Srdivacky const SCEV *Start = A->getStart(); 342198090Srdivacky if (Start->isZero()) break; 343207618Srdivacky const SCEV *Zero = SE.getConstant(Ty, 0); 344198090Srdivacky AddRecs.push_back(SE.getAddRecExpr(Zero, 345198090Srdivacky A->getStepRecurrence(SE), 346221345Sdim A->getLoop(), 347261991Sdim A->getNoWrapFlags(SCEV::FlagNW))); 348198090Srdivacky if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) { 349198090Srdivacky Ops[i] = Zero; 350210299Sed Ops.append(Add->op_begin(), Add->op_end()); 351198090Srdivacky e += Add->getNumOperands(); 352198090Srdivacky } else { 353198090Srdivacky Ops[i] = Start; 354198090Srdivacky } 355198090Srdivacky } 356198090Srdivacky if (!AddRecs.empty()) { 357198090Srdivacky // Add the addrecs onto the end of the list. 
358210299Sed Ops.append(AddRecs.begin(), AddRecs.end()); 359198090Srdivacky // Resort the operand list, moving any constants to the front. 360198090Srdivacky SimplifyAddOperands(Ops, Ty, SE); 361198090Srdivacky } 362198090Srdivacky} 363198090Srdivacky 364198090Srdivacky/// expandAddToGEP - Expand an addition expression with a pointer type into 365198090Srdivacky/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps 366198090Srdivacky/// BasicAliasAnalysis and other passes analyze the result. See the rules 367198090Srdivacky/// for getelementptr vs. inttoptr in 368198090Srdivacky/// http://llvm.org/docs/LangRef.html#pointeraliasing 369198090Srdivacky/// for details. 370198090Srdivacky/// 371202878Srdivacky/// Design note: The correctness of using getelementptr here depends on 372198090Srdivacky/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as 373198090Srdivacky/// they may introduce pointer arithmetic which may not be safely converted 374198090Srdivacky/// into getelementptr. 375198090Srdivacky/// 376193323Sed/// Design note: It might seem desirable for this function to be more 377193323Sed/// loop-aware. If some of the indices are loop-invariant while others 378193323Sed/// aren't, it might seem desirable to emit multiple GEPs, keeping the 379193323Sed/// loop-invariant portions of the overall computation outside the loop. 380193323Sed/// However, there are a few reasons this is not done here. Hoisting simple 381193323Sed/// arithmetic is a low-level optimization that often isn't very 382193323Sed/// important until late in the optimization process. In fact, passes 383193323Sed/// like InstructionCombining will combine GEPs, even if it means 384193323Sed/// pushing loop-invariant computation down into loops, so even if the 385193323Sed/// GEPs were split here, the work would quickly be undone. 
The 386193323Sed/// LoopStrengthReduction pass, which is usually run quite late (and 387193323Sed/// after the last InstructionCombining pass), takes care of hoisting 388193323Sed/// loop-invariant portions of expressions, after considering what 389193323Sed/// can be folded using target addressing modes. 390193323Sed/// 391198090SrdivackyValue *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin, 392198090Srdivacky const SCEV *const *op_end, 393226633Sdim PointerType *PTy, 394226633Sdim Type *Ty, 395193323Sed Value *V) { 396226633Sdim Type *ElTy = PTy->getElementType(); 397193323Sed SmallVector<Value *, 4> GepIndices; 398198090Srdivacky SmallVector<const SCEV *, 8> Ops(op_begin, op_end); 399193323Sed bool AnyNonZeroIndices = false; 400193323Sed 401198090Srdivacky // Split AddRecs up into parts as either of the parts may be usable 402198090Srdivacky // without the other. 403198090Srdivacky SplitAddRecs(Ops, Ty, SE); 404198090Srdivacky 405276479Sdim Type *IntPtrTy = SE.DL 406276479Sdim ? SE.DL->getIntPtrType(PTy) 407261991Sdim : Type::getInt64Ty(PTy->getContext()); 408261991Sdim 409200581Srdivacky // Descend down the pointer's type and attempt to convert the other 410193323Sed // operands into GEP indices, at each level. The first index in a GEP 411193323Sed // indexes into the array implied by the pointer operand; the rest of 412193323Sed // the indices index into the element or field type selected by the 413193323Sed // preceding index. 414193323Sed for (;;) { 415198090Srdivacky // If the scale size is not 0, attempt to factor out a scale for 416198090Srdivacky // array indexing. 
417198090Srdivacky SmallVector<const SCEV *, 8> ScaledOps; 418203954Srdivacky if (ElTy->isSized()) { 419261991Sdim const SCEV *ElSize = SE.getSizeOfExpr(IntPtrTy, ElTy); 420203954Srdivacky if (!ElSize->isZero()) { 421203954Srdivacky SmallVector<const SCEV *, 8> NewOps; 422203954Srdivacky for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 423203954Srdivacky const SCEV *Op = Ops[i]; 424207618Srdivacky const SCEV *Remainder = SE.getConstant(Ty, 0); 425276479Sdim if (FactorOutConstant(Op, Remainder, ElSize, SE, SE.DL)) { 426203954Srdivacky // Op now has ElSize factored out. 427203954Srdivacky ScaledOps.push_back(Op); 428203954Srdivacky if (!Remainder->isZero()) 429203954Srdivacky NewOps.push_back(Remainder); 430203954Srdivacky AnyNonZeroIndices = true; 431203954Srdivacky } else { 432203954Srdivacky // The operand was not divisible, so add it to the list of operands 433203954Srdivacky // we'll scan next iteration. 434203954Srdivacky NewOps.push_back(Ops[i]); 435203954Srdivacky } 436193323Sed } 437203954Srdivacky // If we made any changes, update Ops. 438203954Srdivacky if (!ScaledOps.empty()) { 439203954Srdivacky Ops = NewOps; 440203954Srdivacky SimplifyAddOperands(Ops, Ty, SE); 441203954Srdivacky } 442193323Sed } 443193323Sed } 444198090Srdivacky 445198090Srdivacky // Record the scaled array index for this level of the type. If 446198090Srdivacky // we didn't find any operands that could be factored, tentatively 447198090Srdivacky // assume that element zero was selected (since the zero offset 448198090Srdivacky // would obviously be folded away). 449193323Sed Value *Scaled = ScaledOps.empty() ? 450193323Sed Constant::getNullValue(Ty) : 451193323Sed expandCodeFor(SE.getAddExpr(ScaledOps), Ty); 452193323Sed GepIndices.push_back(Scaled); 453193323Sed 454193323Sed // Collect struct field index operands. 455226633Sdim while (StructType *STy = dyn_cast<StructType>(ElTy)) { 456198090Srdivacky bool FoundFieldNo = false; 457198090Srdivacky // An empty struct has no fields. 
458198090Srdivacky if (STy->getNumElements() == 0) break; 459276479Sdim if (SE.DL) { 460243830Sdim // With DataLayout, field offsets are known. See if a constant offset 461198090Srdivacky // falls within any of the struct fields. 462198090Srdivacky if (Ops.empty()) break; 463193323Sed if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0])) 464193323Sed if (SE.getTypeSizeInBits(C->getType()) <= 64) { 465276479Sdim const StructLayout &SL = *SE.DL->getStructLayout(STy); 466193323Sed uint64_t FullOffset = C->getValue()->getZExtValue(); 467193323Sed if (FullOffset < SL.getSizeInBytes()) { 468193323Sed unsigned ElIdx = SL.getElementContainingOffset(FullOffset); 469198090Srdivacky GepIndices.push_back( 470198090Srdivacky ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx)); 471193323Sed ElTy = STy->getTypeAtIndex(ElIdx); 472193323Sed Ops[0] = 473194612Sed SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx)); 474193323Sed AnyNonZeroIndices = true; 475198090Srdivacky FoundFieldNo = true; 476193323Sed } 477193323Sed } 478198090Srdivacky } else { 479243830Sdim // Without DataLayout, just check for an offsetof expression of the 480198090Srdivacky // appropriate struct type. 
481198090Srdivacky for (unsigned i = 0, e = Ops.size(); i != e; ++i) 482203954Srdivacky if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Ops[i])) { 483226633Sdim Type *CTy; 484203954Srdivacky Constant *FieldNo; 485203954Srdivacky if (U->isOffsetOf(CTy, FieldNo) && CTy == STy) { 486203954Srdivacky GepIndices.push_back(FieldNo); 487203954Srdivacky ElTy = 488203954Srdivacky STy->getTypeAtIndex(cast<ConstantInt>(FieldNo)->getZExtValue()); 489198090Srdivacky Ops[i] = SE.getConstant(Ty, 0); 490198090Srdivacky AnyNonZeroIndices = true; 491198090Srdivacky FoundFieldNo = true; 492198090Srdivacky break; 493198090Srdivacky } 494203954Srdivacky } 495193323Sed } 496198090Srdivacky // If no struct field offsets were found, tentatively assume that 497198090Srdivacky // field zero was selected (since the zero offset would obviously 498198090Srdivacky // be folded away). 499198090Srdivacky if (!FoundFieldNo) { 500198090Srdivacky ElTy = STy->getTypeAtIndex(0u); 501198090Srdivacky GepIndices.push_back( 502198090Srdivacky Constant::getNullValue(Type::getInt32Ty(Ty->getContext()))); 503198090Srdivacky } 504198090Srdivacky } 505193323Sed 506226633Sdim if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy)) 507193323Sed ElTy = ATy->getElementType(); 508198090Srdivacky else 509198090Srdivacky break; 510193323Sed } 511193323Sed 512204642Srdivacky // If none of the operands were convertible to proper GEP indices, cast 513193323Sed // the base to i8* and do an ugly getelementptr with that. It's still 514193323Sed // better than ptrtoint+arithmetic+inttoptr at least. 515193323Sed if (!AnyNonZeroIndices) { 516198090Srdivacky // Cast the base to i8*. 517193323Sed V = InsertNoopCastOfTo(V, 518198090Srdivacky Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace())); 519198090Srdivacky 520234353Sdim assert(!isa<Instruction>(V) || 521234353Sdim SE.DT->dominates(cast<Instruction>(V), Builder.GetInsertPoint())); 522234353Sdim 523198090Srdivacky // Expand the operands for a plain byte offset. 
524194178Sed Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty); 525193323Sed 526193323Sed // Fold a GEP with constant operands. 527193323Sed if (Constant *CLHS = dyn_cast<Constant>(V)) 528193323Sed if (Constant *CRHS = dyn_cast<Constant>(Idx)) 529226633Sdim return ConstantExpr::getGetElementPtr(CLHS, CRHS); 530193323Sed 531193323Sed // Do a quick scan to see if we have this GEP nearby. If so, reuse it. 532193323Sed unsigned ScanLimit = 6; 533195340Sed BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin(); 534195340Sed // Scanning starts from the last instruction before the insertion point. 535195340Sed BasicBlock::iterator IP = Builder.GetInsertPoint(); 536195340Sed if (IP != BlockBegin) { 537193323Sed --IP; 538193323Sed for (; ScanLimit; --IP, --ScanLimit) { 539204792Srdivacky // Don't count dbg.value against the ScanLimit, to avoid perturbing the 540204792Srdivacky // generated code. 541204792Srdivacky if (isa<DbgInfoIntrinsic>(IP)) 542204792Srdivacky ScanLimit++; 543193323Sed if (IP->getOpcode() == Instruction::GetElementPtr && 544193323Sed IP->getOperand(0) == V && IP->getOperand(1) == Idx) 545193323Sed return IP; 546193323Sed if (IP == BlockBegin) break; 547193323Sed } 548193323Sed } 549193323Sed 550204642Srdivacky // Save the original insertion point so we can restore it when we're done. 551261991Sdim BuilderType::InsertPointGuard Guard(Builder); 552204642Srdivacky 553204642Srdivacky // Move the insertion point out of as many loops as we can. 554204642Srdivacky while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) { 555204642Srdivacky if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break; 556204642Srdivacky BasicBlock *Preheader = L->getLoopPreheader(); 557204642Srdivacky if (!Preheader) break; 558204642Srdivacky 559204642Srdivacky // Ok, move up a level. 560204642Srdivacky Builder.SetInsertPoint(Preheader, Preheader->getTerminator()); 561204642Srdivacky } 562204642Srdivacky 563198090Srdivacky // Emit a GEP. 
564198090Srdivacky Value *GEP = Builder.CreateGEP(V, Idx, "uglygep"); 565202878Srdivacky rememberInstruction(GEP); 566204642Srdivacky 567193323Sed return GEP; 568193323Sed } 569193323Sed 570204642Srdivacky // Save the original insertion point so we can restore it when we're done. 571261991Sdim BuilderType::InsertPoint SaveInsertPt = Builder.saveIP(); 572204642Srdivacky 573204642Srdivacky // Move the insertion point out of as many loops as we can. 574204642Srdivacky while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) { 575204642Srdivacky if (!L->isLoopInvariant(V)) break; 576204642Srdivacky 577204642Srdivacky bool AnyIndexNotLoopInvariant = false; 578204642Srdivacky for (SmallVectorImpl<Value *>::const_iterator I = GepIndices.begin(), 579204642Srdivacky E = GepIndices.end(); I != E; ++I) 580204642Srdivacky if (!L->isLoopInvariant(*I)) { 581204642Srdivacky AnyIndexNotLoopInvariant = true; 582204642Srdivacky break; 583204642Srdivacky } 584204642Srdivacky if (AnyIndexNotLoopInvariant) 585204642Srdivacky break; 586204642Srdivacky 587204642Srdivacky BasicBlock *Preheader = L->getLoopPreheader(); 588204642Srdivacky if (!Preheader) break; 589204642Srdivacky 590204642Srdivacky // Ok, move up a level. 591204642Srdivacky Builder.SetInsertPoint(Preheader, Preheader->getTerminator()); 592204642Srdivacky } 593204642Srdivacky 594198090Srdivacky // Insert a pretty getelementptr. Note that this GEP is not marked inbounds, 595198090Srdivacky // because ScalarEvolution may have changed the address arithmetic to 596198090Srdivacky // compute a value which is beyond the end of the allocated object. 
597202878Srdivacky Value *Casted = V; 598202878Srdivacky if (V->getType() != PTy) 599202878Srdivacky Casted = InsertNoopCastOfTo(Casted, PTy); 600202878Srdivacky Value *GEP = Builder.CreateGEP(Casted, 601226633Sdim GepIndices, 602195340Sed "scevgep"); 603193323Sed Ops.push_back(SE.getUnknown(GEP)); 604202878Srdivacky rememberInstruction(GEP); 605204642Srdivacky 606204642Srdivacky // Restore the original insert point. 607261991Sdim Builder.restoreIP(SaveInsertPt); 608204642Srdivacky 609193323Sed return expand(SE.getAddExpr(Ops)); 610193323Sed} 611193323Sed 612204642Srdivacky/// PickMostRelevantLoop - Given two loops pick the one that's most relevant for 613204642Srdivacky/// SCEV expansion. If they are nested, this is the most nested. If they are 614204642Srdivacky/// neighboring, pick the later. 615204642Srdivackystatic const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B, 616204642Srdivacky DominatorTree &DT) { 617204642Srdivacky if (!A) return B; 618204642Srdivacky if (!B) return A; 619204642Srdivacky if (A->contains(B)) return B; 620204642Srdivacky if (B->contains(A)) return A; 621204642Srdivacky if (DT.dominates(A->getHeader(), B->getHeader())) return B; 622204642Srdivacky if (DT.dominates(B->getHeader(), A->getHeader())) return A; 623204642Srdivacky return A; // Arbitrarily break the tie. 624204642Srdivacky} 625193323Sed 626218893Sdim/// getRelevantLoop - Get the most relevant loop associated with the given 627204642Srdivacky/// expression, according to PickMostRelevantLoop. 628218893Sdimconst Loop *SCEVExpander::getRelevantLoop(const SCEV *S) { 629218893Sdim // Test whether we've already computed the most relevant loop for this SCEV. 
630218893Sdim std::pair<DenseMap<const SCEV *, const Loop *>::iterator, bool> Pair = 631276479Sdim RelevantLoops.insert(std::make_pair(S, nullptr)); 632218893Sdim if (!Pair.second) 633218893Sdim return Pair.first->second; 634218893Sdim 635204642Srdivacky if (isa<SCEVConstant>(S)) 636218893Sdim // A constant has no relevant loops. 637276479Sdim return nullptr; 638204642Srdivacky if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 639204642Srdivacky if (const Instruction *I = dyn_cast<Instruction>(U->getValue())) 640218893Sdim return Pair.first->second = SE.LI->getLoopFor(I->getParent()); 641218893Sdim // A non-instruction has no relevant loops. 642276479Sdim return nullptr; 643204642Srdivacky } 644204642Srdivacky if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) { 645276479Sdim const Loop *L = nullptr; 646204642Srdivacky if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) 647204642Srdivacky L = AR->getLoop(); 648204642Srdivacky for (SCEVNAryExpr::op_iterator I = N->op_begin(), E = N->op_end(); 649204642Srdivacky I != E; ++I) 650218893Sdim L = PickMostRelevantLoop(L, getRelevantLoop(*I), *SE.DT); 651218893Sdim return RelevantLoops[N] = L; 652204642Srdivacky } 653218893Sdim if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) { 654218893Sdim const Loop *Result = getRelevantLoop(C->getOperand()); 655218893Sdim return RelevantLoops[C] = Result; 656218893Sdim } 657218893Sdim if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) { 658218893Sdim const Loop *Result = 659218893Sdim PickMostRelevantLoop(getRelevantLoop(D->getLHS()), 660218893Sdim getRelevantLoop(D->getRHS()), 661218893Sdim *SE.DT); 662218893Sdim return RelevantLoops[D] = Result; 663218893Sdim } 664204642Srdivacky llvm_unreachable("Unexpected SCEV type!"); 665204642Srdivacky} 666198090Srdivacky 667207618Srdivackynamespace { 668207618Srdivacky 669204642Srdivacky/// LoopCompare - Compare loops by PickMostRelevantLoop. 
670204642Srdivackyclass LoopCompare { 671204642Srdivacky DominatorTree &DT; 672204642Srdivackypublic: 673204642Srdivacky explicit LoopCompare(DominatorTree &dt) : DT(dt) {} 674198090Srdivacky 675204642Srdivacky bool operator()(std::pair<const Loop *, const SCEV *> LHS, 676204642Srdivacky std::pair<const Loop *, const SCEV *> RHS) const { 677212904Sdim // Keep pointer operands sorted at the end. 678212904Sdim if (LHS.second->getType()->isPointerTy() != 679212904Sdim RHS.second->getType()->isPointerTy()) 680212904Sdim return LHS.second->getType()->isPointerTy(); 681212904Sdim 682204642Srdivacky // Compare loops with PickMostRelevantLoop. 683204642Srdivacky if (LHS.first != RHS.first) 684204642Srdivacky return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first; 685204642Srdivacky 686204642Srdivacky // If one operand is a non-constant negative and the other is not, 687204642Srdivacky // put the non-constant negative on the right so that a sub can 688204642Srdivacky // be used instead of a negate and add. 689234353Sdim if (LHS.second->isNonConstantNegative()) { 690234353Sdim if (!RHS.second->isNonConstantNegative()) 691204642Srdivacky return false; 692234353Sdim } else if (RHS.second->isNonConstantNegative()) 693204642Srdivacky return true; 694204642Srdivacky 695204642Srdivacky // Otherwise they are equivalent according to this comparison. 696204642Srdivacky return false; 697198090Srdivacky } 698204642Srdivacky}; 699193323Sed 700207618Srdivacky} 701207618Srdivacky 702204642SrdivackyValue *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) { 703226633Sdim Type *Ty = SE.getEffectiveSCEVType(S->getType()); 704193323Sed 705204642Srdivacky // Collect all the add operands in a loop, along with their associated loops. 706204642Srdivacky // Iterate in reverse so that constants are emitted last, all else equal, and 707204642Srdivacky // so that pointer operands are inserted first, which the code below relies on 708204642Srdivacky // to form more involved GEPs. 
709204642Srdivacky SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops; 710204642Srdivacky for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()), 711204642Srdivacky E(S->op_begin()); I != E; ++I) 712218893Sdim OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I)); 713204642Srdivacky 714204642Srdivacky // Sort by loop. Use a stable sort so that constants follow non-constants and 715204642Srdivacky // pointer operands precede non-pointer operands. 716204642Srdivacky std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT)); 717204642Srdivacky 718204642Srdivacky // Emit instructions to add all the operands. Hoist as much as possible 719204642Srdivacky // out of loops, and form meaningful getelementptrs where possible. 720276479Sdim Value *Sum = nullptr; 721204642Srdivacky for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator 722204642Srdivacky I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) { 723204642Srdivacky const Loop *CurLoop = I->first; 724204642Srdivacky const SCEV *Op = I->second; 725204642Srdivacky if (!Sum) { 726204642Srdivacky // This is the first operand. Just expand it. 727204642Srdivacky Sum = expand(Op); 728204642Srdivacky ++I; 729226633Sdim } else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) { 730204642Srdivacky // The running sum expression is a pointer. Try to form a getelementptr 731204642Srdivacky // at this level with that as the base. 732204642Srdivacky SmallVector<const SCEV *, 4> NewOps; 733212904Sdim for (; I != E && I->first == CurLoop; ++I) { 734212904Sdim // If the operand is SCEVUnknown and not instructions, peek through 735212904Sdim // it, to enable more of it to be folded into the GEP. 
736212904Sdim const SCEV *X = I->second; 737212904Sdim if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X)) 738212904Sdim if (!isa<Instruction>(U->getValue())) 739212904Sdim X = SE.getSCEV(U->getValue()); 740212904Sdim NewOps.push_back(X); 741212904Sdim } 742204642Srdivacky Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum); 743226633Sdim } else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) { 744204642Srdivacky // The running sum is an integer, and there's a pointer at this level. 745207618Srdivacky // Try to form a getelementptr. If the running sum is instructions, 746207618Srdivacky // use a SCEVUnknown to avoid re-analyzing them. 747204642Srdivacky SmallVector<const SCEV *, 4> NewOps; 748207618Srdivacky NewOps.push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) : 749207618Srdivacky SE.getSCEV(Sum)); 750204642Srdivacky for (++I; I != E && I->first == CurLoop; ++I) 751204642Srdivacky NewOps.push_back(I->second); 752204642Srdivacky Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op)); 753234353Sdim } else if (Op->isNonConstantNegative()) { 754204642Srdivacky // Instead of doing a negate and add, just do a subtract. 755202878Srdivacky Value *W = expandCodeFor(SE.getNegativeSCEV(Op), Ty); 756204642Srdivacky Sum = InsertNoopCastOfTo(Sum, Ty); 757204642Srdivacky Sum = InsertBinop(Instruction::Sub, Sum, W); 758204642Srdivacky ++I; 759202878Srdivacky } else { 760204642Srdivacky // A simple add. 761202878Srdivacky Value *W = expandCodeFor(Op, Ty); 762204642Srdivacky Sum = InsertNoopCastOfTo(Sum, Ty); 763204642Srdivacky // Canonicalize a constant to the RHS. 
764204642Srdivacky if (isa<Constant>(Sum)) std::swap(Sum, W); 765204642Srdivacky Sum = InsertBinop(Instruction::Add, Sum, W); 766204642Srdivacky ++I; 767202878Srdivacky } 768193323Sed } 769204642Srdivacky 770204642Srdivacky return Sum; 771193323Sed} 772193323Sed 773193323SedValue *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) { 774226633Sdim Type *Ty = SE.getEffectiveSCEVType(S->getType()); 775193323Sed 776204642Srdivacky // Collect all the mul operands in a loop, along with their associated loops. 777204642Srdivacky // Iterate in reverse so that constants are emitted last, all else equal. 778204642Srdivacky SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops; 779204642Srdivacky for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()), 780204642Srdivacky E(S->op_begin()); I != E; ++I) 781218893Sdim OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I)); 782193323Sed 783204642Srdivacky // Sort by loop. Use a stable sort so that constants follow non-constants. 784204642Srdivacky std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT)); 785204642Srdivacky 786204642Srdivacky // Emit instructions to mul all the operands. Hoist as much as possible 787204642Srdivacky // out of loops. 788276479Sdim Value *Prod = nullptr; 789204642Srdivacky for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator 790204642Srdivacky I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) { 791204642Srdivacky const SCEV *Op = I->second; 792204642Srdivacky if (!Prod) { 793204642Srdivacky // This is the first operand. Just expand it. 794204642Srdivacky Prod = expand(Op); 795204642Srdivacky ++I; 796204642Srdivacky } else if (Op->isAllOnesValue()) { 797204642Srdivacky // Instead of doing a multiply by negative one, just do a negate. 
798204642Srdivacky Prod = InsertNoopCastOfTo(Prod, Ty); 799204642Srdivacky Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod); 800204642Srdivacky ++I; 801204642Srdivacky } else { 802204642Srdivacky // A simple mul. 803204642Srdivacky Value *W = expandCodeFor(Op, Ty); 804204642Srdivacky Prod = InsertNoopCastOfTo(Prod, Ty); 805204642Srdivacky // Canonicalize a constant to the RHS. 806204642Srdivacky if (isa<Constant>(Prod)) std::swap(Prod, W); 807204642Srdivacky Prod = InsertBinop(Instruction::Mul, Prod, W); 808204642Srdivacky ++I; 809204642Srdivacky } 810193323Sed } 811193323Sed 812204642Srdivacky return Prod; 813193323Sed} 814193323Sed 815193323SedValue *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) { 816226633Sdim Type *Ty = SE.getEffectiveSCEVType(S->getType()); 817193323Sed 818194178Sed Value *LHS = expandCodeFor(S->getLHS(), Ty); 819193323Sed if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) { 820193323Sed const APInt &RHS = SC->getValue()->getValue(); 821193323Sed if (RHS.isPowerOf2()) 822193323Sed return InsertBinop(Instruction::LShr, LHS, 823195340Sed ConstantInt::get(Ty, RHS.logBase2())); 824193323Sed } 825193323Sed 826194178Sed Value *RHS = expandCodeFor(S->getRHS(), Ty); 827195340Sed return InsertBinop(Instruction::UDiv, LHS, RHS); 828193323Sed} 829193323Sed 830193323Sed/// Move parts of Base into Rest to leave Base with the minimal 831193323Sed/// expression that provides a pointer operand suitable for a 832193323Sed/// GEP expansion. 
833198090Srdivackystatic void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest, 834193323Sed ScalarEvolution &SE) { 835193323Sed while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) { 836193323Sed Base = A->getStart(); 837193323Sed Rest = SE.getAddExpr(Rest, 838207618Srdivacky SE.getAddRecExpr(SE.getConstant(A->getType(), 0), 839193323Sed A->getStepRecurrence(SE), 840221345Sdim A->getLoop(), 841261991Sdim A->getNoWrapFlags(SCEV::FlagNW))); 842193323Sed } 843193323Sed if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) { 844193323Sed Base = A->getOperand(A->getNumOperands()-1); 845198090Srdivacky SmallVector<const SCEV *, 8> NewAddOps(A->op_begin(), A->op_end()); 846193323Sed NewAddOps.back() = Rest; 847193323Sed Rest = SE.getAddExpr(NewAddOps); 848193323Sed ExposePointerBase(Base, Rest, SE); 849193323Sed } 850193323Sed} 851193323Sed 852226633Sdim/// Determine if this is a well-behaved chain of instructions leading back to 853226633Sdim/// the PHI. If so, it may be reused by expanded expressions. 854226633Sdimbool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV, 855226633Sdim const Loop *L) { 856226633Sdim if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) || 857226633Sdim (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV))) 858226633Sdim return false; 859226633Sdim // If any of the operands don't dominate the insert position, bail. 860226633Sdim // Addrec operands are always loop-invariant, so this can only happen 861226633Sdim // if there are instructions which haven't been hoisted. 862226633Sdim if (L == IVIncInsertLoop) { 863226633Sdim for (User::op_iterator OI = IncV->op_begin()+1, 864226633Sdim OE = IncV->op_end(); OI != OE; ++OI) 865226633Sdim if (Instruction *OInst = dyn_cast<Instruction>(OI)) 866226633Sdim if (!SE.DT->dominates(OInst, IVIncInsertPos)) 867226633Sdim return false; 868226633Sdim } 869226633Sdim // Advance to the next instruction. 
870226633Sdim IncV = dyn_cast<Instruction>(IncV->getOperand(0)); 871226633Sdim if (!IncV) 872226633Sdim return false; 873226633Sdim 874226633Sdim if (IncV->mayHaveSideEffects()) 875226633Sdim return false; 876226633Sdim 877226633Sdim if (IncV != PN) 878226633Sdim return true; 879226633Sdim 880226633Sdim return isNormalAddRecExprPHI(PN, IncV, L); 881226633Sdim} 882226633Sdim 883234353Sdim/// getIVIncOperand returns an induction variable increment's induction 884234353Sdim/// variable operand. 885234353Sdim/// 886234353Sdim/// If allowScale is set, any type of GEP is allowed as long as the nonIV 887234353Sdim/// operands dominate InsertPos. 888234353Sdim/// 889234353Sdim/// If allowScale is not set, ensure that a GEP increment conforms to one of the 890234353Sdim/// simple patterns generated by getAddRecExprPHILiterally and 891234353Sdim/// expandAddtoGEP. If the pattern isn't recognized, return NULL. 892234353SdimInstruction *SCEVExpander::getIVIncOperand(Instruction *IncV, 893234353Sdim Instruction *InsertPos, 894234353Sdim bool allowScale) { 895234353Sdim if (IncV == InsertPos) 896276479Sdim return nullptr; 897234353Sdim 898226633Sdim switch (IncV->getOpcode()) { 899234353Sdim default: 900276479Sdim return nullptr; 901226633Sdim // Check for a simple Add/Sub or GEP of a loop invariant step. 
902226633Sdim case Instruction::Add: 903234353Sdim case Instruction::Sub: { 904234353Sdim Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1)); 905234353Sdim if (!OInst || SE.DT->dominates(OInst, InsertPos)) 906234353Sdim return dyn_cast<Instruction>(IncV->getOperand(0)); 907276479Sdim return nullptr; 908234353Sdim } 909226633Sdim case Instruction::BitCast: 910234353Sdim return dyn_cast<Instruction>(IncV->getOperand(0)); 911234353Sdim case Instruction::GetElementPtr: 912226633Sdim for (Instruction::op_iterator I = IncV->op_begin()+1, E = IncV->op_end(); 913226633Sdim I != E; ++I) { 914226633Sdim if (isa<Constant>(*I)) 915226633Sdim continue; 916234353Sdim if (Instruction *OInst = dyn_cast<Instruction>(*I)) { 917234353Sdim if (!SE.DT->dominates(OInst, InsertPos)) 918276479Sdim return nullptr; 919234353Sdim } 920234353Sdim if (allowScale) { 921234353Sdim // allow any kind of GEP as long as it can be hoisted. 922234353Sdim continue; 923234353Sdim } 924234353Sdim // This must be a pointer addition of constants (pretty), which is already 925234353Sdim // handled, or some number of address-size elements (ugly). Ugly geps 926234353Sdim // have 2 operands. i1* is used by the expander to represent an 927234353Sdim // address-size element. 928226633Sdim if (IncV->getNumOperands() != 2) 929276479Sdim return nullptr; 930226633Sdim unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace(); 931226633Sdim if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS) 932226633Sdim && IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS)) 933276479Sdim return nullptr; 934226633Sdim break; 935226633Sdim } 936234353Sdim return dyn_cast<Instruction>(IncV->getOperand(0)); 937226633Sdim } 938234353Sdim} 939234353Sdim 940234353Sdim/// hoistStep - Attempt to hoist a simple IV increment above InsertPos to make 941234353Sdim/// it available to other uses in this loop. Recursively hoist any operands, 942234353Sdim/// until we reach a value that dominates InsertPos. 
943234353Sdimbool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) { 944234353Sdim if (SE.DT->dominates(IncV, InsertPos)) 945234353Sdim return true; 946234353Sdim 947234353Sdim // InsertPos must itself dominate IncV so that IncV's new position satisfies 948234353Sdim // its existing users. 949239462Sdim if (isa<PHINode>(InsertPos) 950239462Sdim || !SE.DT->dominates(InsertPos->getParent(), IncV->getParent())) 951226633Sdim return false; 952234353Sdim 953234353Sdim // Check that the chain of IV operands leading back to Phi can be hoisted. 954234353Sdim SmallVector<Instruction*, 4> IVIncs; 955234353Sdim for(;;) { 956234353Sdim Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true); 957234353Sdim if (!Oper) 958234353Sdim return false; 959234353Sdim // IncV is safe to hoist. 960234353Sdim IVIncs.push_back(IncV); 961234353Sdim IncV = Oper; 962234353Sdim if (SE.DT->dominates(IncV, InsertPos)) 963234353Sdim break; 964226633Sdim } 965234353Sdim for (SmallVectorImpl<Instruction*>::reverse_iterator I = IVIncs.rbegin(), 966234353Sdim E = IVIncs.rend(); I != E; ++I) { 967234353Sdim (*I)->moveBefore(InsertPos); 968234353Sdim } 969234353Sdim return true; 970226633Sdim} 971226633Sdim 972234353Sdim/// Determine if this cyclic phi is in a form that would have been generated by 973234353Sdim/// LSR. We don't care if the phi was actually expanded in this pass, as long 974234353Sdim/// as it is in a low-cost form, for example, no implied multiplication. This 975234353Sdim/// should match any patterns generated by getAddRecExprPHILiterally and 976234353Sdim/// expandAddtoGEP. 
977234353Sdimbool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV, 978234353Sdim const Loop *L) { 979234353Sdim for(Instruction *IVOper = IncV; 980234353Sdim (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(), 981234353Sdim /*allowScale=*/false));) { 982234353Sdim if (IVOper == PN) 983234353Sdim return true; 984234353Sdim } 985234353Sdim return false; 986234353Sdim} 987234353Sdim 988234353Sdim/// expandIVInc - Expand an IV increment at Builder's current InsertPos. 989234353Sdim/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may 990234353Sdim/// need to materialize IV increments elsewhere to handle difficult situations. 991234353SdimValue *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L, 992234353Sdim Type *ExpandTy, Type *IntTy, 993234353Sdim bool useSubtract) { 994234353Sdim Value *IncV; 995234353Sdim // If the PHI is a pointer, use a GEP, otherwise use an add or sub. 996234353Sdim if (ExpandTy->isPointerTy()) { 997234353Sdim PointerType *GEPPtrTy = cast<PointerType>(ExpandTy); 998234353Sdim // If the step isn't constant, don't use an implicitly scaled GEP, because 999234353Sdim // that would require a multiply inside the loop. 1000234353Sdim if (!isa<ConstantInt>(StepV)) 1001234353Sdim GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()), 1002234353Sdim GEPPtrTy->getAddressSpace()); 1003234353Sdim const SCEV *const StepArray[1] = { SE.getSCEV(StepV) }; 1004234353Sdim IncV = expandAddToGEP(StepArray, StepArray+1, GEPPtrTy, IntTy, PN); 1005234353Sdim if (IncV->getType() != PN->getType()) { 1006234353Sdim IncV = Builder.CreateBitCast(IncV, PN->getType()); 1007234353Sdim rememberInstruction(IncV); 1008234353Sdim } 1009234353Sdim } else { 1010234353Sdim IncV = useSubtract ? 
1011234353Sdim Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") : 1012234353Sdim Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next"); 1013234353Sdim rememberInstruction(IncV); 1014234353Sdim } 1015234353Sdim return IncV; 1016234353Sdim} 1017234353Sdim 1018276479Sdim/// \brief Hoist the addrec instruction chain rooted in the loop phi above the 1019276479Sdim/// position. This routine assumes that this is possible (has been checked). 1020276479Sdimstatic void hoistBeforePos(DominatorTree *DT, Instruction *InstToHoist, 1021276479Sdim Instruction *Pos, PHINode *LoopPhi) { 1022276479Sdim do { 1023276479Sdim if (DT->dominates(InstToHoist, Pos)) 1024276479Sdim break; 1025276479Sdim // Make sure the increment is where we want it. But don't move it 1026276479Sdim // down past a potential existing post-inc user. 1027276479Sdim InstToHoist->moveBefore(Pos); 1028276479Sdim Pos = InstToHoist; 1029276479Sdim InstToHoist = cast<Instruction>(InstToHoist->getOperand(0)); 1030276479Sdim } while (InstToHoist != LoopPhi); 1031276479Sdim} 1032276479Sdim 1033276479Sdim/// \brief Check whether we can cheaply express the requested SCEV in terms of 1034276479Sdim/// the available PHI SCEV by truncation and/or invertion of the step. 1035276479Sdimstatic bool canBeCheaplyTransformed(ScalarEvolution &SE, 1036276479Sdim const SCEVAddRecExpr *Phi, 1037276479Sdim const SCEVAddRecExpr *Requested, 1038276479Sdim bool &InvertStep) { 1039276479Sdim Type *PhiTy = SE.getEffectiveSCEVType(Phi->getType()); 1040276479Sdim Type *RequestedTy = SE.getEffectiveSCEVType(Requested->getType()); 1041276479Sdim 1042276479Sdim if (RequestedTy->getIntegerBitWidth() > PhiTy->getIntegerBitWidth()) 1043276479Sdim return false; 1044276479Sdim 1045276479Sdim // Try truncate it if necessary. 1046276479Sdim Phi = dyn_cast<SCEVAddRecExpr>(SE.getTruncateOrNoop(Phi, RequestedTy)); 1047276479Sdim if (!Phi) 1048276479Sdim return false; 1049276479Sdim 1050276479Sdim // Check whether truncation will help. 
1051276479Sdim if (Phi == Requested) { 1052276479Sdim InvertStep = false; 1053276479Sdim return true; 1054276479Sdim } 1055276479Sdim 1056276479Sdim // Check whether inverting will help: {R,+,-1} == R - {0,+,1}. 1057276479Sdim if (SE.getAddExpr(Requested->getStart(), 1058276479Sdim SE.getNegativeSCEV(Requested)) == Phi) { 1059276479Sdim InvertStep = true; 1060276479Sdim return true; 1061276479Sdim } 1062276479Sdim 1063276479Sdim return false; 1064276479Sdim} 1065276479Sdim 1066202878Srdivacky/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand 1067202878Srdivacky/// the base addrec, which is the addrec without any non-loop-dominating 1068202878Srdivacky/// values, and return the PHI. 1069202878SrdivackyPHINode * 1070202878SrdivackySCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized, 1071202878Srdivacky const Loop *L, 1072226633Sdim Type *ExpandTy, 1073276479Sdim Type *IntTy, 1074276479Sdim Type *&TruncTy, 1075276479Sdim bool &InvertStep) { 1076224145Sdim assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position"); 1077224145Sdim 1078202878Srdivacky // Reuse a previously-inserted PHI, if present. 1079226633Sdim BasicBlock *LatchBlock = L->getLoopLatch(); 1080226633Sdim if (LatchBlock) { 1081276479Sdim PHINode *AddRecPhiMatch = nullptr; 1082276479Sdim Instruction *IncV = nullptr; 1083276479Sdim TruncTy = nullptr; 1084276479Sdim InvertStep = false; 1085276479Sdim 1086276479Sdim // Only try partially matching scevs that need truncation and/or 1087276479Sdim // step-inversion if we know this loop is outside the current loop. 
1088276479Sdim bool TryNonMatchingSCEV = IVIncInsertLoop && 1089276479Sdim SE.DT->properlyDominates(LatchBlock, IVIncInsertLoop->getHeader()); 1090276479Sdim 1091226633Sdim for (BasicBlock::iterator I = L->getHeader()->begin(); 1092226633Sdim PHINode *PN = dyn_cast<PHINode>(I); ++I) { 1093276479Sdim if (!SE.isSCEVable(PN->getType())) 1094226633Sdim continue; 1095202878Srdivacky 1096276479Sdim const SCEVAddRecExpr *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(PN)); 1097276479Sdim if (!PhiSCEV) 1098276479Sdim continue; 1099226633Sdim 1100276479Sdim bool IsMatchingSCEV = PhiSCEV == Normalized; 1101276479Sdim // We only handle truncation and inversion of phi recurrences for the 1102276479Sdim // expanded expression if the expanded expression's loop dominates the 1103276479Sdim // loop we insert to. Check now, so we can bail out early. 1104276479Sdim if (!IsMatchingSCEV && !TryNonMatchingSCEV) 1105276479Sdim continue; 1106276479Sdim 1107276479Sdim Instruction *TempIncV = 1108276479Sdim cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock)); 1109276479Sdim 1110276479Sdim // Check whether we can reuse this PHI node. 1111226633Sdim if (LSRMode) { 1112276479Sdim if (!isExpandedAddRecExprPHI(PN, TempIncV, L)) 1113226633Sdim continue; 1114276479Sdim if (L == IVIncInsertLoop && !hoistIVInc(TempIncV, IVIncInsertPos)) 1115234353Sdim continue; 1116276479Sdim } else { 1117276479Sdim if (!isNormalAddRecExprPHI(PN, TempIncV, L)) 1118226633Sdim continue; 1119226633Sdim } 1120276479Sdim 1121276479Sdim // Stop if we have found an exact match SCEV. 1122276479Sdim if (IsMatchingSCEV) { 1123276479Sdim IncV = TempIncV; 1124276479Sdim TruncTy = nullptr; 1125276479Sdim InvertStep = false; 1126276479Sdim AddRecPhiMatch = PN; 1127276479Sdim break; 1128276479Sdim } 1129276479Sdim 1130276479Sdim // Try whether the phi can be translated into the requested form 1131276479Sdim // (truncated and/or offset by a constant). 
1132276479Sdim if ((!TruncTy || InvertStep) && 1133276479Sdim canBeCheaplyTransformed(SE, PhiSCEV, Normalized, InvertStep)) { 1134276479Sdim // Record the phi node. But don't stop we might find an exact match 1135276479Sdim // later. 1136276479Sdim AddRecPhiMatch = PN; 1137276479Sdim IncV = TempIncV; 1138276479Sdim TruncTy = SE.getEffectiveSCEVType(Normalized->getType()); 1139276479Sdim } 1140276479Sdim } 1141276479Sdim 1142276479Sdim if (AddRecPhiMatch) { 1143276479Sdim // Potentially, move the increment. We have made sure in 1144276479Sdim // isExpandedAddRecExprPHI or hoistIVInc that this is possible. 1145276479Sdim if (L == IVIncInsertLoop) 1146276479Sdim hoistBeforePos(SE.DT, IncV, IVIncInsertPos, AddRecPhiMatch); 1147276479Sdim 1148226633Sdim // Ok, the add recurrence looks usable. 1149226633Sdim // Remember this PHI, even in post-inc mode. 1150276479Sdim InsertedValues.insert(AddRecPhiMatch); 1151226633Sdim // Remember the increment. 1152226633Sdim rememberInstruction(IncV); 1153276479Sdim return AddRecPhiMatch; 1154226633Sdim } 1155226633Sdim } 1156203954Srdivacky 1157202878Srdivacky // Save the original insertion point so we can restore it when we're done. 1158261991Sdim BuilderType::InsertPointGuard Guard(Builder); 1159202878Srdivacky 1160234353Sdim // Another AddRec may need to be recursively expanded below. For example, if 1161234353Sdim // this AddRec is quadratic, the StepV may itself be an AddRec in this 1162234353Sdim // loop. Remove this loop from the PostIncLoops set before expanding such 1163234353Sdim // AddRecs. Otherwise, we cannot find a valid position for the step 1164234353Sdim // (i.e. StepV can never dominate its loop header). Ideally, we could do 1165234353Sdim // SavedIncLoops.swap(PostIncLoops), but we generally have a single element, 1166234353Sdim // so it's not worth implementing SmallPtrSet::swap. 
1167234353Sdim PostIncLoopSet SavedPostIncLoops = PostIncLoops; 1168234353Sdim PostIncLoops.clear(); 1169234353Sdim 1170202878Srdivacky // Expand code for the start value. 1171202878Srdivacky Value *StartV = expandCodeFor(Normalized->getStart(), ExpandTy, 1172202878Srdivacky L->getHeader()->begin()); 1173202878Srdivacky 1174224145Sdim // StartV must be hoisted into L's preheader to dominate the new phi. 1175224145Sdim assert(!isa<Instruction>(StartV) || 1176224145Sdim SE.DT->properlyDominates(cast<Instruction>(StartV)->getParent(), 1177224145Sdim L->getHeader())); 1178224145Sdim 1179234353Sdim // Expand code for the step value. Do this before creating the PHI so that PHI 1180234353Sdim // reuse code doesn't see an incomplete PHI. 1181202878Srdivacky const SCEV *Step = Normalized->getStepRecurrence(SE); 1182234353Sdim // If the stride is negative, insert a sub instead of an add for the increment 1183234353Sdim // (unless it's a constant, because subtracts of constants are canonicalized 1184234353Sdim // to adds). 1185234353Sdim bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative(); 1186234353Sdim if (useSubtract) 1187202878Srdivacky Step = SE.getNegativeSCEV(Step); 1188234353Sdim // Expand the step somewhere that dominates the loop header. 1189202878Srdivacky Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin()); 1190202878Srdivacky 1191202878Srdivacky // Create the PHI. 1192221345Sdim BasicBlock *Header = L->getHeader(); 1193221345Sdim Builder.SetInsertPoint(Header, Header->begin()); 1194221345Sdim pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header); 1195224145Sdim PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE), 1196224145Sdim Twine(IVName) + ".iv"); 1197202878Srdivacky rememberInstruction(PN); 1198202878Srdivacky 1199202878Srdivacky // Create the step instructions and populate the PHI. 
1200221345Sdim for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) { 1201202878Srdivacky BasicBlock *Pred = *HPI; 1202202878Srdivacky 1203202878Srdivacky // Add a start value. 1204202878Srdivacky if (!L->contains(Pred)) { 1205202878Srdivacky PN->addIncoming(StartV, Pred); 1206202878Srdivacky continue; 1207202878Srdivacky } 1208202878Srdivacky 1209234353Sdim // Create a step value and add it to the PHI. 1210234353Sdim // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the 1211234353Sdim // instructions at IVIncInsertPos. 1212202878Srdivacky Instruction *InsertPos = L == IVIncInsertLoop ? 1213202878Srdivacky IVIncInsertPos : Pred->getTerminator(); 1214224145Sdim Builder.SetInsertPoint(InsertPos); 1215234353Sdim Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract); 1216261991Sdim if (isa<OverflowingBinaryOperator>(IncV)) { 1217261991Sdim if (Normalized->getNoWrapFlags(SCEV::FlagNUW)) 1218261991Sdim cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap(); 1219261991Sdim if (Normalized->getNoWrapFlags(SCEV::FlagNSW)) 1220261991Sdim cast<BinaryOperator>(IncV)->setHasNoSignedWrap(); 1221261991Sdim } 1222202878Srdivacky PN->addIncoming(IncV, Pred); 1223202878Srdivacky } 1224202878Srdivacky 1225234353Sdim // After expanding subexpressions, restore the PostIncLoops set so the caller 1226234353Sdim // can ensure that IVIncrement dominates the current uses. 1227234353Sdim PostIncLoops = SavedPostIncLoops; 1228234353Sdim 1229202878Srdivacky // Remember this PHI, even in post-inc mode. 
1230202878Srdivacky InsertedValues.insert(PN); 1231202878Srdivacky 1232202878Srdivacky return PN; 1233202878Srdivacky} 1234202878Srdivacky 1235202878SrdivackyValue *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) { 1236226633Sdim Type *STy = S->getType(); 1237226633Sdim Type *IntTy = SE.getEffectiveSCEVType(STy); 1238202878Srdivacky const Loop *L = S->getLoop(); 1239202878Srdivacky 1240202878Srdivacky // Determine a normalized form of this expression, which is the expression 1241202878Srdivacky // before any post-inc adjustment is made. 1242202878Srdivacky const SCEVAddRecExpr *Normalized = S; 1243207618Srdivacky if (PostIncLoops.count(L)) { 1244207618Srdivacky PostIncLoopSet Loops; 1245207618Srdivacky Loops.insert(L); 1246207618Srdivacky Normalized = 1247276479Sdim cast<SCEVAddRecExpr>(TransformForPostIncUse(Normalize, S, nullptr, 1248276479Sdim nullptr, Loops, SE, *SE.DT)); 1249202878Srdivacky } 1250202878Srdivacky 1251202878Srdivacky // Strip off any non-loop-dominating component from the addrec start. 1252202878Srdivacky const SCEV *Start = Normalized->getStart(); 1253276479Sdim const SCEV *PostLoopOffset = nullptr; 1254218893Sdim if (!SE.properlyDominates(Start, L->getHeader())) { 1255202878Srdivacky PostLoopOffset = Start; 1256207618Srdivacky Start = SE.getConstant(Normalized->getType(), 0); 1257221345Sdim Normalized = cast<SCEVAddRecExpr>( 1258221345Sdim SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE), 1259221345Sdim Normalized->getLoop(), 1260261991Sdim Normalized->getNoWrapFlags(SCEV::FlagNW))); 1261202878Srdivacky } 1262202878Srdivacky 1263202878Srdivacky // Strip off any non-loop-dominating component from the addrec step. 
1264202878Srdivacky const SCEV *Step = Normalized->getStepRecurrence(SE); 1265276479Sdim const SCEV *PostLoopScale = nullptr; 1266218893Sdim if (!SE.dominates(Step, L->getHeader())) { 1267202878Srdivacky PostLoopScale = Step; 1268207618Srdivacky Step = SE.getConstant(Normalized->getType(), 1); 1269202878Srdivacky Normalized = 1270261991Sdim cast<SCEVAddRecExpr>(SE.getAddRecExpr( 1271261991Sdim Start, Step, Normalized->getLoop(), 1272261991Sdim Normalized->getNoWrapFlags(SCEV::FlagNW))); 1273202878Srdivacky } 1274202878Srdivacky 1275202878Srdivacky // Expand the core addrec. If we need post-loop scaling, force it to 1276202878Srdivacky // expand to an integer type to avoid the need for additional casting. 1277226633Sdim Type *ExpandTy = PostLoopScale ? IntTy : STy; 1278276479Sdim // In some cases, we decide to reuse an existing phi node but need to truncate 1279276479Sdim // it and/or invert the step. 1280276479Sdim Type *TruncTy = nullptr; 1281276479Sdim bool InvertStep = false; 1282276479Sdim PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy, 1283276479Sdim TruncTy, InvertStep); 1284202878Srdivacky 1285204642Srdivacky // Accommodate post-inc mode, if necessary. 1286202878Srdivacky Value *Result; 1287207618Srdivacky if (!PostIncLoops.count(L)) 1288202878Srdivacky Result = PN; 1289202878Srdivacky else { 1290202878Srdivacky // In PostInc mode, use the post-incremented value. 1291202878Srdivacky BasicBlock *LatchBlock = L->getLoopLatch(); 1292202878Srdivacky assert(LatchBlock && "PostInc mode requires a unique loop latch!"); 1293202878Srdivacky Result = PN->getIncomingValueForBlock(LatchBlock); 1294226633Sdim 1295226633Sdim // For an expansion to use the postinc form, the client must call 1296226633Sdim // expandCodeFor with an InsertPoint that is either outside the PostIncLoop 1297226633Sdim // or dominated by IVIncInsertPos. 
1298234353Sdim if (isa<Instruction>(Result) 1299234353Sdim && !SE.DT->dominates(cast<Instruction>(Result), 1300234353Sdim Builder.GetInsertPoint())) { 1301234353Sdim // The induction variable's postinc expansion does not dominate this use. 1302234353Sdim // IVUsers tries to prevent this case, so it is rare. However, it can 1303234353Sdim // happen when an IVUser outside the loop is not dominated by the latch 1304234353Sdim // block. Adjusting IVIncInsertPos before expansion begins cannot handle 1305234353Sdim // all cases. Consider a phi outide whose operand is replaced during 1306234353Sdim // expansion with the value of the postinc user. Without fundamentally 1307234353Sdim // changing the way postinc users are tracked, the only remedy is 1308234353Sdim // inserting an extra IV increment. StepV might fold into PostLoopOffset, 1309234353Sdim // but hopefully expandCodeFor handles that. 1310234353Sdim bool useSubtract = 1311234353Sdim !ExpandTy->isPointerTy() && Step->isNonConstantNegative(); 1312234353Sdim if (useSubtract) 1313234353Sdim Step = SE.getNegativeSCEV(Step); 1314261991Sdim Value *StepV; 1315261991Sdim { 1316261991Sdim // Expand the step somewhere that dominates the loop header. 1317261991Sdim BuilderType::InsertPointGuard Guard(Builder); 1318261991Sdim StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin()); 1319261991Sdim } 1320234353Sdim Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract); 1321234353Sdim } 1322202878Srdivacky } 1323202878Srdivacky 1324276479Sdim // We have decided to reuse an induction variable of a dominating loop. Apply 1325276479Sdim // truncation and/or invertion of the step. 1326276479Sdim if (TruncTy) { 1327276479Sdim Type *ResTy = Result->getType(); 1328276479Sdim // Normalize the result type. 1329276479Sdim if (ResTy != SE.getEffectiveSCEVType(ResTy)) 1330276479Sdim Result = InsertNoopCastOfTo(Result, SE.getEffectiveSCEVType(ResTy)); 1331276479Sdim // Truncate the result. 
1332276479Sdim if (TruncTy != Result->getType()) { 1333276479Sdim Result = Builder.CreateTrunc(Result, TruncTy); 1334276479Sdim rememberInstruction(Result); 1335276479Sdim } 1336276479Sdim // Invert the result. 1337276479Sdim if (InvertStep) { 1338276479Sdim Result = Builder.CreateSub(expandCodeFor(Normalized->getStart(), TruncTy), 1339276479Sdim Result); 1340276479Sdim rememberInstruction(Result); 1341276479Sdim } 1342276479Sdim } 1343276479Sdim 1344202878Srdivacky // Re-apply any non-loop-dominating scale. 1345202878Srdivacky if (PostLoopScale) { 1346261991Sdim assert(S->isAffine() && "Can't linearly scale non-affine recurrences."); 1347203954Srdivacky Result = InsertNoopCastOfTo(Result, IntTy); 1348202878Srdivacky Result = Builder.CreateMul(Result, 1349202878Srdivacky expandCodeFor(PostLoopScale, IntTy)); 1350202878Srdivacky rememberInstruction(Result); 1351202878Srdivacky } 1352202878Srdivacky 1353202878Srdivacky // Re-apply any non-loop-dominating offset. 1354202878Srdivacky if (PostLoopOffset) { 1355226633Sdim if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) { 1356202878Srdivacky const SCEV *const OffsetArray[1] = { PostLoopOffset }; 1357202878Srdivacky Result = expandAddToGEP(OffsetArray, OffsetArray+1, PTy, IntTy, Result); 1358202878Srdivacky } else { 1359203954Srdivacky Result = InsertNoopCastOfTo(Result, IntTy); 1360202878Srdivacky Result = Builder.CreateAdd(Result, 1361202878Srdivacky expandCodeFor(PostLoopOffset, IntTy)); 1362202878Srdivacky rememberInstruction(Result); 1363202878Srdivacky } 1364202878Srdivacky } 1365202878Srdivacky 1366202878Srdivacky return Result; 1367202878Srdivacky} 1368202878Srdivacky 1369193323SedValue *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) { 1370202878Srdivacky if (!CanonicalMode) return expandAddRecExprLiterally(S); 1371202878Srdivacky 1372226633Sdim Type *Ty = SE.getEffectiveSCEVType(S->getType()); 1373193323Sed const Loop *L = S->getLoop(); 1374193323Sed 1375194178Sed // First check for an existing 
canonical IV in a suitable type. 1376276479Sdim PHINode *CanonicalIV = nullptr; 1377194178Sed if (PHINode *PN = L->getCanonicalInductionVariable()) 1378212904Sdim if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty)) 1379194178Sed CanonicalIV = PN; 1380194178Sed 1381194178Sed // Rewrite an AddRec in terms of the canonical induction variable, if 1382194178Sed // its type is more narrow. 1383194178Sed if (CanonicalIV && 1384194178Sed SE.getTypeSizeInBits(CanonicalIV->getType()) > 1385194178Sed SE.getTypeSizeInBits(Ty)) { 1386205407Srdivacky SmallVector<const SCEV *, 4> NewOps(S->getNumOperands()); 1387205407Srdivacky for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i) 1388205407Srdivacky NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType()); 1389221345Sdim Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(), 1390261991Sdim S->getNoWrapFlags(SCEV::FlagNW))); 1391194178Sed BasicBlock::iterator NewInsertPt = 1392276479Sdim std::next(BasicBlock::iterator(cast<Instruction>(V))); 1393261991Sdim BuilderType::InsertPointGuard Guard(Builder); 1394226633Sdim while (isa<PHINode>(NewInsertPt) || isa<DbgInfoIntrinsic>(NewInsertPt) || 1395226633Sdim isa<LandingPadInst>(NewInsertPt)) 1396210299Sed ++NewInsertPt; 1397276479Sdim V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), nullptr, 1398194178Sed NewInsertPt); 1399194178Sed return V; 1400194178Sed } 1401194178Sed 1402193323Sed // {X,+,F} --> X + {0,+,F} 1403193323Sed if (!S->getStart()->isZero()) { 1404205407Srdivacky SmallVector<const SCEV *, 4> NewOps(S->op_begin(), S->op_end()); 1405207618Srdivacky NewOps[0] = SE.getConstant(Ty, 0); 1406261991Sdim const SCEV *Rest = SE.getAddRecExpr(NewOps, L, 1407261991Sdim S->getNoWrapFlags(SCEV::FlagNW)); 1408193323Sed 1409193323Sed // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the 1410193323Sed // comments on expandAddToGEP for details. 
1411198090Srdivacky const SCEV *Base = S->getStart(); 1412198090Srdivacky const SCEV *RestArray[1] = { Rest }; 1413198090Srdivacky // Dig into the expression to find the pointer base for a GEP. 1414198090Srdivacky ExposePointerBase(Base, RestArray[0], SE); 1415198090Srdivacky // If we found a pointer, expand the AddRec with a GEP. 1416226633Sdim if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) { 1417198090Srdivacky // Make sure the Base isn't something exotic, such as a multiplied 1418198090Srdivacky // or divided pointer value. In those cases, the result type isn't 1419198090Srdivacky // actually a pointer type. 1420198090Srdivacky if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) { 1421198090Srdivacky Value *StartV = expand(Base); 1422198090Srdivacky assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!"); 1423198090Srdivacky return expandAddToGEP(RestArray, RestArray+1, PTy, Ty, StartV); 1424193323Sed } 1425193323Sed } 1426193323Sed 1427195098Sed // Just do a normal add. Pre-expand the operands to suppress folding. 1428195098Sed return expand(SE.getAddExpr(SE.getUnknown(expand(S->getStart())), 1429195098Sed SE.getUnknown(expand(Rest)))); 1430193323Sed } 1431193323Sed 1432212904Sdim // If we don't yet have a canonical IV, create one. 1433212904Sdim if (!CanonicalIV) { 1434193323Sed // Create and insert the PHI node for the induction variable in the 1435193323Sed // specified loop. 
1436193323Sed BasicBlock *Header = L->getHeader(); 1437221345Sdim pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header); 1438221345Sdim CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar", 1439221345Sdim Header->begin()); 1440212904Sdim rememberInstruction(CanonicalIV); 1441193323Sed 1442261991Sdim SmallSet<BasicBlock *, 4> PredSeen; 1443193323Sed Constant *One = ConstantInt::get(Ty, 1); 1444221345Sdim for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) { 1445210299Sed BasicBlock *HP = *HPI; 1446276479Sdim if (!PredSeen.insert(HP)) { 1447276479Sdim // There must be an incoming value for each predecessor, even the 1448276479Sdim // duplicates! 1449276479Sdim CanonicalIV->addIncoming(CanonicalIV->getIncomingValueForBlock(HP), HP); 1450261991Sdim continue; 1451276479Sdim } 1452261991Sdim 1453210299Sed if (L->contains(HP)) { 1454202878Srdivacky // Insert a unit add instruction right before the terminator 1455202878Srdivacky // corresponding to the back-edge. 1456212904Sdim Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One, 1457212904Sdim "indvar.next", 1458212904Sdim HP->getTerminator()); 1459224145Sdim Add->setDebugLoc(HP->getTerminator()->getDebugLoc()); 1460202878Srdivacky rememberInstruction(Add); 1461212904Sdim CanonicalIV->addIncoming(Add, HP); 1462198090Srdivacky } else { 1463212904Sdim CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP); 1464198090Srdivacky } 1465210299Sed } 1466193323Sed } 1467193323Sed 1468212904Sdim // {0,+,1} --> Insert a canonical induction variable into the loop! 
1469212904Sdim if (S->isAffine() && S->getOperand(1)->isOne()) { 1470212904Sdim assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) && 1471212904Sdim "IVs with types different from the canonical IV should " 1472212904Sdim "already have been handled!"); 1473212904Sdim return CanonicalIV; 1474212904Sdim } 1475212904Sdim 1476194178Sed // {0,+,F} --> {0,+,1} * F 1477193323Sed 1478193323Sed // If this is a simple linear addrec, emit it now as a special case. 1479195098Sed if (S->isAffine()) // {0,+,F} --> i*F 1480195098Sed return 1481195098Sed expand(SE.getTruncateOrNoop( 1482212904Sdim SE.getMulExpr(SE.getUnknown(CanonicalIV), 1483195098Sed SE.getNoopOrAnyExtend(S->getOperand(1), 1484212904Sdim CanonicalIV->getType())), 1485195098Sed Ty)); 1486194178Sed 1487193323Sed // If this is a chain of recurrences, turn it into a closed form, using the 1488193323Sed // folders, then expandCodeFor the closed form. This allows the folders to 1489193323Sed // simplify the expression without having to build a bunch of special code 1490193323Sed // into this folder. 1491212904Sdim const SCEV *IH = SE.getUnknown(CanonicalIV); // Get I as a "symbolic" SCEV. 1492193323Sed 1493194178Sed // Promote S up to the canonical IV type, if the cast is foldable. 1494198090Srdivacky const SCEV *NewS = S; 1495212904Sdim const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType()); 1496194178Sed if (isa<SCEVAddRecExpr>(Ext)) 1497194178Sed NewS = Ext; 1498194178Sed 1499198090Srdivacky const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE); 1500193323Sed //cerr << "Evaluated: " << *this << "\n to: " << *V << "\n"; 1501193323Sed 1502194178Sed // Truncate the result down to the original type, if needed. 
1503198090Srdivacky const SCEV *T = SE.getTruncateOrNoop(V, Ty); 1504194710Sed return expand(T); 1505193323Sed} 1506193323Sed 1507193323SedValue *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) { 1508226633Sdim Type *Ty = SE.getEffectiveSCEVType(S->getType()); 1509194178Sed Value *V = expandCodeFor(S->getOperand(), 1510194178Sed SE.getEffectiveSCEVType(S->getOperand()->getType())); 1511226633Sdim Value *I = Builder.CreateTrunc(V, Ty); 1512202878Srdivacky rememberInstruction(I); 1513193323Sed return I; 1514193323Sed} 1515193323Sed 1516193323SedValue *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) { 1517226633Sdim Type *Ty = SE.getEffectiveSCEVType(S->getType()); 1518194178Sed Value *V = expandCodeFor(S->getOperand(), 1519194178Sed SE.getEffectiveSCEVType(S->getOperand()->getType())); 1520226633Sdim Value *I = Builder.CreateZExt(V, Ty); 1521202878Srdivacky rememberInstruction(I); 1522193323Sed return I; 1523193323Sed} 1524193323Sed 1525193323SedValue *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) { 1526226633Sdim Type *Ty = SE.getEffectiveSCEVType(S->getType()); 1527194178Sed Value *V = expandCodeFor(S->getOperand(), 1528194178Sed SE.getEffectiveSCEVType(S->getOperand()->getType())); 1529226633Sdim Value *I = Builder.CreateSExt(V, Ty); 1530202878Srdivacky rememberInstruction(I); 1531193323Sed return I; 1532193323Sed} 1533193323Sed 1534193323SedValue *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) { 1535198090Srdivacky Value *LHS = expand(S->getOperand(S->getNumOperands()-1)); 1536226633Sdim Type *Ty = LHS->getType(); 1537198090Srdivacky for (int i = S->getNumOperands()-2; i >= 0; --i) { 1538198090Srdivacky // In the case of mixed integer and pointer types, do the 1539198090Srdivacky // rest of the comparisons as integer. 
1540198090Srdivacky if (S->getOperand(i)->getType() != Ty) { 1541198090Srdivacky Ty = SE.getEffectiveSCEVType(Ty); 1542198090Srdivacky LHS = InsertNoopCastOfTo(LHS, Ty); 1543198090Srdivacky } 1544194178Sed Value *RHS = expandCodeFor(S->getOperand(i), Ty); 1545226633Sdim Value *ICmp = Builder.CreateICmpSGT(LHS, RHS); 1546202878Srdivacky rememberInstruction(ICmp); 1547195340Sed Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax"); 1548202878Srdivacky rememberInstruction(Sel); 1549193323Sed LHS = Sel; 1550193323Sed } 1551198090Srdivacky // In the case of mixed integer and pointer types, cast the 1552198090Srdivacky // final result back to the pointer type. 1553198090Srdivacky if (LHS->getType() != S->getType()) 1554198090Srdivacky LHS = InsertNoopCastOfTo(LHS, S->getType()); 1555193323Sed return LHS; 1556193323Sed} 1557193323Sed 1558193323SedValue *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) { 1559198090Srdivacky Value *LHS = expand(S->getOperand(S->getNumOperands()-1)); 1560226633Sdim Type *Ty = LHS->getType(); 1561198090Srdivacky for (int i = S->getNumOperands()-2; i >= 0; --i) { 1562198090Srdivacky // In the case of mixed integer and pointer types, do the 1563198090Srdivacky // rest of the comparisons as integer. 1564198090Srdivacky if (S->getOperand(i)->getType() != Ty) { 1565198090Srdivacky Ty = SE.getEffectiveSCEVType(Ty); 1566198090Srdivacky LHS = InsertNoopCastOfTo(LHS, Ty); 1567198090Srdivacky } 1568194178Sed Value *RHS = expandCodeFor(S->getOperand(i), Ty); 1569226633Sdim Value *ICmp = Builder.CreateICmpUGT(LHS, RHS); 1570202878Srdivacky rememberInstruction(ICmp); 1571195340Sed Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax"); 1572202878Srdivacky rememberInstruction(Sel); 1573193323Sed LHS = Sel; 1574193323Sed } 1575198090Srdivacky // In the case of mixed integer and pointer types, cast the 1576198090Srdivacky // final result back to the pointer type. 
1577198090Srdivacky if (LHS->getType() != S->getType()) 1578198090Srdivacky LHS = InsertNoopCastOfTo(LHS, S->getType()); 1579193323Sed return LHS; 1580193323Sed} 1581193323Sed 1582226633SdimValue *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty, 1583234353Sdim Instruction *IP) { 1584205407Srdivacky Builder.SetInsertPoint(IP->getParent(), IP); 1585205407Srdivacky return expandCodeFor(SH, Ty); 1586205407Srdivacky} 1587205407Srdivacky 1588226633SdimValue *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) { 1589193323Sed // Expand the code for this SCEV. 1590193323Sed Value *V = expand(SH); 1591193323Sed if (Ty) { 1592193323Sed assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) && 1593193323Sed "non-trivial casts should be done with the SCEVs directly!"); 1594193323Sed V = InsertNoopCastOfTo(V, Ty); 1595193323Sed } 1596193323Sed return V; 1597193323Sed} 1598193323Sed 1599193323SedValue *SCEVExpander::expand(const SCEV *S) { 1600195098Sed // Compute an insertion point for this SCEV object. Hoist the instructions 1601195098Sed // as far out in the loop nest as possible. 1602195340Sed Instruction *InsertPt = Builder.GetInsertPoint(); 1603195340Sed for (Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock()); ; 1604195098Sed L = L->getParentLoop()) 1605218893Sdim if (SE.isLoopInvariant(S, L)) { 1606195098Sed if (!L) break; 1607206083Srdivacky if (BasicBlock *Preheader = L->getLoopPreheader()) 1608195098Sed InsertPt = Preheader->getTerminator(); 1609234353Sdim else { 1610234353Sdim // LSR sets the insertion point for AddRec start/step values to the 1611234353Sdim // block start to simplify value reuse, even though it's an invalid 1612234353Sdim // position. SCEVExpander must correct for this in all cases. 
1613234353Sdim InsertPt = L->getHeader()->getFirstInsertionPt(); 1614234353Sdim } 1615195098Sed } else { 1616195098Sed // If the SCEV is computable at this level, insert it into the header 1617195098Sed // after the PHIs (and after any other instructions that we've inserted 1618195098Sed // there) so that it is guaranteed to dominate any user inside the loop. 1619218893Sdim if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L)) 1620226633Sdim InsertPt = L->getHeader()->getFirstInsertionPt(); 1621234353Sdim while (InsertPt != Builder.GetInsertPoint() 1622234353Sdim && (isInsertedInstruction(InsertPt) 1623234353Sdim || isa<DbgInfoIntrinsic>(InsertPt))) { 1624276479Sdim InsertPt = std::next(BasicBlock::iterator(InsertPt)); 1625234353Sdim } 1626195098Sed break; 1627195098Sed } 1628195098Sed 1629195098Sed // Check to see if we already expanded this here. 1630249423Sdim std::map<std::pair<const SCEV *, Instruction *>, TrackingVH<Value> >::iterator 1631249423Sdim I = InsertedExpressions.find(std::make_pair(S, InsertPt)); 1632195340Sed if (I != InsertedExpressions.end()) 1633193323Sed return I->second; 1634195098Sed 1635261991Sdim BuilderType::InsertPointGuard Guard(Builder); 1636195340Sed Builder.SetInsertPoint(InsertPt->getParent(), InsertPt); 1637195340Sed 1638195098Sed // Expand the expression into instructions. 1639193323Sed Value *V = visit(S); 1640195098Sed 1641195098Sed // Remember the expanded value for this SCEV at this location. 1642226633Sdim // 1643226633Sdim // This is independent of PostIncLoops. The mapped value simply materializes 1644226633Sdim // the expression at this insertion point. If the mapped value happened to be 1645276479Sdim // a postinc expansion, it could be reused by a non-postinc user, but only if 1646226633Sdim // its insertion point was already at the head of the loop. 
1647226633Sdim InsertedExpressions[std::make_pair(S, InsertPt)] = V; 1648193323Sed return V; 1649193323Sed} 1650193574Sed 1651203954Srdivackyvoid SCEVExpander::rememberInstruction(Value *I) { 1652210299Sed if (!PostIncLoops.empty()) 1653210299Sed InsertedPostIncValues.insert(I); 1654210299Sed else 1655203954Srdivacky InsertedValues.insert(I); 1656203954Srdivacky} 1657203954Srdivacky 1658193574Sed/// getOrInsertCanonicalInductionVariable - This method returns the 1659193574Sed/// canonical induction variable of the specified type for the specified 1660193574Sed/// loop (inserting one if there is none). A canonical induction variable 1661193574Sed/// starts at zero and steps by one on each iteration. 1662212904SdimPHINode * 1663193574SedSCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L, 1664226633Sdim Type *Ty) { 1665203954Srdivacky assert(Ty->isIntegerTy() && "Can only insert integer induction variables!"); 1666212904Sdim 1667212904Sdim // Build a SCEV for {0,+,1}<L>. 1668221345Sdim // Conservatively use FlagAnyWrap for now. 1669207618Srdivacky const SCEV *H = SE.getAddRecExpr(SE.getConstant(Ty, 0), 1670221345Sdim SE.getConstant(Ty, 1), L, SCEV::FlagAnyWrap); 1671212904Sdim 1672212904Sdim // Emit code for it. 1673261991Sdim BuilderType::InsertPointGuard Guard(Builder); 1674276479Sdim PHINode *V = cast<PHINode>(expandCodeFor(H, nullptr, 1675276479Sdim L->getHeader()->begin())); 1676212904Sdim 1677195098Sed return V; 1678193574Sed} 1679226633Sdim 1680226633Sdim/// replaceCongruentIVs - Check for congruent phis in this loop header and 1681226633Sdim/// replace them with their most canonical representative. Return the number of 1682226633Sdim/// phis eliminated. 1683226633Sdim/// 1684226633Sdim/// This does not depend on any SCEVExpander state but should be used in 1685226633Sdim/// the same context that SCEVExpander is used. 
1686226633Sdimunsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT, 1687234353Sdim SmallVectorImpl<WeakVH> &DeadInsts, 1688249423Sdim const TargetTransformInfo *TTI) { 1689234353Sdim // Find integer phis in order of increasing width. 1690234353Sdim SmallVector<PHINode*, 8> Phis; 1691234353Sdim for (BasicBlock::iterator I = L->getHeader()->begin(); 1692234353Sdim PHINode *Phi = dyn_cast<PHINode>(I); ++I) { 1693234353Sdim Phis.push_back(Phi); 1694234353Sdim } 1695249423Sdim if (TTI) 1696276479Sdim std::sort(Phis.begin(), Phis.end(), [](Value *LHS, Value *RHS) { 1697276479Sdim // Put pointers at the back and make sure pointer < pointer = false. 1698276479Sdim if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 1699276479Sdim return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy(); 1700276479Sdim return RHS->getType()->getPrimitiveSizeInBits() < 1701276479Sdim LHS->getType()->getPrimitiveSizeInBits(); 1702276479Sdim }); 1703234353Sdim 1704226633Sdim unsigned NumElim = 0; 1705226633Sdim DenseMap<const SCEV *, PHINode *> ExprToIVMap; 1706234353Sdim // Process phis from wide to narrow. Mapping wide phis to the their truncation 1707234353Sdim // so narrow phis can reuse them. 1708234353Sdim for (SmallVectorImpl<PHINode*>::const_iterator PIter = Phis.begin(), 1709234353Sdim PEnd = Phis.end(); PIter != PEnd; ++PIter) { 1710234353Sdim PHINode *Phi = *PIter; 1711234353Sdim 1712243830Sdim // Fold constant phis. They may be congruent to other constant phis and 1713243830Sdim // would confuse the logic below that expects proper IVs. 
1714276479Sdim if (Value *V = SimplifyInstruction(Phi, SE.DL, SE.TLI, SE.DT)) { 1715243830Sdim Phi->replaceAllUsesWith(V); 1716243830Sdim DeadInsts.push_back(Phi); 1717243830Sdim ++NumElim; 1718243830Sdim DEBUG_WITH_TYPE(DebugType, dbgs() 1719243830Sdim << "INDVARS: Eliminated constant iv: " << *Phi << '\n'); 1720243830Sdim continue; 1721243830Sdim } 1722243830Sdim 1723226633Sdim if (!SE.isSCEVable(Phi->getType())) 1724226633Sdim continue; 1725226633Sdim 1726226633Sdim PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)]; 1727226633Sdim if (!OrigPhiRef) { 1728226633Sdim OrigPhiRef = Phi; 1729249423Sdim if (Phi->getType()->isIntegerTy() && TTI 1730249423Sdim && TTI->isTruncateFree(Phi->getType(), Phis.back()->getType())) { 1731234353Sdim // This phi can be freely truncated to the narrowest phi type. Map the 1732234353Sdim // truncated expression to it so it will be reused for narrow types. 1733234353Sdim const SCEV *TruncExpr = 1734234353Sdim SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType()); 1735234353Sdim ExprToIVMap[TruncExpr] = Phi; 1736234353Sdim } 1737226633Sdim continue; 1738226633Sdim } 1739226633Sdim 1740234353Sdim // Replacing a pointer phi with an integer phi or vice-versa doesn't make 1741234353Sdim // sense. 1742234353Sdim if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy()) 1743226633Sdim continue; 1744226633Sdim 1745226633Sdim if (BasicBlock *LatchBlock = L->getLoopLatch()) { 1746226633Sdim Instruction *OrigInc = 1747226633Sdim cast<Instruction>(OrigPhiRef->getIncomingValueForBlock(LatchBlock)); 1748226633Sdim Instruction *IsomorphicInc = 1749226633Sdim cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock)); 1750226633Sdim 1751234353Sdim // If this phi has the same width but is more canonical, replace the 1752234353Sdim // original with it. As part of the "more canonical" determination, 1753234353Sdim // respect a prior decision to use an IV chain. 
1754234353Sdim if (OrigPhiRef->getType() == Phi->getType() 1755234353Sdim && !(ChainedPhis.count(Phi) 1756234353Sdim || isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L)) 1757234353Sdim && (ChainedPhis.count(Phi) 1758234353Sdim || isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) { 1759226633Sdim std::swap(OrigPhiRef, Phi); 1760226633Sdim std::swap(OrigInc, IsomorphicInc); 1761226633Sdim } 1762226633Sdim // Replacing the congruent phi is sufficient because acyclic redundancy 1763226633Sdim // elimination, CSE/GVN, should handle the rest. However, once SCEV proves 1764226633Sdim // that a phi is congruent, it's often the head of an IV user cycle that 1765234353Sdim // is isomorphic with the original phi. It's worth eagerly cleaning up the 1766234353Sdim // common case of a single IV increment so that DeleteDeadPHIs can remove 1767234353Sdim // cycles that had postinc uses. 1768234353Sdim const SCEV *TruncExpr = SE.getTruncateOrNoop(SE.getSCEV(OrigInc), 1769234353Sdim IsomorphicInc->getType()); 1770234353Sdim if (OrigInc != IsomorphicInc 1771234353Sdim && TruncExpr == SE.getSCEV(IsomorphicInc) 1772234353Sdim && ((isa<PHINode>(OrigInc) && isa<PHINode>(IsomorphicInc)) 1773234353Sdim || hoistIVInc(OrigInc, IsomorphicInc))) { 1774226633Sdim DEBUG_WITH_TYPE(DebugType, dbgs() 1775226633Sdim << "INDVARS: Eliminated congruent iv.inc: " 1776226633Sdim << *IsomorphicInc << '\n'); 1777234353Sdim Value *NewInc = OrigInc; 1778234353Sdim if (OrigInc->getType() != IsomorphicInc->getType()) { 1779234353Sdim Instruction *IP = isa<PHINode>(OrigInc) 1780234353Sdim ? (Instruction*)L->getHeader()->getFirstInsertionPt() 1781234353Sdim : OrigInc->getNextNode(); 1782234353Sdim IRBuilder<> Builder(IP); 1783234353Sdim Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc()); 1784234353Sdim NewInc = Builder. 
1785234353Sdim CreateTruncOrBitCast(OrigInc, IsomorphicInc->getType(), IVName); 1786234353Sdim } 1787234353Sdim IsomorphicInc->replaceAllUsesWith(NewInc); 1788226633Sdim DeadInsts.push_back(IsomorphicInc); 1789226633Sdim } 1790226633Sdim } 1791226633Sdim DEBUG_WITH_TYPE(DebugType, dbgs() 1792226633Sdim << "INDVARS: Eliminated congruent iv: " << *Phi << '\n'); 1793226633Sdim ++NumElim; 1794234353Sdim Value *NewIV = OrigPhiRef; 1795234353Sdim if (OrigPhiRef->getType() != Phi->getType()) { 1796234353Sdim IRBuilder<> Builder(L->getHeader()->getFirstInsertionPt()); 1797234353Sdim Builder.SetCurrentDebugLocation(Phi->getDebugLoc()); 1798234353Sdim NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName); 1799234353Sdim } 1800234353Sdim Phi->replaceAllUsesWith(NewIV); 1801226633Sdim DeadInsts.push_back(Phi); 1802226633Sdim } 1803226633Sdim return NumElim; 1804226633Sdim} 1805239462Sdim 1806239462Sdimnamespace { 1807239462Sdim// Search for a SCEV subexpression that is not safe to expand. Any expression 1808239462Sdim// that may expand to a !isSafeToSpeculativelyExecute value is unsafe, namely 1809239462Sdim// UDiv expressions. We don't know if the UDiv is derived from an IR divide 1810239462Sdim// instruction, but the important thing is that we prove the denominator is 1811239462Sdim// nonzero before expansion. 1812239462Sdim// 1813239462Sdim// IVUsers already checks that IV-derived expressions are safe. So this check is 1814239462Sdim// only needed when the expression includes some subexpression that is not IV 1815239462Sdim// derived. 1816239462Sdim// 1817239462Sdim// Currently, we only allow division by a nonzero constant here. If this is 1818239462Sdim// inadequate, we could easily allow division by SCEVUnknown by using 1819239462Sdim// ValueTracking to check isKnownNonZero(). 1820261991Sdim// 1821261991Sdim// We cannot generally expand recurrences unless the step dominates the loop 1822261991Sdim// header. 
The expander handles the special case of affine recurrences by 1823261991Sdim// scaling the recurrence outside the loop, but this technique isn't generally 1824261991Sdim// applicable. Expanding a nested recurrence outside a loop requires computing 1825261991Sdim// binomial coefficients. This could be done, but the recurrence has to be in a 1826261991Sdim// perfectly reduced form, which can't be guaranteed. 1827239462Sdimstruct SCEVFindUnsafe { 1828261991Sdim ScalarEvolution &SE; 1829239462Sdim bool IsUnsafe; 1830239462Sdim 1831261991Sdim SCEVFindUnsafe(ScalarEvolution &se): SE(se), IsUnsafe(false) {} 1832239462Sdim 1833239462Sdim bool follow(const SCEV *S) { 1834261991Sdim if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) { 1835261991Sdim const SCEVConstant *SC = dyn_cast<SCEVConstant>(D->getRHS()); 1836261991Sdim if (!SC || SC->getValue()->isZero()) { 1837261991Sdim IsUnsafe = true; 1838261991Sdim return false; 1839261991Sdim } 1840261991Sdim } 1841261991Sdim if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) { 1842261991Sdim const SCEV *Step = AR->getStepRecurrence(SE); 1843261991Sdim if (!AR->isAffine() && !SE.dominates(Step, AR->getLoop()->getHeader())) { 1844261991Sdim IsUnsafe = true; 1845261991Sdim return false; 1846261991Sdim } 1847261991Sdim } 1848261991Sdim return true; 1849239462Sdim } 1850239462Sdim bool isDone() const { return IsUnsafe; } 1851239462Sdim}; 1852239462Sdim} 1853239462Sdim 1854239462Sdimnamespace llvm { 1855261991Sdimbool isSafeToExpand(const SCEV *S, ScalarEvolution &SE) { 1856261991Sdim SCEVFindUnsafe Search(SE); 1857239462Sdim visitAll(S, Search); 1858239462Sdim return !Search.IsUnsafe; 1859239462Sdim} 1860239462Sdim} 1861