LoopStrengthReduce.cpp revision 206083
//===- LoopStrengthReduce.cpp - Strength Reduce IVs in Loops --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation analyzes and transforms the induction variables (and
// computations derived from them) into forms suitable for efficient execution
// on the target.
//
// This pass performs a strength reduction on array references inside loops
// that have the loop induction variable as one or more of their components.
// It rewrites expressions to take advantage of scaled-index addressing modes
// available on the target, and it performs a variety of other optimizations
// related to loop induction variables.
//
// Terminology note: this code has a lot of handling for "post-increment" or
// "post-inc" users. This is not talking about post-increment addressing modes;
// it is instead talking about code like this:
//
//   %i = phi [ 0, %entry ], [ %i.next, %latch ]
//   ...
//   %i.next = add %i, 1
//   %c = icmp eq %i.next, %n
//
// The SCEV for %i is {0,+,1}<%L>. The SCEV for %i.next is {1,+,1}<%L>, however
// it's useful to think about these as the same register, with some uses using
// the value of the register before the add and some using it after. In this
// example, the icmp is a post-increment user, since it uses %i.next, which is
// the value of the induction variable after the increment. The other common
// case of post-increment users is users outside the loop.
//
// TODO: More sophistication in the way Formulae are generated and filtered.
//
// TODO: Handle multiple loops at a time.
//
// TODO: Should TargetLowering::AddrMode::BaseGV be changed to a ConstantExpr
//       instead of a GlobalValue?
//
// TODO: When truncation is free, truncate ICmp users' operands to make it a
//       smaller encoding (on x86 at least).
//
// TODO: When a negated register is used by an add (such as in a list of
//       multiple base registers, or as the increment expression in an addrec),
//       we may not actually need both reg and (-1 * reg) in registers; the
//       negation can be implemented by using a sub instead of an add. The
//       lack of support for taking this into consideration when making
//       register pressure decisions is partly worked around by the "Special"
//       use kind.
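//
// For illustration, strength reduction can turn a loop that recomputes an
// address with a multiply on every iteration, such as:
//
//   for (i = 0; i < n; ++i)
//     ... = *(base + i * 4);
//
// into a pointer that is simply incremented by 4 each iteration, or into a
// scaled-index addressing mode where the target supports one.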
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "loop-reduce"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Analysis/IVUsers.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include <algorithm>
using namespace llvm;

namespace {

/// RegSortData - This class holds data which is used to order reuse candidates.
class RegSortData {
public:
  /// UsedByIndices - This represents the set of LSRUse indices which reference
  /// a particular register.
  SmallBitVector UsedByIndices;

  RegSortData() {}

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

void RegSortData::print(raw_ostream &OS) const {
  OS << "[NumUses=" << UsedByIndices.count() << ']';
}

void RegSortData::dump() const {
  print(errs()); errs() << '\n';
}

namespace {

/// RegUseTracker - Map register candidates to information about how they are
/// used.
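/// Registers are also kept in discovery order (RegSequence), so that clients
/// iterating over the candidates see a deterministic order.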
class RegUseTracker {
  typedef DenseMap<const SCEV *, RegSortData> RegUsesTy;

  RegUsesTy RegUses;
  SmallVector<const SCEV *, 16> RegSequence;

public:
  void CountRegister(const SCEV *Reg, size_t LUIdx);

  bool isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const;

  const SmallBitVector &getUsedByIndices(const SCEV *Reg) const;

  void clear();

  typedef SmallVectorImpl<const SCEV *>::iterator iterator;
  typedef SmallVectorImpl<const SCEV *>::const_iterator const_iterator;
  iterator begin() { return RegSequence.begin(); }
  iterator end()   { return RegSequence.end(); }
  const_iterator begin() const { return RegSequence.begin(); }
  const_iterator end() const   { return RegSequence.end(); }
};

}

void
RegUseTracker::CountRegister(const SCEV *Reg, size_t LUIdx) {
  std::pair<RegUsesTy::iterator, bool> Pair =
    RegUses.insert(std::make_pair(Reg, RegSortData()));
  RegSortData &RSD = Pair.first->second;
  if (Pair.second)
    RegSequence.push_back(Reg);
  RSD.UsedByIndices.resize(std::max(RSD.UsedByIndices.size(), LUIdx + 1));
  RSD.UsedByIndices.set(LUIdx);
}

bool
RegUseTracker::isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const {
  if (!RegUses.count(Reg)) return false;
  const SmallBitVector &UsedByIndices =
    RegUses.find(Reg)->second.UsedByIndices;
  int i = UsedByIndices.find_first();
  if (i == -1) return false;
  if ((size_t)i != LUIdx) return true;
  return UsedByIndices.find_next(i) != -1;
}

const SmallBitVector &RegUseTracker::getUsedByIndices(const SCEV *Reg) const {
  RegUsesTy::const_iterator I = RegUses.find(Reg);
  assert(I != RegUses.end() && "Unknown register!");
  return I->second.UsedByIndices;
}

void RegUseTracker::clear() {
  RegUses.clear();
  RegSequence.clear();
}

namespace {

/// Formula - This class holds information that describes a formula for
/// computing a value satisfying a use. It may include broken-out immediates
/// and scaled registers.
struct Formula {
  /// AM - This is used to represent complex addressing, as well as other kinds
  /// of interesting uses.
  TargetLowering::AddrMode AM;

  /// BaseRegs - The list of "base" registers for this use. When this is
  /// non-empty, AM.HasBaseReg should be set to true.
  SmallVector<const SCEV *, 2> BaseRegs;

  /// ScaledReg - The 'scaled' register for this use. This should be non-null
  /// when AM.Scale is not zero.
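  /// For example, in a formula printed as reg(%base) + 4*reg(%i), %i is the
  /// scaled register and AM.Scale is 4.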
  const SCEV *ScaledReg;

  Formula() : ScaledReg(0) {}

  void InitialMatch(const SCEV *S, Loop *L,
                    ScalarEvolution &SE, DominatorTree &DT);

  unsigned getNumRegs() const;
  const Type *getType() const;

  bool referencesReg(const SCEV *S) const;
  bool hasRegsUsedByUsesOtherThan(size_t LUIdx,
                                  const RegUseTracker &RegUses) const;

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

/// DoInitialMatch - Recursion helper for InitialMatch.
static void DoInitialMatch(const SCEV *S, Loop *L,
                           SmallVectorImpl<const SCEV *> &Good,
                           SmallVectorImpl<const SCEV *> &Bad,
                           ScalarEvolution &SE, DominatorTree &DT) {
  // Collect expressions which properly dominate the loop header.
  if (S->properlyDominates(L->getHeader(), &DT)) {
    Good.push_back(S);
    return;
  }

  // Look at add operands.
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
         I != E; ++I)
      DoInitialMatch(*I, L, Good, Bad, SE, DT);
    return;
  }

  // Look at addrec operands.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
    if (!AR->getStart()->isZero()) {
      DoInitialMatch(AR->getStart(), L, Good, Bad, SE, DT);
      DoInitialMatch(SE.getAddRecExpr(SE.getIntegerSCEV(0, AR->getType()),
                                      AR->getStepRecurrence(SE),
                                      AR->getLoop()),
                     L, Good, Bad, SE, DT);
      return;
    }

  // Handle a multiplication by -1 (negation) if it didn't fold.
  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S))
    if (Mul->getOperand(0)->isAllOnesValue()) {
      SmallVector<const SCEV *, 4> Ops(Mul->op_begin()+1, Mul->op_end());
      const SCEV *NewMul = SE.getMulExpr(Ops);

      SmallVector<const SCEV *, 4> MyGood;
      SmallVector<const SCEV *, 4> MyBad;
      DoInitialMatch(NewMul, L, MyGood, MyBad, SE, DT);
      const SCEV *NegOne = SE.getSCEV(ConstantInt::getAllOnesValue(
        SE.getEffectiveSCEVType(NewMul->getType())));
      for (SmallVectorImpl<const SCEV *>::const_iterator I = MyGood.begin(),
           E = MyGood.end(); I != E; ++I)
        Good.push_back(SE.getMulExpr(NegOne, *I));
      for (SmallVectorImpl<const SCEV *>::const_iterator I = MyBad.begin(),
           E = MyBad.end(); I != E; ++I)
        Bad.push_back(SE.getMulExpr(NegOne, *I));
      return;
    }

  // Ok, we can't do anything interesting. Just stuff the whole thing into a
  // register and hope for the best.
  Bad.push_back(S);
}

/// InitialMatch - Incorporate loop-variant parts of S into this Formula,
/// attempting to keep all loop-invariant and loop-computable values in a
/// single base register.
void Formula::InitialMatch(const SCEV *S, Loop *L,
                           ScalarEvolution &SE, DominatorTree &DT) {
  SmallVector<const SCEV *, 4> Good;
  SmallVector<const SCEV *, 4> Bad;
  DoInitialMatch(S, L, Good, Bad, SE, DT);
  if (!Good.empty()) {
    BaseRegs.push_back(SE.getAddExpr(Good));
    AM.HasBaseReg = true;
  }
  if (!Bad.empty()) {
    BaseRegs.push_back(SE.getAddExpr(Bad));
    AM.HasBaseReg = true;
  }
}

/// getNumRegs - Return the total number of register operands used by this
/// formula. This does not include register uses implied by non-constant
/// addrec strides.
unsigned Formula::getNumRegs() const {
  return !!ScaledReg + BaseRegs.size();
}

/// getType - Return the type of this formula, if it has one, or null
/// otherwise. This type is meaningless except for the bit size.
const Type *Formula::getType() const {
  return !BaseRegs.empty() ? BaseRegs.front()->getType() :
         ScaledReg ? ScaledReg->getType() :
         AM.BaseGV ? AM.BaseGV->getType() :
         0;
}

/// referencesReg - Test if this formula references the given register.
bool Formula::referencesReg(const SCEV *S) const {
  return S == ScaledReg ||
         std::find(BaseRegs.begin(), BaseRegs.end(), S) != BaseRegs.end();
}

/// hasRegsUsedByUsesOtherThan - Test whether this formula uses registers
/// which are used by uses other than the use with the given index.
bool Formula::hasRegsUsedByUsesOtherThan(size_t LUIdx,
                                         const RegUseTracker &RegUses) const {
  if (ScaledReg)
    if (RegUses.isRegUsedByUsesOtherThan(ScaledReg, LUIdx))
      return true;
  for (SmallVectorImpl<const SCEV *>::const_iterator I = BaseRegs.begin(),
       E = BaseRegs.end(); I != E; ++I)
    if (RegUses.isRegUsedByUsesOtherThan(*I, LUIdx))
      return true;
  return false;
}

void Formula::print(raw_ostream &OS) const {
  bool First = true;
  if (AM.BaseGV) {
    if (!First) OS << " + "; else First = false;
    WriteAsOperand(OS, AM.BaseGV, /*PrintType=*/false);
  }
  if (AM.BaseOffs != 0) {
    if (!First) OS << " + "; else First = false;
    OS << AM.BaseOffs;
  }
  for (SmallVectorImpl<const SCEV *>::const_iterator I = BaseRegs.begin(),
       E = BaseRegs.end(); I != E; ++I) {
    if (!First) OS << " + "; else First = false;
    OS << "reg(" << **I << ')';
  }
  if (AM.Scale != 0) {
    if (!First) OS << " + "; else First = false;
    OS << AM.Scale << "*reg(";
    if (ScaledReg)
      OS << *ScaledReg;
    else
      OS << "<unknown>";
    OS << ')';
  }
}

void Formula::dump() const {
  print(errs()); errs() << '\n';
}

/// isAddRecSExtable - Return true if the given addrec can be sign-extended
/// without changing its value.
static bool isAddRecSExtable(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
  const Type *WideTy =
    IntegerType::get(SE.getContext(),
                     SE.getTypeSizeInBits(AR->getType()) + 1);
  return isa<SCEVAddRecExpr>(SE.getSignExtendExpr(AR, WideTy));
}

/// isAddSExtable - Return true if the given add can be sign-extended
/// without changing its value.
static bool isAddSExtable(const SCEVAddExpr *A, ScalarEvolution &SE) {
  const Type *WideTy =
    IntegerType::get(SE.getContext(),
                     SE.getTypeSizeInBits(A->getType()) + 1);
  return isa<SCEVAddExpr>(SE.getSignExtendExpr(A, WideTy));
}

/// isMulSExtable - Return true if the given mul can be sign-extended
/// without changing its value.
static bool isMulSExtable(const SCEVMulExpr *A, ScalarEvolution &SE) {
  const Type *WideTy =
    IntegerType::get(SE.getContext(),
                     SE.getTypeSizeInBits(A->getType()) + 1);
  return isa<SCEVMulExpr>(SE.getSignExtendExpr(A, WideTy));
}

/// getExactSDiv - Return an expression for LHS /s RHS, if it can be determined
/// and if the remainder is known to be zero, or null otherwise. If
/// IgnoreSignificantBits is true, expressions like (X * Y) /s Y are simplified
/// to X, ignoring that the multiplication may overflow, which is useful when
/// the result will be used in a context where the most significant bits are
/// ignored.
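/// For example, (8 + 4*%x) /s 4 yields (2 + %x), provided the add and mul are
/// known not to overflow when sign-extended (or IgnoreSignificantBits is set).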
static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
                                ScalarEvolution &SE,
                                bool IgnoreSignificantBits = false) {
  // Handle the trivial case, which works for any SCEV type.
  if (LHS == RHS)
    return SE.getIntegerSCEV(1, LHS->getType());

  // Handle x /s -1 as x * -1, to give ScalarEvolution a chance to do some
  // folding.
  if (RHS->isAllOnesValue())
    return SE.getMulExpr(LHS, RHS);

  // Check for a division of a constant by a constant.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(LHS)) {
    const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS);
    if (!RC)
      return 0;
    if (C->getValue()->getValue().srem(RC->getValue()->getValue()) != 0)
      return 0;
    return SE.getConstant(C->getValue()->getValue()
               .sdiv(RC->getValue()->getValue()));
  }

  // Distribute the sdiv over addrec operands, if the addrec doesn't overflow.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) {
    if (IgnoreSignificantBits || isAddRecSExtable(AR, SE)) {
      const SCEV *Start = getExactSDiv(AR->getStart(), RHS, SE,
                                       IgnoreSignificantBits);
      if (!Start) return 0;
      const SCEV *Step = getExactSDiv(AR->getStepRecurrence(SE), RHS, SE,
                                      IgnoreSignificantBits);
      if (!Step) return 0;
      return SE.getAddRecExpr(Start, Step, AR->getLoop());
    }
  }

  // Distribute the sdiv over add operands, if the add doesn't overflow.
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(LHS)) {
    if (IgnoreSignificantBits || isAddSExtable(Add, SE)) {
      SmallVector<const SCEV *, 8> Ops;
      for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
           I != E; ++I) {
        const SCEV *Op = getExactSDiv(*I, RHS, SE,
                                      IgnoreSignificantBits);
        if (!Op) return 0;
        Ops.push_back(Op);
      }
      return SE.getAddExpr(Ops);
    }
  }

  // Check for a multiply operand that we can pull RHS out of.
  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS))
    if (IgnoreSignificantBits || isMulSExtable(Mul, SE)) {
      SmallVector<const SCEV *, 4> Ops;
      bool Found = false;
      for (SCEVMulExpr::op_iterator I = Mul->op_begin(), E = Mul->op_end();
           I != E; ++I) {
        if (!Found)
          if (const SCEV *Q = getExactSDiv(*I, RHS, SE,
                                           IgnoreSignificantBits)) {
            Ops.push_back(Q);
            Found = true;
            continue;
          }
        Ops.push_back(*I);
      }
      return Found ? SE.getMulExpr(Ops) : 0;
    }

  // Otherwise we don't know.
  return 0;
}

/// ExtractImmediate - If S involves the addition of a constant integer value,
/// return that integer value, and mutate S to point to a new SCEV with that
/// value excluded.
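/// For example, given (42 + %x), this returns 42 and leaves S pointing at
/// (%x). (SCEV places constant operands first, so only the front operand of
/// an add needs to be examined.)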
static int64_t ExtractImmediate(const SCEV *&S, ScalarEvolution &SE) {
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    if (C->getValue()->getValue().getMinSignedBits() <= 64) {
      S = SE.getIntegerSCEV(0, C->getType());
      return C->getValue()->getSExtValue();
    }
  } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end());
    int64_t Result = ExtractImmediate(NewOps.front(), SE);
    S = SE.getAddExpr(NewOps);
    return Result;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
    int64_t Result = ExtractImmediate(NewOps.front(), SE);
    S = SE.getAddRecExpr(NewOps, AR->getLoop());
    return Result;
  }
  return 0;
}

/// ExtractSymbol - If S involves the addition of a GlobalValue address,
/// return that symbol, and mutate S to point to a new SCEV with that
/// value excluded.
static GlobalValue *ExtractSymbol(const SCEV *&S, ScalarEvolution &SE) {
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue())) {
      S = SE.getIntegerSCEV(0, GV->getType());
      return GV;
    }
  } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end());
    GlobalValue *Result = ExtractSymbol(NewOps.back(), SE);
    S = SE.getAddExpr(NewOps);
    return Result;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
    GlobalValue *Result = ExtractSymbol(NewOps.front(), SE);
    S = SE.getAddRecExpr(NewOps, AR->getLoop());
    return Result;
  }
  return 0;
}

/// isAddressUse - Returns true if the specified instruction is using the
/// specified value as an address.
static bool isAddressUse(Instruction *Inst, Value *OperandVal) {
  bool isAddress = isa<LoadInst>(Inst);
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->getOperand(1) == OperandVal)
      isAddress = true;
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::prefetch:
    case Intrinsic::x86_sse2_loadu_dq:
    case Intrinsic::x86_sse2_loadu_pd:
    case Intrinsic::x86_sse_loadu_ps:
    case Intrinsic::x86_sse_storeu_ps:
    case Intrinsic::x86_sse2_storeu_pd:
    case Intrinsic::x86_sse2_storeu_dq:
    case Intrinsic::x86_sse2_storel_dq:
      if (II->getOperand(1) == OperandVal)
        isAddress = true;
      break;
    }
  }
  return isAddress;
}

/// getAccessType - Return the type of the memory being accessed.
static const Type *getAccessType(const Instruction *Inst) {
  const Type *AccessTy = Inst->getType();
  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst))
    AccessTy = SI->getOperand(0)->getType();
  else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::x86_sse_storeu_ps:
    case Intrinsic::x86_sse2_storeu_pd:
    case Intrinsic::x86_sse2_storeu_dq:
    case Intrinsic::x86_sse2_storel_dq:
      AccessTy = II->getOperand(1)->getType();
      break;
    }
  }

  // All pointers have the same requirements, so canonicalize them to an
  // arbitrary pointer type to minimize variation.
  if (const PointerType *PTy = dyn_cast<PointerType>(AccessTy))
    AccessTy = PointerType::get(IntegerType::get(PTy->getContext(), 1),
                                PTy->getAddressSpace());

  return AccessTy;
}

/// DeleteTriviallyDeadInstructions - If any of the instructions in the
/// specified set are trivially dead, delete them and see if this makes any of
/// their operands subsequently dead.
static bool
DeleteTriviallyDeadInstructions(SmallVectorImpl<WeakVH> &DeadInsts) {
  bool Changed = false;

  while (!DeadInsts.empty()) {
    Instruction *I = dyn_cast_or_null<Instruction>(DeadInsts.pop_back_val());

    if (I == 0 || !isInstructionTriviallyDead(I))
      continue;

    for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
      if (Instruction *U = dyn_cast<Instruction>(*OI)) {
        *OI = 0;
        if (U->use_empty())
          DeadInsts.push_back(U);
      }

    I->eraseFromParent();
    Changed = true;
  }

  return Changed;
}

namespace {

/// Cost - This class is used to measure and compare candidate formulae.
class Cost {
  /// TODO: Some of these could be merged. Also, a lexical ordering
  /// isn't always optimal.
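  /// The fields below are compared lexicographically, in declaration order,
  /// by operator< (defined below); NumRegs dominates all of the later terms.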
  unsigned NumRegs;
  unsigned AddRecCost;
  unsigned NumIVMuls;
  unsigned NumBaseAdds;
  unsigned ImmCost;
  unsigned SetupCost;

public:
  Cost()
    : NumRegs(0), AddRecCost(0), NumIVMuls(0), NumBaseAdds(0), ImmCost(0),
      SetupCost(0) {}

  unsigned getNumRegs() const { return NumRegs; }

  bool operator<(const Cost &Other) const;

  void Loose();

  void RateFormula(const Formula &F,
                   SmallPtrSet<const SCEV *, 16> &Regs,
                   const DenseSet<const SCEV *> &VisitedRegs,
                   const Loop *L,
                   const SmallVectorImpl<int64_t> &Offsets,
                   ScalarEvolution &SE, DominatorTree &DT);

  void print(raw_ostream &OS) const;
  void dump() const;

private:
  void RateRegister(const SCEV *Reg,
                    SmallPtrSet<const SCEV *, 16> &Regs,
                    const Loop *L,
                    ScalarEvolution &SE, DominatorTree &DT);
  void RatePrimaryRegister(const SCEV *Reg,
                           SmallPtrSet<const SCEV *, 16> &Regs,
                           const Loop *L,
                           ScalarEvolution &SE, DominatorTree &DT);
};

}

/// RateRegister - Tally up interesting quantities from the given register.
void Cost::RateRegister(const SCEV *Reg,
                        SmallPtrSet<const SCEV *, 16> &Regs,
                        const Loop *L,
                        ScalarEvolution &SE, DominatorTree &DT) {
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Reg)) {
    if (AR->getLoop() == L)
      AddRecCost += 1; /// TODO: This should be a function of the stride.

    // If this is an addrec for a loop that's already been visited by LSR,
    // don't second-guess its addrec phi nodes. LSR isn't currently smart
    // enough to reason about more than one loop at a time. Consider these
    // registers free and leave them alone.
    else if (L->contains(AR->getLoop()) ||
             (!AR->getLoop()->contains(L) &&
              DT.dominates(L->getHeader(), AR->getLoop()->getHeader()))) {
      for (BasicBlock::iterator I = AR->getLoop()->getHeader()->begin();
           PHINode *PN = dyn_cast<PHINode>(I); ++I)
        if (SE.isSCEVable(PN->getType()) &&
            (SE.getEffectiveSCEVType(PN->getType()) ==
             SE.getEffectiveSCEVType(AR->getType())) &&
            SE.getSCEV(PN) == AR)
          return;

      // If this isn't one of the addrecs that the loop already has, it
      // would require a costly new phi and add. TODO: This isn't
      // precisely modeled right now.
      ++NumBaseAdds;
      if (!Regs.count(AR->getStart()))
        RateRegister(AR->getStart(), Regs, L, SE, DT);
    }

    // Add the step value register, if it needs one.
    // TODO: The non-affine case isn't precisely modeled here.
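    // (A constant step in an affine addrec can fold into the increment
    // instruction, so only a non-constant step is charged as a register.)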
    if (!AR->isAffine() || !isa<SCEVConstant>(AR->getOperand(1)))
      if (!Regs.count(AR->getStart()))
        RateRegister(AR->getOperand(1), Regs, L, SE, DT);
  }
  ++NumRegs;

  // Rough heuristic; favor registers which don't require extra setup
  // instructions in the preheader.
  if (!isa<SCEVUnknown>(Reg) &&
      !isa<SCEVConstant>(Reg) &&
      !(isa<SCEVAddRecExpr>(Reg) &&
        (isa<SCEVUnknown>(cast<SCEVAddRecExpr>(Reg)->getStart()) ||
         isa<SCEVConstant>(cast<SCEVAddRecExpr>(Reg)->getStart()))))
    ++SetupCost;
}

/// RatePrimaryRegister - Record this register in the set. If we haven't seen it
/// before, rate it.
void Cost::RatePrimaryRegister(const SCEV *Reg,
                               SmallPtrSet<const SCEV *, 16> &Regs,
                               const Loop *L,
                               ScalarEvolution &SE, DominatorTree &DT) {
  if (Regs.insert(Reg))
    RateRegister(Reg, Regs, L, SE, DT);
}

void Cost::RateFormula(const Formula &F,
                       SmallPtrSet<const SCEV *, 16> &Regs,
                       const DenseSet<const SCEV *> &VisitedRegs,
                       const Loop *L,
                       const SmallVectorImpl<int64_t> &Offsets,
                       ScalarEvolution &SE, DominatorTree &DT) {
  // Tally up the registers.
  if (const SCEV *ScaledReg = F.ScaledReg) {
    if (VisitedRegs.count(ScaledReg)) {
      Loose();
      return;
    }
    RatePrimaryRegister(ScaledReg, Regs, L, SE, DT);
  }
  for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
       E = F.BaseRegs.end(); I != E; ++I) {
    const SCEV *BaseReg = *I;
    if (VisitedRegs.count(BaseReg)) {
      Loose();
      return;
    }
    RatePrimaryRegister(BaseReg, Regs, L, SE, DT);

    NumIVMuls += isa<SCEVMulExpr>(BaseReg) &&
                 BaseReg->hasComputableLoopEvolution(L);
  }

  if (F.BaseRegs.size() > 1)
    NumBaseAdds += F.BaseRegs.size() - 1;

  // Tally up the non-zero immediates.
  for (SmallVectorImpl<int64_t>::const_iterator I = Offsets.begin(),
       E = Offsets.end(); I != E; ++I) {
    int64_t Offset = (uint64_t)*I + F.AM.BaseOffs;
    if (F.AM.BaseGV)
      ImmCost += 64; // Handle symbolic values conservatively.
                     // TODO: This should probably be the pointer size.
    else if (Offset != 0)
      ImmCost += APInt(64, Offset, true).getMinSignedBits();
  }
}

/// Loose - Set this cost to a losing value.
void Cost::Loose() {
  NumRegs = ~0u;
  AddRecCost = ~0u;
  NumIVMuls = ~0u;
  NumBaseAdds = ~0u;
  ImmCost = ~0u;
  SetupCost = ~0u;
}

/// operator< - Choose the lower cost.
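/// For example, a cost of {NumRegs=2, ImmCost=8} compares lower than one of
/// {NumRegs=3, ImmCost=0}, because NumRegs is considered first.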
bool Cost::operator<(const Cost &Other) const {
  if (NumRegs != Other.NumRegs)
    return NumRegs < Other.NumRegs;
  if (AddRecCost != Other.AddRecCost)
    return AddRecCost < Other.AddRecCost;
  if (NumIVMuls != Other.NumIVMuls)
    return NumIVMuls < Other.NumIVMuls;
  if (NumBaseAdds != Other.NumBaseAdds)
    return NumBaseAdds < Other.NumBaseAdds;
  if (ImmCost != Other.ImmCost)
    return ImmCost < Other.ImmCost;
  if (SetupCost != Other.SetupCost)
    return SetupCost < Other.SetupCost;
  return false;
}

void Cost::print(raw_ostream &OS) const {
  OS << NumRegs << " reg" << (NumRegs == 1 ? "" : "s");
  if (AddRecCost != 0)
    OS << ", with addrec cost " << AddRecCost;
  if (NumIVMuls != 0)
    OS << ", plus " << NumIVMuls << " IV mul" << (NumIVMuls == 1 ? "" : "s");
  if (NumBaseAdds != 0)
    OS << ", plus " << NumBaseAdds << " base add"
       << (NumBaseAdds == 1 ? "" : "s");
  if (ImmCost != 0)
    OS << ", plus " << ImmCost << " imm cost";
  if (SetupCost != 0)
    OS << ", plus " << SetupCost << " setup cost";
}

void Cost::dump() const {
  print(errs()); errs() << '\n';
}

namespace {

/// LSRFixup - An operand value in an instruction which is to be replaced
/// with some equivalent, possibly strength-reduced, replacement.
struct LSRFixup {
  /// UserInst - The instruction which will be updated.
  Instruction *UserInst;

  /// OperandValToReplace - The operand of the instruction which will
  /// be replaced. The operand may be used more than once; every instance
  /// will be replaced.
  Value *OperandValToReplace;

  /// PostIncLoop - If this user is to use the post-incremented value of an
  /// induction variable, this variable is non-null and holds the loop
  /// associated with the induction variable.
  const Loop *PostIncLoop;

  /// LUIdx - The index of the LSRUse describing the expression which
  /// this fixup needs, minus an offset (below).
  size_t LUIdx;

  /// Offset - A constant offset to be added to the LSRUse expression.
  /// This allows multiple fixups to share the same LSRUse with different
  /// offsets, for example in an unrolled loop.
  int64_t Offset;

  LSRFixup();

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

LSRFixup::LSRFixup()
  : UserInst(0), OperandValToReplace(0), PostIncLoop(0),
    LUIdx(~size_t(0)), Offset(0) {}

void LSRFixup::print(raw_ostream &OS) const {
  OS << "UserInst=";
  // Store is common and interesting enough to be worth special-casing.
  if (StoreInst *Store = dyn_cast<StoreInst>(UserInst)) {
    OS << "store ";
    WriteAsOperand(OS, Store->getOperand(0), /*PrintType=*/false);
  } else if (UserInst->getType()->isVoidTy())
    OS << UserInst->getOpcodeName();
  else
    WriteAsOperand(OS, UserInst, /*PrintType=*/false);

  OS << ", OperandValToReplace=";
  WriteAsOperand(OS, OperandValToReplace, /*PrintType=*/false);

  if (PostIncLoop) {
    OS << ", PostIncLoop=";
    WriteAsOperand(OS, PostIncLoop->getHeader(), /*PrintType=*/false);
  }

  if (LUIdx != ~size_t(0))
    OS << ", LUIdx=" << LUIdx;

  if (Offset != 0)
    OS << ", Offset=" << Offset;
}

void LSRFixup::dump() const {
  print(errs()); errs() << '\n';
}

namespace {

/// UniquifierDenseMapInfo - A DenseMapInfo implementation for holding
/// DenseMaps and DenseSets of sorted SmallVectors of const SCEV*.
struct UniquifierDenseMapInfo {
  static SmallVector<const SCEV *, 2> getEmptyKey() {
    SmallVector<const SCEV *, 2> V;
    V.push_back(reinterpret_cast<const SCEV *>(-1));
    return V;
  }

  static SmallVector<const SCEV *, 2> getTombstoneKey() {
    SmallVector<const SCEV *, 2> V;
    V.push_back(reinterpret_cast<const SCEV *>(-2));
    return V;
  }

  static unsigned getHashValue(const SmallVector<const SCEV *, 2> &V) {
    unsigned Result = 0;
    for (SmallVectorImpl<const SCEV *>::const_iterator I = V.begin(),
         E = V.end(); I != E; ++I)
      Result ^= DenseMapInfo<const SCEV *>::getHashValue(*I);
    return Result;
  }

  static bool isEqual(const SmallVector<const SCEV *, 2> &LHS,
                      const SmallVector<const SCEV *, 2> &RHS) {
    return LHS == RHS;
  }
};

/// LSRUse - This class holds the state that LSR keeps for each use in
/// IVUsers, as well as uses invented by LSR itself. It includes information
/// about what kinds of things can be folded into the user, information about
/// the user itself, and information about how the use may be satisfied.
/// TODO: Represent multiple users of the same expression in common?
class LSRUse {
  DenseSet<SmallVector<const SCEV *, 2>, UniquifierDenseMapInfo> Uniquifier;

public:
  /// KindType - An enum for a kind of use, indicating what types of
  /// scaled and immediate operands it might support.
  enum KindType {
    Basic,   ///< A normal use, with no folding.
    Special, ///< A special case of basic, allowing -1 scales.
    Address, ///< An address use; folding according to TargetLowering
    ICmpZero ///< An equality icmp with both operands folded into one.
    // TODO: Add a generic icmp too?
  };

  KindType Kind;
  const Type *AccessTy;

  SmallVector<int64_t, 8> Offsets;
  int64_t MinOffset;
  int64_t MaxOffset;

  /// AllFixupsOutsideLoop - This records whether all of the fixups using this
  /// LSRUse are outside of the loop, in which case some special-case heuristics
  /// may be used.
  bool AllFixupsOutsideLoop;

  /// Formulae - A list of ways to build a value that can satisfy this user.
  /// After the list is populated, one of these is selected heuristically and
  /// used to formulate a replacement for OperandValToReplace in UserInst.
  SmallVector<Formula, 12> Formulae;

  /// Regs - The set of register candidates used by all formulae in this LSRUse.
  SmallPtrSet<const SCEV *, 4> Regs;

  LSRUse(KindType K, const Type *T) : Kind(K), AccessTy(T),
                                      MinOffset(INT64_MAX),
                                      MaxOffset(INT64_MIN),
                                      AllFixupsOutsideLoop(true) {}

  bool InsertFormula(const Formula &F);

  void check() const;

  void print(raw_ostream &OS) const;
  void dump() const;
};

/// InsertFormula - If the given formula has not yet been inserted, add it to
/// the list, and return true. Return false otherwise.
bool LSRUse::InsertFormula(const Formula &F) {
  SmallVector<const SCEV *, 2> Key = F.BaseRegs;
  if (F.ScaledReg) Key.push_back(F.ScaledReg);
  // Unstable sort by host order ok, because this is only used for uniquifying.
  std::sort(Key.begin(), Key.end());

  if (!Uniquifier.insert(Key).second)
    return false;

  // Using a register to hold the value of 0 is not profitable.
  assert((!F.ScaledReg || !F.ScaledReg->isZero()) &&
         "Zero allocated in a scaled register!");
#ifndef NDEBUG
  for (SmallVectorImpl<const SCEV *>::const_iterator I =
       F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I)
    assert(!(*I)->isZero() && "Zero allocated in a base register!");
#endif

  // Add the formula to the list.
  Formulae.push_back(F);

  // Record registers now being used by this use.
  if (F.ScaledReg) Regs.insert(F.ScaledReg);
  Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());

  return true;
}

void LSRUse::print(raw_ostream &OS) const {
  OS << "LSR Use: Kind=";
  switch (Kind) {
  case Basic:    OS << "Basic"; break;
  case Special:  OS << "Special"; break;
  case ICmpZero: OS << "ICmpZero"; break;
  case Address:
    OS << "Address of ";
    if (AccessTy->isPointerTy())
      OS << "pointer"; // the full pointer type could be really verbose
    else
      OS << *AccessTy;
  }

  OS << ", Offsets={";
  for (SmallVectorImpl<int64_t>::const_iterator I = Offsets.begin(),
       E = Offsets.end(); I != E; ++I) {
    OS << *I;
    if (next(I) != E)
      OS << ',';
  }
  OS << '}';

  if (AllFixupsOutsideLoop)
    OS << ", all-fixups-outside-loop";
}

void LSRUse::dump() const {
  print(errs()); errs() << '\n';
}

/// isLegalUse - Test whether the use described by AM is "legal", meaning it can
/// be completely folded into the user instruction at isel time. This includes
/// address-mode folding and special icmp tricks.
static bool isLegalUse(const TargetLowering::AddrMode &AM,
                       LSRUse::KindType Kind, const Type *AccessTy,
                       const TargetLowering *TLI) {
  switch (Kind) {
  case LSRUse::Address:
    // If we have low-level target information, ask the target if it can
    // completely fold this address.
    if (TLI) return TLI->isLegalAddressingMode(AM, AccessTy);

    // Otherwise, just guess that reg+reg addressing is legal.
    return !AM.BaseGV && AM.BaseOffs == 0 && AM.Scale <= 1;

  case LSRUse::ICmpZero:
    // There's not even a target hook for querying whether it would be legal to
    // fold a GV into an ICmp.
    if (AM.BaseGV)
      return false;

    // ICmp only has two operands; don't allow more than two non-trivial parts.
    if (AM.Scale != 0 && AM.HasBaseReg && AM.BaseOffs != 0)
      return false;

    // ICmp only supports no scale or a -1 scale, as we can "fold" a -1 scale by
    // putting the scaled register in the other operand of the icmp.
    if (AM.Scale != 0 && AM.Scale != -1)
      return false;

    // If we have low-level target information, ask the target if it can fold an
    // integer immediate on an icmp.
    if (AM.BaseOffs != 0) {
      if (TLI) return TLI->isLegalICmpImmediate(-AM.BaseOffs);
      return false;
    }

    return true;

  case LSRUse::Basic:
    // Only handle single-register values.
    return !AM.BaseGV && AM.Scale == 0 && AM.BaseOffs == 0;

  case LSRUse::Special:
    // Only handle -1 scales, or no scale.
    return AM.Scale == 0 || AM.Scale == -1;
  }

  return false;
}

static bool isLegalUse(TargetLowering::AddrMode AM,
                       int64_t MinOffset, int64_t MaxOffset,
                       LSRUse::KindType Kind, const Type *AccessTy,
                       const TargetLowering *TLI) {
  // Check for overflow.
  if (((int64_t)((uint64_t)AM.BaseOffs + MinOffset) > AM.BaseOffs) !=
      (MinOffset > 0))
    return false;
  AM.BaseOffs = (uint64_t)AM.BaseOffs + MinOffset;
  if (isLegalUse(AM, Kind, AccessTy, TLI)) {
    AM.BaseOffs = (uint64_t)AM.BaseOffs - MinOffset;
    // Check for overflow.
    if (((int64_t)((uint64_t)AM.BaseOffs + MaxOffset) > AM.BaseOffs) !=
        (MaxOffset > 0))
      return false;
    AM.BaseOffs = (uint64_t)AM.BaseOffs + MaxOffset;
    return isLegalUse(AM, Kind, AccessTy, TLI);
  }
  return false;
}

static bool isAlwaysFoldable(int64_t BaseOffs,
                             GlobalValue *BaseGV,
                             bool HasBaseReg,
                             LSRUse::KindType Kind, const Type *AccessTy,
                             const TargetLowering *TLI) {
  // Fast-path: zero is always foldable.
  if (BaseOffs == 0 && !BaseGV) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  TargetLowering::AddrMode AM;
  AM.BaseOffs = BaseOffs;
  AM.BaseGV = BaseGV;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Kind == LSRUse::ICmpZero ? -1 : 1;

  return isLegalUse(AM, Kind, AccessTy, TLI);
}

static bool isAlwaysFoldable(const SCEV *S,
                             int64_t MinOffset, int64_t MaxOffset,
                             bool HasBaseReg,
                             LSRUse::KindType Kind, const Type *AccessTy,
                             const TargetLowering *TLI,
                             ScalarEvolution &SE) {
  // Fast-path: zero is always foldable.
  if (S->isZero()) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  int64_t BaseOffs = ExtractImmediate(S, SE);
  GlobalValue *BaseGV = ExtractSymbol(S, SE);

  // If there's anything else involved, it's not foldable.
  if (!S->isZero()) return false;

  // Fast-path: zero is always foldable.
  if (BaseOffs == 0 && !BaseGV) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  TargetLowering::AddrMode AM;
  AM.BaseOffs = BaseOffs;
  AM.BaseGV = BaseGV;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Kind == LSRUse::ICmpZero ? -1 : 1;

  return isLegalUse(AM, MinOffset, MaxOffset, Kind, AccessTy, TLI);
}

/// FormulaSorter - This class implements an ordering for formulae which sorts
/// them by their standalone cost.
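/// "Standalone" here means each formula is rated against empty register sets,
/// so the comparison deliberately ignores any sharing between uses.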
class FormulaSorter {
  /// These two sets are kept empty, so that we compute standalone costs.
  DenseSet<const SCEV *> VisitedRegs;
  SmallPtrSet<const SCEV *, 16> Regs;
  Loop *L;
  LSRUse *LU;
  ScalarEvolution &SE;
  DominatorTree &DT;

public:
  FormulaSorter(Loop *l, LSRUse &lu, ScalarEvolution &se, DominatorTree &dt)
    : L(l), LU(&lu), SE(se), DT(dt) {}

  bool operator()(const Formula &A, const Formula &B) {
    Cost CostA;
    CostA.RateFormula(A, Regs, VisitedRegs, L, LU->Offsets, SE, DT);
    Regs.clear();
    Cost CostB;
    CostB.RateFormula(B, Regs, VisitedRegs, L, LU->Offsets, SE, DT);
    Regs.clear();
    return CostA < CostB;
  }
};

/// LSRInstance - This class holds state for the main loop strength reduction
/// logic.
class LSRInstance {
  IVUsers &IU;
  ScalarEvolution &SE;
  DominatorTree &DT;
  const TargetLowering *const TLI;
  Loop *const L;
  bool Changed;

  /// IVIncInsertPos - This is the insert position at which the current loop's
  /// induction variable increment should be placed. In simple loops, this is
  /// the latch block's terminator. But in more complicated cases, this is a
  /// position which will dominate all the in-loop post-increment users.
  Instruction *IVIncInsertPos;

  /// Factors - Interesting factors between use strides.
  SmallSetVector<int64_t, 8> Factors;

  /// Types - Interesting use types, to facilitate truncation reuse.
  SmallSetVector<const Type *, 4> Types;

  /// Fixups - The list of operands which are to be replaced.
  SmallVector<LSRFixup, 16> Fixups;

  /// Uses - The list of interesting uses.
  SmallVector<LSRUse, 16> Uses;

  /// RegUses - Track which uses use which register candidates.
  RegUseTracker RegUses;

  void OptimizeShadowIV();
  bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse);
  ICmpInst *OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse);
  bool OptimizeLoopTermCond();

  void CollectInterestingTypesAndFactors();
  void CollectFixupsAndInitialFormulae();

  LSRFixup &getNewFixup() {
    Fixups.push_back(LSRFixup());
    return Fixups.back();
  }

  // Support for sharing of LSRUses between LSRFixups.
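  // Fixups whose expressions differ only by a constant offset may share one
  // LSRUse; getUse returns the use's index along with the offset to record
  // in the fixup.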
  typedef DenseMap<const SCEV *, size_t> UseMapTy;
  UseMapTy UseMap;

  bool reconcileNewOffset(LSRUse &LU, int64_t NewOffset,
                          LSRUse::KindType Kind, const Type *AccessTy);

  std::pair<size_t, int64_t> getUse(const SCEV *&Expr,
                                    LSRUse::KindType Kind,
                                    const Type *AccessTy);

public:
  void InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
  void InsertSupplementalFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
  void CountRegisters(const Formula &F, size_t LUIdx);
  bool InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F);

  void CollectLoopInvariantFixupsAndFormulae();

  void GenerateReassociations(LSRUse &LU, unsigned LUIdx, Formula Base,
                              unsigned Depth = 0);
  void GenerateCombinations(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateCrossUseConstantOffsets();
  void GenerateAllReuseFormulae();

  void FilterOutUndesirableDedicatedRegisters();
  void NarrowSearchSpaceUsingHeuristics();

  void SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
                    Cost &SolutionCost,
                    SmallVectorImpl<const Formula *> &Workspace,
                    const Cost &CurCost,
                    const SmallPtrSet<const SCEV *, 16> &CurRegs,
                    DenseSet<const SCEV *> &VisitedRegs) const;
  void Solve(SmallVectorImpl<const Formula *> &Solution) const;

  Value *Expand(const LSRFixup &LF,
                const Formula &F,
                BasicBlock::iterator IP,
                SCEVExpander &Rewriter,
                SmallVectorImpl<WeakVH> &DeadInsts) const;
  void RewriteForPHI(PHINode *PN, const LSRFixup &LF,
                     const Formula &F,
                     SCEVExpander &Rewriter,
                     SmallVectorImpl<WeakVH> &DeadInsts,
                     Pass *P) const;
  void Rewrite(const LSRFixup &LF,
               const Formula &F,
               SCEVExpander &Rewriter,
               SmallVectorImpl<WeakVH> &DeadInsts,
               Pass *P) const;
  void ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
                         Pass *P);

  LSRInstance(const TargetLowering *tli, Loop *l, Pass *P);

  bool getChanged() const { return Changed; }

  void print_factors_and_types(raw_ostream &OS) const;
  void print_fixups(raw_ostream &OS) const;
  void print_uses(raw_ostream &OS) const;
  void print(raw_ostream &OS) const;
  void dump() const;
};

}

/// OptimizeShadowIV - If IV is used in an int-to-float cast
/// inside the loop then try to eliminate the cast operation.
void LSRInstance::OptimizeShadowIV() {
  const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return;

  for (IVUsers::const_iterator UI = IU.begin(), E = IU.end();
       UI != E; /* empty */) {
    IVUsers::const_iterator CandidateUI = UI;
    ++UI;
    Instruction *ShadowUse = CandidateUI->getUser();
    const Type *DestTy = NULL;

    /* If shadow use is an int->float cast then insert a second IV
       to eliminate this cast.

         for (unsigned i = 0; i < n; ++i)
           foo((double)i);

       is transformed into

         double d = 0.0;
         for (unsigned i = 0; i < n; ++i, ++d)
           foo(d);
    */
    if (UIToFPInst *UCast = dyn_cast<UIToFPInst>(CandidateUI->getUser()))
      DestTy = UCast->getDestTy();
    else if (SIToFPInst *SCast = dyn_cast<SIToFPInst>(CandidateUI->getUser()))
      DestTy = SCast->getDestTy();
    if (!DestTy) continue;

    if (TLI) {
      // If target does not support DestTy natively then do not apply
      // this transformation.
      EVT DVT = TLI->getValueType(DestTy);
      if (!TLI->isTypeLegal(DVT)) continue;
    }

    PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0));
    if (!PH) continue;
    if (PH->getNumIncomingValues() != 2) continue;

    const Type *SrcTy = PH->getType();
    int Mantissa = DestTy->getFPMantissaWidth();
    if (Mantissa == -1) continue;
    if ((int)SE.getTypeSizeInBits(SrcTy) > Mantissa)
      continue;

    unsigned Entry, Latch;
    if (PH->getIncomingBlock(0) == L->getLoopPreheader()) {
      Entry = 0;
      Latch = 1;
    } else {
      Entry = 1;
      Latch = 0;
    }

    ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry));
    if (!Init) continue;
    Constant *NewInit = ConstantFP::get(DestTy, Init->getZExtValue());

    BinaryOperator *Incr =
      dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch));
    if (!Incr) continue;
    if (Incr->getOpcode() != Instruction::Add
        && Incr->getOpcode() != Instruction::Sub)
      continue;

    /* Initialize new IV, double d = 0.0 in above example. */
    ConstantInt *C = NULL;
    if (Incr->getOperand(0) == PH)
      C = dyn_cast<ConstantInt>(Incr->getOperand(1));
    else if (Incr->getOperand(1) == PH)
      C = dyn_cast<ConstantInt>(Incr->getOperand(0));
    else
      continue;

    if (!C) continue;

    // Ignore negative constants, as the code below doesn't handle them
    // correctly. TODO: Remove this restriction.
    if (!C->getValue().isStrictlyPositive()) continue;

    /* Add new PHINode. */
    PHINode *NewPH = PHINode::Create(DestTy, "IV.S.", PH);

    /* Create new increment. '++d' in above example. */
    Constant *CFP = ConstantFP::get(DestTy, C->getZExtValue());
    BinaryOperator *NewIncr =
      BinaryOperator::Create(Incr->getOpcode() == Instruction::Add ?
                               Instruction::FAdd : Instruction::FSub,
                             NewPH, CFP, "IV.S.next.", Incr);

    NewPH->addIncoming(NewInit, PH->getIncomingBlock(Entry));
    NewPH->addIncoming(NewIncr, PH->getIncomingBlock(Latch));

    /* Remove cast operation */
    ShadowUse->replaceAllUsesWith(NewPH);
    ShadowUse->eraseFromParent();
    break;
  }
}

/// FindIVUserForCond - If Cond has an operand that is an expression of an IV,
/// set the IV user and stride information and return true, otherwise return
/// false.
bool LSRInstance::FindIVUserForCond(ICmpInst *Cond,
                                    IVStrideUse *&CondUse) {
  for (IVUsers::iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI)
    if (UI->getUser() == Cond) {
      // NOTE: we could handle setcc instructions with multiple uses here, but
      // InstCombine does it as well for simple uses, and it's not clear that
      // it occurs enough in real life to handle.
      CondUse = UI;
      return true;
    }
  return false;
}

/// OptimizeMax - Rewrite the loop's terminating condition if it uses
/// a max computation.
///
/// This is a narrow solution to a specific, but acute, problem. For loops
/// like this:
///
///   i = 0;
///   do {
///     p[i] = 0.0;
///   } while (++i < n);
///
/// the trip count isn't just 'n', because 'n' might not be positive. And
/// unfortunately this can come up even for loops where the user didn't use
/// a C do-while loop. For example, seemingly well-behaved top-test loops
/// will commonly be lowered like this:
///
///   if (n > 0) {
///     i = 0;
///     do {
///       p[i] = 0.0;
///     } while (++i < n);
///   }
///
/// and then it's possible for subsequent optimization to obscure the if
/// test in such a way that indvars can't find it.
///
/// When indvars can't find the if test in loops like this, it creates a
/// max expression, which allows it to give the loop a canonical
/// induction variable:
///
///   i = 0;
///   max = n < 1 ? 1 : n;
///   do {
///     p[i] = 0.0;
///   } while (++i != max);
///
/// Canonical induction variables are necessary because the loop passes
/// are designed around them. The most obvious example of this is the
/// LoopInfo analysis, which doesn't remember trip count values. It
/// expects to be able to rediscover the trip count each time it is
/// needed, and it does this using a simple analysis that only succeeds if
/// the loop has a canonical induction variable.
///
/// However, when it comes time to generate code, the maximum operation
/// can be quite costly, especially if it's inside of an outer loop.
///
/// This function solves this problem by detecting this type of loop and
/// rewriting its condition from ICMP_NE back to ICMP_SLT, and deleting
/// the instructions for the maximum computation.
///
ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) {
  // Check that the loop matches the pattern we're looking for.
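  // Concretely, the shape being matched is an eq/ne comparison whose right
  // operand is the one-use select emitted for the max, e.g. (with
  // illustrative names):
  //
  //   %max = select i1 %cmp, i32 1, i32 %n
  //   %exitcond = icmp ne i32 %i.next, %max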
  if (Cond->getPredicate() != CmpInst::ICMP_EQ &&
      Cond->getPredicate() != CmpInst::ICMP_NE)
    return Cond;

  SelectInst *Sel = dyn_cast<SelectInst>(Cond->getOperand(1));
  if (!Sel || !Sel->hasOneUse()) return Cond;

  const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return Cond;
  const SCEV *One = SE.getIntegerSCEV(1, BackedgeTakenCount->getType());

  // Add one to the backedge-taken count to get the trip count.
  const SCEV *IterationCount = SE.getAddExpr(BackedgeTakenCount, One);

  // Check for a max calculation that matches the pattern.
  if (!isa<SCEVSMaxExpr>(IterationCount) && !isa<SCEVUMaxExpr>(IterationCount))
    return Cond;
  const SCEVNAryExpr *Max = cast<SCEVNAryExpr>(IterationCount);
  if (Max != SE.getSCEV(Sel)) return Cond;

  // To handle a max with more than two operands, this optimization would
  // require additional checking and setup.
  if (Max->getNumOperands() != 2)
    return Cond;

  const SCEV *MaxLHS = Max->getOperand(0);
  const SCEV *MaxRHS = Max->getOperand(1);
  if (!MaxLHS || MaxLHS != One) return Cond;
  // Check the relevant induction variable for conformance to
  // the pattern.
  const SCEV *IV = SE.getSCEV(Cond->getOperand(0));
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
  if (!AR || !AR->isAffine() ||
      AR->getStart() != One ||
      AR->getStepRecurrence(SE) != One)
    return Cond;

  assert(AR->getLoop() == L &&
         "Loop condition operand is an addrec in a different loop!");

  // Check the right operand of the select, and remember it, as it will
  // be used in the new comparison instruction.
  Value *NewRHS = 0;
  if (SE.getSCEV(Sel->getOperand(1)) == MaxRHS)
    NewRHS = Sel->getOperand(1);
  else if (SE.getSCEV(Sel->getOperand(2)) == MaxRHS)
    NewRHS = Sel->getOperand(2);
  if (!NewRHS) return Cond;

  // Determine the new comparison opcode. It may be signed or unsigned,
  // and the original comparison may be either equality or inequality.
  CmpInst::Predicate Pred =
    isa<SCEVSMaxExpr>(Max) ? CmpInst::ICMP_SLT : CmpInst::ICMP_ULT;
  if (Cond->getPredicate() == CmpInst::ICMP_EQ)
    Pred = CmpInst::getInversePredicate(Pred);

  // Ok, everything looks ok to change the condition into an SLT or SGE and
  // delete the max calculation.
  ICmpInst *NewCond =
    new ICmpInst(Cond, Pred, Cond->getOperand(0), NewRHS, "scmp");

  // Delete the max calculation instructions.
  Cond->replaceAllUsesWith(NewCond);
  CondUse->setUser(NewCond);
  Instruction *Cmp = cast<Instruction>(Sel->getOperand(0));
  Cond->eraseFromParent();
  Sel->eraseFromParent();
  if (Cmp->use_empty())
    Cmp->eraseFromParent();
  return NewCond;
}

/// OptimizeLoopTermCond - Change loop terminating condition to use the
/// postinc iv when possible.
bool
LSRInstance::OptimizeLoopTermCond() {
  SmallPtrSet<Instruction *, 4> PostIncs;

  BasicBlock *LatchBlock = L->getLoopLatch();
  SmallVector<BasicBlock*, 8> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
    BasicBlock *ExitingBlock = ExitingBlocks[i];

    // Get the terminating condition for the loop if possible.  If we
    // can, we want to change it to use a post-incremented version of its
    // induction variable, to allow coalescing the live ranges for the IV into
    // one register value.

    BranchInst *TermBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
    if (!TermBr)
      continue;
    // FIXME: Overly conservative, termination condition could be an 'or' etc.
    if (TermBr->isUnconditional() || !isa<ICmpInst>(TermBr->getCondition()))
      continue;

    // Search IVUsesByStride to find Cond's IVUse if there is one.
    IVStrideUse *CondUse = 0;
    ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition());
    if (!FindIVUserForCond(Cond, CondUse))
      continue;

    // If the trip count is computed in terms of a max (due to ScalarEvolution
    // being unable to find a sufficient guard, for example), change the loop
    // comparison to use SLT or ULT instead of NE.
    // One consequence of doing this now is that it disrupts the count-down
    // optimization. That's not always a bad thing though, because in such
    // cases it may still be worthwhile to avoid a max.
    Cond = OptimizeMax(Cond, CondUse);

    // If this exiting block dominates the latch block, it may also use
    // the post-inc value if it won't be shared with other uses.
    // Check for dominance.
    if (!DT.dominates(ExitingBlock, LatchBlock))
      continue;

    // Conservatively avoid trying to use the post-inc value in non-latch
    // exits if there may be pre-inc users in intervening blocks.
    if (LatchBlock != ExitingBlock)
      for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI)
        // Test if the use is reachable from the exiting block. This dominator
        // query is a conservative approximation of reachability.
        if (&*UI != CondUse &&
            !DT.properlyDominates(UI->getUser()->getParent(), ExitingBlock)) {
          // Conservatively assume there may be reuse if the quotient of their
          // strides could be a legal scale.
          const SCEV *A = CondUse->getStride();
          const SCEV *B = UI->getStride();
          if (SE.getTypeSizeInBits(A->getType()) !=
              SE.getTypeSizeInBits(B->getType())) {
            if (SE.getTypeSizeInBits(A->getType()) >
                SE.getTypeSizeInBits(B->getType()))
              B = SE.getSignExtendExpr(B, A->getType());
            else
              A = SE.getSignExtendExpr(A, B->getType());
          }
          if (const SCEVConstant *D =
                dyn_cast_or_null<SCEVConstant>(getExactSDiv(B, A, SE))) {
            // Stride of one or negative one can have reuse with non-addresses.
            if (D->getValue()->isOne() ||
                D->getValue()->isAllOnesValue())
              goto decline_post_inc;
            // Avoid weird situations.
            if (D->getValue()->getValue().getMinSignedBits() >= 64 ||
                D->getValue()->getValue().isMinSignedValue())
              goto decline_post_inc;
            // Without TLI, assume that any stride might be valid, and so any
            // use might be shared.
            if (!TLI)
              goto decline_post_inc;
            // Check for possible scaled-address reuse.
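            // For example, if the quotient of the two strides is 4 and the
            // target can fold reg+4*reg addressing for this access type,
            // conservatively assume the two uses could share a register and
            // keep the pre-inc value. (The factor 4 is illustrative; any
            // legal scale declines post-inc here.)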
            const Type *AccessTy = getAccessType(UI->getUser());
            TargetLowering::AddrMode AM;
            AM.Scale = D->getValue()->getSExtValue();
            if (TLI->isLegalAddressingMode(AM, AccessTy))
              goto decline_post_inc;
            AM.Scale = -AM.Scale;
            if (TLI->isLegalAddressingMode(AM, AccessTy))
              goto decline_post_inc;
          }
        }

    DEBUG(dbgs() << "  Change loop exiting icmp to use postinc iv: "
                 << *Cond << '\n');

    // It's possible for the setcc instruction to be anywhere in the loop, and
    // possible for it to have multiple users. If it is not immediately before
    // the exiting block branch, move it.
    if (&*++BasicBlock::iterator(Cond) != TermBr) {
      if (Cond->hasOneUse()) {
        Cond->moveBefore(TermBr);
      } else {
        // Clone the terminating condition and insert into the loopend.
        ICmpInst *OldCond = Cond;
        Cond = cast<ICmpInst>(Cond->clone());
        Cond->setName(L->getHeader()->getName() + ".termcond");
        ExitingBlock->getInstList().insert(TermBr, Cond);

        // Clone the IVUse, as the old use still exists!
        CondUse = &IU.AddUser(CondUse->getStride(), CondUse->getOffset(),
                              Cond, CondUse->getOperandValToReplace());
        TermBr->replaceUsesOfWith(OldCond, Cond);
      }
    }

    // If we get to here, we know that we can transform the setcc instruction to
    // use the post-incremented version of the IV, allowing us to coalesce the
    // live ranges for the IV correctly.
    CondUse->setOffset(SE.getMinusSCEV(CondUse->getOffset(),
                                       CondUse->getStride()));
    CondUse->setIsUseOfPostIncrementedValue(true);
    Changed = true;

    PostIncs.insert(Cond);
  decline_post_inc:;
  }

  // Determine an insertion point for the loop induction variable increment. It
  // must dominate all the post-inc comparisons we just set up, and it must
  // dominate the loop latch edge.
  IVIncInsertPos = L->getLoopLatch()->getTerminator();
  for (SmallPtrSet<Instruction *, 4>::const_iterator I = PostIncs.begin(),
       E = PostIncs.end(); I != E; ++I) {
    BasicBlock *BB =
      DT.findNearestCommonDominator(IVIncInsertPos->getParent(),
                                    (*I)->getParent());
    if (BB == (*I)->getParent())
      IVIncInsertPos = *I;
    else if (BB != IVIncInsertPos->getParent())
      IVIncInsertPos = BB->getTerminator();
  }

  return Changed;
}

bool
LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset,
                                LSRUse::KindType Kind, const Type *AccessTy) {
  int64_t NewMinOffset = LU.MinOffset;
  int64_t NewMaxOffset = LU.MaxOffset;
  const Type *NewAccessTy = AccessTy;

  // Check for a mismatched kind. It's tempting to collapse mismatched kinds to
  // something conservative, however this can pessimize in the case that one of
  // the uses will have all its uses outside the loop, for example.
  if (LU.Kind != Kind)
    return false;
  // Conservatively assume HasBaseReg is true for now.
  if (NewOffset < LU.MinOffset) {
    if (!isAlwaysFoldable(LU.MaxOffset - NewOffset, 0, /*HasBaseReg=*/true,
                          Kind, AccessTy, TLI))
      return false;
    NewMinOffset = NewOffset;
  } else if (NewOffset > LU.MaxOffset) {
    if (!isAlwaysFoldable(NewOffset - LU.MinOffset, 0, /*HasBaseReg=*/true,
                          Kind, AccessTy, TLI))
      return false;
    NewMaxOffset = NewOffset;
  }
  // Check for a mismatched access type, and fall back conservatively as needed.
  if (Kind == LSRUse::Address && AccessTy != LU.AccessTy)
    NewAccessTy = Type::getVoidTy(AccessTy->getContext());

  // Update the use.
  LU.MinOffset = NewMinOffset;
  LU.MaxOffset = NewMaxOffset;
  LU.AccessTy = NewAccessTy;
  if (NewOffset != LU.Offsets.back())
    LU.Offsets.push_back(NewOffset);
  return true;
}

/// getUse - Return an LSRUse index and an offset value for a fixup which
/// needs the given expression, with the given kind and optional access type.
/// Either reuse an existing use or create a new one, as needed.
std::pair<size_t, int64_t>
LSRInstance::getUse(const SCEV *&Expr,
                    LSRUse::KindType Kind, const Type *AccessTy) {
  const SCEV *Copy = Expr;
  int64_t Offset = ExtractImmediate(Expr, SE);

  // Basic uses can't accept any offset, for example.
  if (!isAlwaysFoldable(Offset, 0, /*HasBaseReg=*/true, Kind, AccessTy, TLI)) {
    Expr = Copy;
    Offset = 0;
  }

  std::pair<UseMapTy::iterator, bool> P =
    UseMap.insert(std::make_pair(Expr, 0));
  if (!P.second) {
    // A use already existed with this base.
    size_t LUIdx = P.first->second;
    LSRUse &LU = Uses[LUIdx];
    if (reconcileNewOffset(LU, Offset, Kind, AccessTy))
      // Reuse this use.
      return std::make_pair(LUIdx, Offset);
  }

  // Create a new use.
  size_t LUIdx = Uses.size();
  P.first->second = LUIdx;
  Uses.push_back(LSRUse(Kind, AccessTy));
  LSRUse &LU = Uses[LUIdx];

  // We don't need to track redundant offsets, but we don't need to go out
  // of our way here to avoid them.
  if (LU.Offsets.empty() || Offset != LU.Offsets.back())
    LU.Offsets.push_back(Offset);

  LU.MinOffset = Offset;
  LU.MaxOffset = Offset;
  return std::make_pair(LUIdx, Offset);
}

void LSRInstance::CollectInterestingTypesAndFactors() {
  SmallSetVector<const SCEV *, 4> Strides;

  // Collect interesting types and strides.
  for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) {
    const SCEV *Stride = UI->getStride();

    // Collect interesting types.
    Types.insert(SE.getEffectiveSCEVType(Stride->getType()));

    // Add the stride for this loop.
    Strides.insert(Stride);

    // Add strides for other mentioned loops.
    for (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(UI->getOffset());
         AR; AR = dyn_cast<SCEVAddRecExpr>(AR->getStart()))
      Strides.insert(AR->getStepRecurrence(SE));
  }

  // Compute interesting factors from the set of interesting strides.
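  // For example, strides of 4 and 8 (sign-extended to a common width first
  // if necessary) contribute the factor 2, since 8 divides exactly by 4; a
  // factor is only recorded when it fits in a signed 64-bit value.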
  for (SmallSetVector<const SCEV *, 4>::const_iterator
       I = Strides.begin(), E = Strides.end(); I != E; ++I)
    for (SmallSetVector<const SCEV *, 4>::const_iterator NewStrideIter =
         next(I); NewStrideIter != E; ++NewStrideIter) {
      const SCEV *OldStride = *I;
      const SCEV *NewStride = *NewStrideIter;

      if (SE.getTypeSizeInBits(OldStride->getType()) !=
          SE.getTypeSizeInBits(NewStride->getType())) {
        if (SE.getTypeSizeInBits(OldStride->getType()) >
            SE.getTypeSizeInBits(NewStride->getType()))
          NewStride = SE.getSignExtendExpr(NewStride, OldStride->getType());
        else
          OldStride = SE.getSignExtendExpr(OldStride, NewStride->getType());
      }
      if (const SCEVConstant *Factor =
            dyn_cast_or_null<SCEVConstant>(getExactSDiv(NewStride, OldStride,
                                                        SE, true))) {
        if (Factor->getValue()->getValue().getMinSignedBits() <= 64)
          Factors.insert(Factor->getValue()->getValue().getSExtValue());
      } else if (const SCEVConstant *Factor =
                   dyn_cast_or_null<SCEVConstant>(getExactSDiv(OldStride,
                                                               NewStride,
                                                               SE, true))) {
        if (Factor->getValue()->getValue().getMinSignedBits() <= 64)
          Factors.insert(Factor->getValue()->getValue().getSExtValue());
      }
    }

  // If all uses use the same type, don't bother looking for truncation-based
  // reuse.
  if (Types.size() == 1)
    Types.clear();

  DEBUG(print_factors_and_types(dbgs()));
}

void LSRInstance::CollectFixupsAndInitialFormulae() {
  for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) {
    // Record the uses.
    LSRFixup &LF = getNewFixup();
    LF.UserInst = UI->getUser();
    LF.OperandValToReplace = UI->getOperandValToReplace();
    if (UI->isUseOfPostIncrementedValue())
      LF.PostIncLoop = L;

    LSRUse::KindType Kind = LSRUse::Basic;
    const Type *AccessTy = 0;
    if (isAddressUse(LF.UserInst, LF.OperandValToReplace)) {
      Kind = LSRUse::Address;
      AccessTy = getAccessType(LF.UserInst);
    }

    const SCEV *S = IU.getCanonicalExpr(*UI);

    // Equality (== and !=) ICmps are special. We can rewrite (i == N) as
    // (N - i == 0), and this allows (N - i) to be the expression that we work
    // with rather than just N or i, so we can consider the register
    // requirements for both N and i at the same time. Limiting this code to
    // equality icmps is not a problem because all interesting loops use
    // equality icmps, thanks to IndVarSimplify.
    if (ICmpInst *CI = dyn_cast<ICmpInst>(LF.UserInst))
      if (CI->isEquality()) {
        // Swap the operands if needed to put the OperandValToReplace on the
        // left, for consistency.
        Value *NV = CI->getOperand(1);
        if (NV == LF.OperandValToReplace) {
          CI->setOperand(1, CI->getOperand(0));
          CI->setOperand(0, NV);
        }

        // x == y  -->  x - y == 0
        const SCEV *N = SE.getSCEV(NV);
        if (N->isLoopInvariant(L)) {
          Kind = LSRUse::ICmpZero;
          S = SE.getMinusSCEV(N, S);
        }

        // -1 and the negations of all interesting strides (except the negation
        // of -1) are now also interesting.
        for (size_t i = 0, e = Factors.size(); i != e; ++i)
          if (Factors[i] != -1)
            Factors.insert(-(uint64_t)Factors[i]);
        Factors.insert(-1);
      }

    // Set up the initial formula for this use.
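    // getUse strips any immediate that can always be folded off of S, then
    // either finds an existing LSRUse with the resulting base expression or
    // creates a new one; the offset it returns is what was stripped.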
    std::pair<size_t, int64_t> P = getUse(S, Kind, AccessTy);
    LF.LUIdx = P.first;
    LF.Offset = P.second;
    LSRUse &LU = Uses[LF.LUIdx];
    LU.AllFixupsOutsideLoop &= !L->contains(LF.UserInst);

    // If this is the first use of this LSRUse, give it a formula.
    if (LU.Formulae.empty()) {
      InsertInitialFormula(S, LU, LF.LUIdx);
      CountRegisters(LU.Formulae.back(), LF.LUIdx);
    }
  }

  DEBUG(print_fixups(dbgs()));
}

void
LSRInstance::InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx) {
  Formula F;
  F.InitialMatch(S, L, SE, DT);
  bool Inserted = InsertFormula(LU, LUIdx, F);
  assert(Inserted && "Initial formula already exists!"); (void)Inserted;
}

void
LSRInstance::InsertSupplementalFormula(const SCEV *S,
                                       LSRUse &LU, size_t LUIdx) {
  Formula F;
  F.BaseRegs.push_back(S);
  F.AM.HasBaseReg = true;
  bool Inserted = InsertFormula(LU, LUIdx, F);
  assert(Inserted && "Supplemental formula already exists!"); (void)Inserted;
}

/// CountRegisters - Note which registers are used by the given formula,
/// updating RegUses.
void LSRInstance::CountRegisters(const Formula &F, size_t LUIdx) {
  if (F.ScaledReg)
    RegUses.CountRegister(F.ScaledReg, LUIdx);
  for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
       E = F.BaseRegs.end(); I != E; ++I)
    RegUses.CountRegister(*I, LUIdx);
}

/// InsertFormula - If the given formula has not yet been inserted, add it to
/// the list, and return true. Return false otherwise.
bool LSRInstance::InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F) {
  if (!LU.InsertFormula(F))
    return false;

  CountRegisters(F, LUIdx);
  return true;
}

/// CollectLoopInvariantFixupsAndFormulae - Check for other uses of
/// loop-invariant values which we're tracking. These other uses will pin these
/// values in registers, making them less profitable for elimination.
/// TODO: This currently misses non-constant addrec step registers.
/// TODO: Should this give more weight to users inside the loop?
void
LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
  SmallVector<const SCEV *, 8> Worklist(RegUses.begin(), RegUses.end());
  SmallPtrSet<const SCEV *, 8> Inserted;

  while (!Worklist.empty()) {
    const SCEV *S = Worklist.pop_back_val();

    if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S))
      Worklist.insert(Worklist.end(), N->op_begin(), N->op_end());
    else if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S))
      Worklist.push_back(C->getOperand());
    else if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
      Worklist.push_back(D->getLHS());
      Worklist.push_back(D->getRHS());
    } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
      if (!Inserted.insert(U)) continue;
      const Value *V = U->getValue();
      if (const Instruction *Inst = dyn_cast<Instruction>(V))
        if (L->contains(Inst)) continue;
      for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end();
           UI != UE; ++UI) {
        const Instruction *UserInst = dyn_cast<Instruction>(*UI);
        // Ignore non-instructions.
        if (!UserInst)
          continue;
        // Ignore instructions in other functions (as can happen with
        // Constants).
        if (UserInst->getParent()->getParent() != L->getHeader()->getParent())
          continue;
        // Ignore instructions not dominated by the loop.
        const BasicBlock *UseBB = !isa<PHINode>(UserInst) ?
          UserInst->getParent() :
          cast<PHINode>(UserInst)->getIncomingBlock(
            PHINode::getIncomingValueNumForOperand(UI.getOperandNo()));
        if (!DT.dominates(L->getHeader(), UseBB))
          continue;
        // Ignore uses which are part of other SCEV expressions, to avoid
        // analyzing them multiple times.
        if (SE.isSCEVable(UserInst->getType()) &&
            !isa<SCEVUnknown>(SE.getSCEV(const_cast<Instruction *>(UserInst))))
          continue;
        // Ignore icmp instructions which are already being analyzed.
        if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UserInst)) {
          unsigned OtherIdx = !UI.getOperandNo();
          Value *OtherOp = const_cast<Value *>(ICI->getOperand(OtherIdx));
          if (SE.getSCEV(OtherOp)->hasComputableLoopEvolution(L))
            continue;
        }

        LSRFixup &LF = getNewFixup();
        LF.UserInst = const_cast<Instruction *>(UserInst);
        LF.OperandValToReplace = UI.getUse();
        std::pair<size_t, int64_t> P = getUse(S, LSRUse::Basic, 0);
        LF.LUIdx = P.first;
        LF.Offset = P.second;
        LSRUse &LU = Uses[LF.LUIdx];
        LU.AllFixupsOutsideLoop &= L->contains(LF.UserInst);
        InsertSupplementalFormula(U, LU, LF.LUIdx);
        CountRegisters(LU.Formulae.back(), Uses.size() - 1);
        break;
      }
    }
  }
}

/// CollectSubexprs - Split S into subexpressions which can be pulled out into
/// separate registers. If C is non-null, multiply each subexpression by C.
static void CollectSubexprs(const SCEV *S, const SCEVConstant *C,
                            SmallVectorImpl<const SCEV *> &Ops,
                            ScalarEvolution &SE) {
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    // Break out add operands.
    for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
         I != E; ++I)
      CollectSubexprs(*I, C, Ops, SE);
    return;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    // Split a non-zero base out of an addrec.
    if (!AR->getStart()->isZero()) {
      CollectSubexprs(SE.getAddRecExpr(SE.getIntegerSCEV(0, AR->getType()),
                                       AR->getStepRecurrence(SE),
                                       AR->getLoop()), C, Ops, SE);
      CollectSubexprs(AR->getStart(), C, Ops, SE);
      return;
    }
  } else if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    // Break (C * (a + b + c)) into C*a + C*b + C*c.
    if (Mul->getNumOperands() == 2)
      if (const SCEVConstant *Op0 =
            dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
        CollectSubexprs(Mul->getOperand(1),
                        C ? cast<SCEVConstant>(SE.getMulExpr(C, Op0)) : Op0,
                        Ops, SE);
        return;
      }
  }

  // Otherwise use the value itself.
  Ops.push_back(C ? SE.getMulExpr(C, S) : S);
}

/// GenerateReassociations - Split out subexpressions from adds and the bases of
/// addrecs.
void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx,
                                         Formula Base,
                                         unsigned Depth) {
  // Arbitrarily cap recursion to protect compile time.
  if (Depth >= 3) return;

  for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
    const SCEV *BaseReg = Base.BaseRegs[i];

    SmallVector<const SCEV *, 8> AddOps;
    CollectSubexprs(BaseReg, 0, AddOps, SE);
    if (AddOps.size() == 1) continue;

    for (SmallVectorImpl<const SCEV *>::const_iterator J = AddOps.begin(),
         JE = AddOps.end(); J != JE; ++J) {
      // Don't pull a constant into a register if the constant could be folded
      // into an immediate field.
      if (isAlwaysFoldable(*J, LU.MinOffset, LU.MaxOffset,
                           Base.getNumRegs() > 1,
                           LU.Kind, LU.AccessTy, TLI, SE))
        continue;

      // Collect all operands except *J.
      SmallVector<const SCEV *, 8> InnerAddOps;
      for (SmallVectorImpl<const SCEV *>::const_iterator K = AddOps.begin(),
           KE = AddOps.end(); K != KE; ++K)
        if (K != J)
          InnerAddOps.push_back(*K);

      // Don't leave just a constant behind in a register if the constant could
      // be folded into an immediate field.
      if (InnerAddOps.size() == 1 &&
          isAlwaysFoldable(InnerAddOps[0], LU.MinOffset, LU.MaxOffset,
                           Base.getNumRegs() > 1,
                           LU.Kind, LU.AccessTy, TLI, SE))
        continue;

      Formula F = Base;
      F.BaseRegs[i] = SE.getAddExpr(InnerAddOps);
      F.BaseRegs.push_back(*J);
      if (InsertFormula(LU, LUIdx, F))
        // If that formula hadn't been seen before, recurse to find more like
        // it.
        GenerateReassociations(LU, LUIdx, LU.Formulae.back(), Depth+1);
    }
  }
}

/// GenerateCombinations - Generate a formula consisting of all of the
/// loop-dominating registers added into a single register.
void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx,
                                       Formula Base) {
  // This method is only interesting on a plurality of registers.
  if (Base.BaseRegs.size() <= 1) return;

  Formula F = Base;
  F.BaseRegs.clear();
  SmallVector<const SCEV *, 4> Ops;
  for (SmallVectorImpl<const SCEV *>::const_iterator
       I = Base.BaseRegs.begin(), E = Base.BaseRegs.end(); I != E; ++I) {
    const SCEV *BaseReg = *I;
    if (BaseReg->properlyDominates(L->getHeader(), &DT) &&
        !BaseReg->hasComputableLoopEvolution(L))
      Ops.push_back(BaseReg);
    else
      F.BaseRegs.push_back(BaseReg);
  }
  if (Ops.size() > 1) {
    const SCEV *Sum = SE.getAddExpr(Ops);
    // TODO: If Sum is zero, it probably means ScalarEvolution missed an
    // opportunity to fold something. For now, just ignore such cases
    // rather than proceed with zero in a register.
    if (!Sum->isZero()) {
      F.BaseRegs.push_back(Sum);
      (void)InsertFormula(LU, LUIdx, F);
    }
  }
}

/// GenerateSymbolicOffsets - Generate reuse formulae using symbolic offsets.
void LSRInstance::GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx,
                                          Formula Base) {
  // We can't add a symbolic offset if the address already contains one.
  if (Base.AM.BaseGV) return;

  for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
    const SCEV *G = Base.BaseRegs[i];
    GlobalValue *GV = ExtractSymbol(G, SE);
    if (G->isZero() || !GV)
      continue;
    Formula F = Base;
    F.AM.BaseGV = GV;
    if (!isLegalUse(F.AM, LU.MinOffset, LU.MaxOffset,
                    LU.Kind, LU.AccessTy, TLI))
      continue;
    F.BaseRegs[i] = G;
    (void)InsertFormula(LU, LUIdx, F);
  }
}

/// GenerateConstantOffsets - Generate reuse formulae using constant offsets.
void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx,
                                          Formula Base) {
  // TODO: For now, just add the min and max offset, because it usually isn't
  // worthwhile looking at everything in between.
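  // For example, if a use spans offsets {-8, 0, 12}, only the extremes -8
  // and 12 are tried as folded immediates below (the values here are
  // illustrative).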
  SmallVector<int64_t, 4> Worklist;
  Worklist.push_back(LU.MinOffset);
  if (LU.MaxOffset != LU.MinOffset)
    Worklist.push_back(LU.MaxOffset);

  for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
    const SCEV *G = Base.BaseRegs[i];

    for (SmallVectorImpl<int64_t>::const_iterator I = Worklist.begin(),
         E = Worklist.end(); I != E; ++I) {
      Formula F = Base;
      F.AM.BaseOffs = (uint64_t)Base.AM.BaseOffs - *I;
      if (isLegalUse(F.AM, LU.MinOffset - *I, LU.MaxOffset - *I,
                     LU.Kind, LU.AccessTy, TLI)) {
        F.BaseRegs[i] = SE.getAddExpr(G, SE.getIntegerSCEV(*I, G->getType()));

        (void)InsertFormula(LU, LUIdx, F);
      }
    }

    int64_t Imm = ExtractImmediate(G, SE);
    if (G->isZero() || Imm == 0)
      continue;
    Formula F = Base;
    F.AM.BaseOffs = (uint64_t)F.AM.BaseOffs + Imm;
    if (!isLegalUse(F.AM, LU.MinOffset, LU.MaxOffset,
                    LU.Kind, LU.AccessTy, TLI))
      continue;
    F.BaseRegs[i] = G;
    (void)InsertFormula(LU, LUIdx, F);
  }
}

/// GenerateICmpZeroScales - For ICmpZero, check to see if we can scale up
/// the comparison. For example, x == y -> x*c == y*c.
void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx,
                                         Formula Base) {
  if (LU.Kind != LSRUse::ICmpZero) return;

  // Determine the integer type for the base formula.
  const Type *IntTy = Base.getType();
  if (!IntTy) return;
  if (SE.getTypeSizeInBits(IntTy) > 64) return;

  // Don't do this if there is more than one offset.
  if (LU.MinOffset != LU.MaxOffset) return;

  assert(!Base.AM.BaseGV && "ICmpZero use is not legal!");

  // Check each interesting stride.
  for (SmallSetVector<int64_t, 8>::const_iterator
       I = Factors.begin(), E = Factors.end(); I != E; ++I) {
    int64_t Factor = *I;
    Formula F = Base;

    // Check that the multiplication doesn't overflow.
    if (F.AM.BaseOffs == INT64_MIN && Factor == -1)
      continue;
    F.AM.BaseOffs = (uint64_t)Base.AM.BaseOffs * Factor;
    if (F.AM.BaseOffs / Factor != Base.AM.BaseOffs)
      continue;

    // Check that multiplying with the use offset doesn't overflow.
    int64_t Offset = LU.MinOffset;
    if (Offset == INT64_MIN && Factor == -1)
      continue;
    Offset = (uint64_t)Offset * Factor;
    if (Offset / Factor != LU.MinOffset)
      continue;

    // Check that this scale is legal.
    if (!isLegalUse(F.AM, Offset, Offset, LU.Kind, LU.AccessTy, TLI))
      continue;

    // Compensate for the use having MinOffset built into it.
    F.AM.BaseOffs = (uint64_t)F.AM.BaseOffs + Offset - LU.MinOffset;

    const SCEV *FactorS = SE.getIntegerSCEV(Factor, IntTy);

    // Check that multiplying with each base register doesn't overflow.
    for (size_t i = 0, e = F.BaseRegs.size(); i != e; ++i) {
      F.BaseRegs[i] = SE.getMulExpr(F.BaseRegs[i], FactorS);
      if (getExactSDiv(F.BaseRegs[i], FactorS, SE) != Base.BaseRegs[i])
        goto next;
    }

    // Check that multiplying with the scaled register doesn't overflow.
    if (F.ScaledReg) {
      F.ScaledReg = SE.getMulExpr(F.ScaledReg, FactorS);
      if (getExactSDiv(F.ScaledReg, FactorS, SE) != Base.ScaledReg)
        continue;
    }

    // If we make it here and it's legal, add it.
    (void)InsertFormula(LU, LUIdx, F);
  next:;
  }
}

/// GenerateScales - Generate stride factor reuse formulae by making use of
/// scaled-offset address modes, for example.
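/// For example, on a target whose addressing can fold it, a formula with
/// the base register {0,+,4} can be rewritten to use the scaled register
/// {0,+,1} with Scale = 4, letting the addressing hardware do the multiply.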
void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx,
                                 Formula Base) {
  // Determine the integer type for the base formula.
  const Type *IntTy = Base.getType();
  if (!IntTy) return;

  // If this Formula already has a scaled register, we can't add another one.
  if (Base.AM.Scale != 0) return;

  // Check each interesting stride.
  for (SmallSetVector<int64_t, 8>::const_iterator
       I = Factors.begin(), E = Factors.end(); I != E; ++I) {
    int64_t Factor = *I;

    Base.AM.Scale = Factor;
    Base.AM.HasBaseReg = Base.BaseRegs.size() > 1;
    // Check whether this scale is going to be legal.
    if (!isLegalUse(Base.AM, LU.MinOffset, LU.MaxOffset,
                    LU.Kind, LU.AccessTy, TLI)) {
      // As a special-case, handle special out-of-loop Basic users specially.
      // TODO: Reconsider this special case.
      if (LU.Kind == LSRUse::Basic &&
          isLegalUse(Base.AM, LU.MinOffset, LU.MaxOffset,
                     LSRUse::Special, LU.AccessTy, TLI) &&
          LU.AllFixupsOutsideLoop)
        LU.Kind = LSRUse::Special;
      else
        continue;
    }
    // For an ICmpZero, negating a solitary base register won't lead to
    // new solutions.
    if (LU.Kind == LSRUse::ICmpZero &&
        !Base.AM.HasBaseReg && Base.AM.BaseOffs == 0 && !Base.AM.BaseGV)
      continue;
    // For each addrec base reg, apply the scale, if possible.
    for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
      if (const SCEVAddRecExpr *AR =
            dyn_cast<SCEVAddRecExpr>(Base.BaseRegs[i])) {
        const SCEV *FactorS = SE.getIntegerSCEV(Factor, IntTy);
        if (FactorS->isZero())
          continue;
        // Divide out the factor, ignoring high bits, since we'll be
        // scaling the value back up in the end.
        if (const SCEV *Quotient = getExactSDiv(AR, FactorS, SE, true)) {
          // TODO: This could be optimized to avoid all the copying.
          Formula F = Base;
          F.ScaledReg = Quotient;
          std::swap(F.BaseRegs[i], F.BaseRegs.back());
          F.BaseRegs.pop_back();
          (void)InsertFormula(LU, LUIdx, F);
        }
      }
  }
}

/// GenerateTruncates - Generate reuse formulae from different IV types.
void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx,
                                    Formula Base) {
  // This requires TargetLowering to tell us which truncates are free.
  if (!TLI) return;

  // Don't bother truncating symbolic values.
  if (Base.AM.BaseGV) return;

  // Determine the integer type for the base formula.
  const Type *DstTy = Base.getType();
  if (!DstTy) return;
  DstTy = SE.getEffectiveSCEVType(DstTy);

  for (SmallSetVector<const Type *, 4>::const_iterator
       I = Types.begin(), E = Types.end(); I != E; ++I) {
    const Type *SrcTy = *I;
    if (SrcTy != DstTy && TLI->isTruncateFree(SrcTy, DstTy)) {
      Formula F = Base;

      if (F.ScaledReg) F.ScaledReg = SE.getAnyExtendExpr(F.ScaledReg, *I);
      for (SmallVectorImpl<const SCEV *>::iterator J = F.BaseRegs.begin(),
           JE = F.BaseRegs.end(); J != JE; ++J)
        *J = SE.getAnyExtendExpr(*J, SrcTy);

      // TODO: This assumes we've done basic processing on all uses and
      // have an idea what the register usage is.
      if (!F.hasRegsUsedByUsesOtherThan(LUIdx, RegUses))
        continue;

      (void)InsertFormula(LU, LUIdx, F);
    }
  }
}

namespace {

/// WorkItem - Helper class for GenerateCrossUseConstantOffsets. It's used to
/// defer modifications so that the search phase doesn't have to worry about
/// the data structures moving underneath it.
struct WorkItem {
  size_t LUIdx;
  int64_t Imm;
  const SCEV *OrigReg;

  WorkItem(size_t LI, int64_t I, const SCEV *R)
    : LUIdx(LI), Imm(I), OrigReg(R) {}

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

void WorkItem::print(raw_ostream &OS) const {
  OS << "in formulae referencing " << *OrigReg << " in use " << LUIdx
     << " , add offset " << Imm;
}

void WorkItem::dump() const {
  print(errs()); errs() << '\n';
}

/// GenerateCrossUseConstantOffsets - Look for registers which are a constant
/// distance apart and try to form reuse opportunities between them.
void LSRInstance::GenerateCrossUseConstantOffsets() {
  // Group the registers by their value without any added constant offset.
  typedef std::map<int64_t, const SCEV *> ImmMapTy;
  typedef DenseMap<const SCEV *, ImmMapTy> RegMapTy;
  RegMapTy Map;
  DenseMap<const SCEV *, SmallBitVector> UsedByIndicesMap;
  SmallVector<const SCEV *, 8> Sequence;
  for (RegUseTracker::const_iterator I = RegUses.begin(), E = RegUses.end();
       I != E; ++I) {
    const SCEV *Reg = *I;
    int64_t Imm = ExtractImmediate(Reg, SE);
    std::pair<RegMapTy::iterator, bool> Pair =
      Map.insert(std::make_pair(Reg, ImmMapTy()));
    if (Pair.second)
      Sequence.push_back(Reg);
    Pair.first->second.insert(std::make_pair(Imm, *I));
    UsedByIndicesMap[Reg] |= RegUses.getUsedByIndices(*I);
  }

  // Now examine each set of registers with the same base value. Build up
  // a list of work to do and do the work in a separate step so that we're
  // not adding formulae and register counts while we're searching.
  SmallVector<WorkItem, 32> WorkItems;
  SmallSet<std::pair<size_t, int64_t>, 32> UniqueItems;
  for (SmallVectorImpl<const SCEV *>::const_iterator I = Sequence.begin(),
       E = Sequence.end(); I != E; ++I) {
    const SCEV *Reg = *I;
    const ImmMapTy &Imms = Map.find(Reg)->second;

    // It's not worthwhile looking for reuse if there's only one offset.
    if (Imms.size() == 1)
      continue;

    DEBUG(dbgs() << "Generating cross-use offsets for " << *Reg << ':';
          for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end();
               J != JE; ++J)
            dbgs() << ' ' << J->first;
          dbgs() << '\n');

    // Examine each offset.
    for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end();
         J != JE; ++J) {
      const SCEV *OrigReg = J->second;

      int64_t JImm = J->first;
      const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(OrigReg);

      if (!isa<SCEVConstant>(OrigReg) &&
          UsedByIndicesMap[Reg].count() == 1) {
        DEBUG(dbgs() << "Skipping cross-use reuse for " << *OrigReg << '\n');
        continue;
      }

      // Conservatively examine offsets between this orig reg and a few
      // selected other orig regs.
      ImmMapTy::const_iterator OtherImms[] = {
        Imms.begin(), prior(Imms.end()),
        Imms.upper_bound((Imms.begin()->first + prior(Imms.end())->first) / 2)
      };
      for (size_t i = 0, e = array_lengthof(OtherImms); i != e; ++i) {
        ImmMapTy::const_iterator M = OtherImms[i];
        if (M == J || M == JE) continue;

        // Compute the difference between the two.
        int64_t Imm = (uint64_t)JImm - M->first;
        for (int LUIdx = UsedByIndices.find_first(); LUIdx != -1;
             LUIdx = UsedByIndices.find_next(LUIdx))
          // Make a memo of this use, offset, and register tuple.
          if (UniqueItems.insert(std::make_pair(LUIdx, Imm)))
            WorkItems.push_back(WorkItem(LUIdx, Imm, OrigReg));
      }
    }
  }

  Map.clear();
  Sequence.clear();
  UsedByIndicesMap.clear();
  UniqueItems.clear();

  // Now iterate through the worklist and add new formulae.
  for (SmallVectorImpl<WorkItem>::const_iterator I = WorkItems.begin(),
       E = WorkItems.end(); I != E; ++I) {
    const WorkItem &WI = *I;
    size_t LUIdx = WI.LUIdx;
    LSRUse &LU = Uses[LUIdx];
    int64_t Imm = WI.Imm;
    const SCEV *OrigReg = WI.OrigReg;

    const Type *IntTy = SE.getEffectiveSCEVType(OrigReg->getType());
    const SCEV *NegImmS = SE.getSCEV(ConstantInt::get(IntTy, -(uint64_t)Imm));
    unsigned BitWidth = SE.getTypeSizeInBits(IntTy);

    // TODO: Use a more targeted data structure.
    for (size_t L = 0, LE = LU.Formulae.size(); L != LE; ++L) {
      Formula F = LU.Formulae[L];
      // Use the immediate in the scaled register.
      if (F.ScaledReg == OrigReg) {
        int64_t Offs = (uint64_t)F.AM.BaseOffs +
                       Imm * (uint64_t)F.AM.Scale;
        // Don't create 50 + reg(-50).
        if (F.referencesReg(SE.getSCEV(
                   ConstantInt::get(IntTy, -(uint64_t)Offs))))
          continue;
        Formula NewF = F;
        NewF.AM.BaseOffs = Offs;
        if (!isLegalUse(NewF.AM, LU.MinOffset, LU.MaxOffset,
                        LU.Kind, LU.AccessTy, TLI))
          continue;
        NewF.ScaledReg = SE.getAddExpr(NegImmS, NewF.ScaledReg);

        // If the new scale is a constant in a register, and adding the constant
        // value to the immediate would produce a value closer to zero than the
        // immediate itself, then the formula isn't worthwhile.
        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(NewF.ScaledReg))
          if (C->getValue()->getValue().isNegative() !=
                (NewF.AM.BaseOffs < 0) &&
              (C->getValue()->getValue().abs() * APInt(BitWidth, F.AM.Scale))
                .ule(APInt(BitWidth, NewF.AM.BaseOffs).abs()))
            continue;

        // OK, looks good.
        (void)InsertFormula(LU, LUIdx, NewF);
      } else {
        // Use the immediate in a base register.
        for (size_t N = 0, NE = F.BaseRegs.size(); N != NE; ++N) {
          const SCEV *BaseReg = F.BaseRegs[N];
          if (BaseReg != OrigReg)
            continue;
          Formula NewF = F;
          NewF.AM.BaseOffs = (uint64_t)NewF.AM.BaseOffs + Imm;
          if (!isLegalUse(NewF.AM, LU.MinOffset, LU.MaxOffset,
                          LU.Kind, LU.AccessTy, TLI))
            continue;
          NewF.BaseRegs[N] = SE.getAddExpr(NegImmS, BaseReg);

          // If the new formula has a constant in a register, and adding the
          // constant value to the immediate would produce a value closer to
          // zero than the immediate itself, then the formula isn't worthwhile.
          for (SmallVectorImpl<const SCEV *>::const_iterator
               J = NewF.BaseRegs.begin(), JE = NewF.BaseRegs.end();
               J != JE; ++J)
            if (const SCEVConstant *C = dyn_cast<SCEVConstant>(*J))
              if (C->getValue()->getValue().isNegative() !=
                    (NewF.AM.BaseOffs < 0) &&
                  C->getValue()->getValue().abs()
                    .ule(APInt(BitWidth, NewF.AM.BaseOffs).abs()))
                goto skip_formula;

          // Ok, looks good.
          (void)InsertFormula(LU, LUIdx, NewF);
          break;
        skip_formula:;
        }
      }
    }
  }
}

/// GenerateAllReuseFormulae - Generate formulae for each use.
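/// Each generator is run over every formula of every use, in rounds, so
/// that later generators (such as GenerateTruncates) observe the formulae
/// and register counts produced by the earlier ones.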
void
LSRInstance::GenerateAllReuseFormulae() {
  // This is split into multiple loops so that hasRegsUsedByUsesOtherThan
  // queries are more precise.
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateReassociations(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateCombinations(LU, LUIdx, LU.Formulae[i]);
  }
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateSymbolicOffsets(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateConstantOffsets(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateICmpZeroScales(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateScales(LU, LUIdx, LU.Formulae[i]);
  }
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateTruncates(LU, LUIdx, LU.Formulae[i]);
  }

  GenerateCrossUseConstantOffsets();
}

/// If there are multiple formulae with the same set of registers used
/// by other uses, pick the best one and delete the others.
void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
#ifndef NDEBUG
  bool Changed = false;
#endif

  // Collect the best formula for each unique set of shared registers. This
  // is reset for each use.
  typedef DenseMap<SmallVector<const SCEV *, 2>, size_t, UniquifierDenseMapInfo>
    BestFormulaeTy;
  BestFormulaeTy BestFormulae;

  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    FormulaSorter Sorter(L, LU, SE, DT);

    // Clear out the set of used regs; it will be recomputed.
    LU.Regs.clear();

    for (size_t FIdx = 0, NumForms = LU.Formulae.size();
         FIdx != NumForms; ++FIdx) {
      Formula &F = LU.Formulae[FIdx];

      SmallVector<const SCEV *, 2> Key;
      for (SmallVectorImpl<const SCEV *>::const_iterator J = F.BaseRegs.begin(),
           JE = F.BaseRegs.end(); J != JE; ++J) {
        const SCEV *Reg = *J;
        if (RegUses.isRegUsedByUsesOtherThan(Reg, LUIdx))
          Key.push_back(Reg);
      }
      if (F.ScaledReg &&
          RegUses.isRegUsedByUsesOtherThan(F.ScaledReg, LUIdx))
        Key.push_back(F.ScaledReg);
      // Unstable sort by host order ok, because this is only used for
      // uniquifying.
      std::sort(Key.begin(), Key.end());

      std::pair<BestFormulaeTy::const_iterator, bool> P =
        BestFormulae.insert(std::make_pair(Key, FIdx));
      if (!P.second) {
        Formula &Best = LU.Formulae[P.first->second];
        if (Sorter.operator()(F, Best))
          std::swap(F, Best);
        DEBUG(dbgs() << "Filtering out "; F.print(dbgs());
              dbgs() << "\n"
                        "    in favor of "; Best.print(dbgs());
              dbgs() << '\n');
#ifndef NDEBUG
        Changed = true;
#endif
        std::swap(F, LU.Formulae.back());
        LU.Formulae.pop_back();
        --FIdx;
        --NumForms;
        continue;
      }
      if (F.ScaledReg) LU.Regs.insert(F.ScaledReg);
      LU.Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
    }
    BestFormulae.clear();
  }

  DEBUG(if (Changed) {
          dbgs() << "\n"
                    "After filtering out undesirable candidates:\n";
          print_uses(dbgs());
        });
}

/// NarrowSearchSpaceUsingHeuristics - If there are an extraordinary number of
/// formulae to choose from, use some rough heuristics to prune down the number
/// of formulae. This keeps the main solver from taking an extraordinary amount
/// of time in some worst-case scenarios.
void LSRInstance::NarrowSearchSpaceUsingHeuristics() {
  // This is a rough guess that seems to work fairly well.
  const size_t Limit = UINT16_MAX;

  SmallPtrSet<const SCEV *, 4> Taken;
  for (;;) {
    // Estimate the worst-case number of solutions we might consider. We almost
    // never consider this many solutions because we prune the search space,
    // but the pruning isn't always sufficient.
    uint32_t Power = 1;
    for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
         E = Uses.end(); I != E; ++I) {
      size_t FSize = I->Formulae.size();
      if (FSize >= Limit) {
        Power = Limit;
        break;
      }
      Power *= FSize;
      if (Power >= Limit)
        break;
    }
    if (Power < Limit)
      break;

    // Ok, we have too many formulae on our hands to conveniently handle.
    // Use a rough heuristic to thin out the list.

    // Pick the register which is used by the most LSRUses, which is likely
    // to be a good reuse register candidate.
    const SCEV *Best = 0;
    unsigned BestNum = 0;
    for (RegUseTracker::const_iterator I = RegUses.begin(), E = RegUses.end();
         I != E; ++I) {
      const SCEV *Reg = *I;
      if (Taken.count(Reg))
        continue;
      if (!Best)
        Best = Reg;
      else {
        unsigned Count = RegUses.getUsedByIndices(Reg).count();
        if (Count > BestNum) {
          Best = Reg;
          BestNum = Count;
        }
      }
    }

    DEBUG(dbgs() << "Narrowing the search space by assuming " << *Best
                 << " will yield profitable reuse.\n");
    Taken.insert(Best);

    // In any use with formulae which reference this register, delete formulae
    // which don't reference it.
    for (SmallVectorImpl<LSRUse>::iterator I = Uses.begin(),
         E = Uses.end(); I != E; ++I) {
      LSRUse &LU = *I;
      if (!LU.Regs.count(Best)) continue;

      // Clear out the set of used regs; it will be recomputed.
      LU.Regs.clear();

      for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
        Formula &F = LU.Formulae[i];
        if (!F.referencesReg(Best)) {
          DEBUG(dbgs() << "  Deleting "; F.print(dbgs()); dbgs() << '\n');
          std::swap(LU.Formulae.back(), F);
          LU.Formulae.pop_back();
          --e;
          --i;
          continue;
        }

        if (F.ScaledReg) LU.Regs.insert(F.ScaledReg);
        LU.Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
      }
    }

    DEBUG(dbgs() << "After pre-selection:\n";
          print_uses(dbgs()));
  }
}

/// SolveRecurse - This is the recursive solver.
void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
                               Cost &SolutionCost,
                               SmallVectorImpl<const Formula *> &Workspace,
                               const Cost &CurCost,
                               const SmallPtrSet<const SCEV *, 16> &CurRegs,
                               DenseSet<const SCEV *> &VisitedRegs) const {
  // Some ideas:
  //  - prune more:
  //    - use more aggressive filtering
  //    - sort the formulae so that the most profitable solutions are found
  //      first
  //    - sort the uses too
  //  - search faster:
  //    - don't compute a cost, and then compare. compare while computing a
  //      cost and bail early.
  //    - track register sets with SmallBitVector

  const LSRUse &LU = Uses[Workspace.size()];

  // If this use references any register that's already a part of the
  // in-progress solution, consider it a requirement that a formula must
  // reference that register in order to be considered. This prunes out
  // unprofitable searching.
  SmallSetVector<const SCEV *, 4> ReqRegs;
  for (SmallPtrSet<const SCEV *, 16>::const_iterator I = CurRegs.begin(),
       E = CurRegs.end(); I != E; ++I)
    if (LU.Regs.count(*I))
      ReqRegs.insert(*I);

  bool AnySatisfiedReqRegs = false;
  SmallPtrSet<const SCEV *, 16> NewRegs;
  Cost NewCost;
retry:
  for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(),
       E = LU.Formulae.end(); I != E; ++I) {
    const Formula &F = *I;

    // Ignore formulae which do not use any of the required registers.
    for (SmallSetVector<const SCEV *, 4>::const_iterator J = ReqRegs.begin(),
         JE = ReqRegs.end(); J != JE; ++J) {
      const SCEV *Reg = *J;
      if ((!F.ScaledReg || F.ScaledReg != Reg) &&
          std::find(F.BaseRegs.begin(), F.BaseRegs.end(), Reg) ==
          F.BaseRegs.end())
        goto skip;
    }
    AnySatisfiedReqRegs = true;

    // Evaluate the cost of the current formula. If it's already worse than
    // the current best, prune the search at that point.
    NewCost = CurCost;
    NewRegs = CurRegs;
    NewCost.RateFormula(F, NewRegs, VisitedRegs, L, LU.Offsets, SE, DT);
    if (NewCost < SolutionCost) {
      Workspace.push_back(&F);
      if (Workspace.size() != Uses.size()) {
        SolveRecurse(Solution, SolutionCost, Workspace, NewCost,
                     NewRegs, VisitedRegs);
        if (F.getNumRegs() == 1 && Workspace.size() == 1)
          VisitedRegs.insert(F.ScaledReg ? F.ScaledReg : F.BaseRegs[0]);
      } else {
        DEBUG(dbgs() << "New best at "; NewCost.print(dbgs());
              dbgs() << ". Regs:";
Regs:"; 2721 for (SmallPtrSet<const SCEV *, 16>::const_iterator 2722 I = NewRegs.begin(), E = NewRegs.end(); I != E; ++I) 2723 dbgs() << ' ' << **I; 2724 dbgs() << '\n'); 2725 2726 SolutionCost = NewCost; 2727 Solution = Workspace; 2728 } 2729 Workspace.pop_back(); 2730 } 2731 skip:; 2732 } 2733 2734 // If none of the formulae had all of the required registers, relax the 2735 // constraint so that we don't exclude all formulae. 2736 if (!AnySatisfiedReqRegs) { 2737 ReqRegs.clear(); 2738 goto retry; 2739 } 2740} 2741 2742void LSRInstance::Solve(SmallVectorImpl<const Formula *> &Solution) const { 2743 SmallVector<const Formula *, 8> Workspace; 2744 Cost SolutionCost; 2745 SolutionCost.Loose(); 2746 Cost CurCost; 2747 SmallPtrSet<const SCEV *, 16> CurRegs; 2748 DenseSet<const SCEV *> VisitedRegs; 2749 Workspace.reserve(Uses.size()); 2750 2751 SolveRecurse(Solution, SolutionCost, Workspace, CurCost, 2752 CurRegs, VisitedRegs); 2753 2754 // Ok, we've now made all our decisions. 2755 DEBUG(dbgs() << "\n" 2756 "The chosen solution requires "; SolutionCost.print(dbgs()); 2757 dbgs() << ":\n"; 2758 for (size_t i = 0, e = Uses.size(); i != e; ++i) { 2759 dbgs() << " "; 2760 Uses[i].print(dbgs()); 2761 dbgs() << "\n" 2762 " "; 2763 Solution[i]->print(dbgs()); 2764 dbgs() << '\n'; 2765 }); 2766} 2767 2768/// getImmediateDominator - A handy utility for the specific DominatorTree 2769/// query that we need here. 2770/// 2771static BasicBlock *getImmediateDominator(BasicBlock *BB, DominatorTree &DT) { 2772 DomTreeNode *Node = DT.getNode(BB); 2773 if (!Node) return 0; 2774 Node = Node->getIDom(); 2775 if (!Node) return 0; 2776 return Node->getBlock(); 2777} 2778 2779Value *LSRInstance::Expand(const LSRFixup &LF, 2780 const Formula &F, 2781 BasicBlock::iterator IP, 2782 SCEVExpander &Rewriter, 2783 SmallVectorImpl<WeakVH> &DeadInsts) const { 2784 const LSRUse &LU = Uses[LF.LUIdx]; 2785 2786 // Then, collect some instructions which we will remain dominated by when 2787 // expanding the replacement. These must be dominated by any operands that 2788 // will be required in the expansion. 2789 SmallVector<Instruction *, 4> Inputs; 2790 if (Instruction *I = dyn_cast<Instruction>(LF.OperandValToReplace)) 2791 Inputs.push_back(I); 2792 if (LU.Kind == LSRUse::ICmpZero) 2793 if (Instruction *I = 2794 dyn_cast<Instruction>(cast<ICmpInst>(LF.UserInst)->getOperand(1))) 2795 Inputs.push_back(I); 2796 if (LF.PostIncLoop) { 2797 if (!L->contains(LF.UserInst)) 2798 Inputs.push_back(L->getLoopLatch()->getTerminator()); 2799 else 2800 Inputs.push_back(IVIncInsertPos); 2801 } 2802 2803 // Then, climb up the immediate dominator tree as far as we can go while 2804 // still being dominated by the input positions. 

void LSRInstance::Solve(SmallVectorImpl<const Formula *> &Solution) const {
  SmallVector<const Formula *, 8> Workspace;
  Cost SolutionCost;
  SolutionCost.Loose();
  Cost CurCost;
  SmallPtrSet<const SCEV *, 16> CurRegs;
  DenseSet<const SCEV *> VisitedRegs;
  Workspace.reserve(Uses.size());

  SolveRecurse(Solution, SolutionCost, Workspace, CurCost,
               CurRegs, VisitedRegs);

  // Ok, we've now made all our decisions.
  DEBUG(dbgs() << "\n"
                  "The chosen solution requires "; SolutionCost.print(dbgs());
        dbgs() << ":\n";
        for (size_t i = 0, e = Uses.size(); i != e; ++i) {
          dbgs() << "  ";
          Uses[i].print(dbgs());
          dbgs() << "\n"
                    "    ";
          Solution[i]->print(dbgs());
          dbgs() << '\n';
        });
}

/// getImmediateDominator - A handy utility for the specific DominatorTree
/// query that we need here.
///
static BasicBlock *getImmediateDominator(BasicBlock *BB, DominatorTree &DT) {
  DomTreeNode *Node = DT.getNode(BB);
  if (!Node) return 0;
  Node = Node->getIDom();
  if (!Node) return 0;
  return Node->getBlock();
}

Value *LSRInstance::Expand(const LSRFixup &LF,
                           const Formula &F,
                           BasicBlock::iterator IP,
                           SCEVExpander &Rewriter,
                           SmallVectorImpl<WeakVH> &DeadInsts) const {
  const LSRUse &LU = Uses[LF.LUIdx];

  // First, collect the instructions which the expanded replacement must
  // remain dominated by; the expansion cannot be hoisted above any operand
  // that it requires.
  SmallVector<Instruction *, 4> Inputs;
  if (Instruction *I = dyn_cast<Instruction>(LF.OperandValToReplace))
    Inputs.push_back(I);
  if (LU.Kind == LSRUse::ICmpZero)
    if (Instruction *I =
          dyn_cast<Instruction>(cast<ICmpInst>(LF.UserInst)->getOperand(1)))
      Inputs.push_back(I);
  if (LF.PostIncLoop) {
    if (!L->contains(LF.UserInst))
      Inputs.push_back(L->getLoopLatch()->getTerminator());
    else
      Inputs.push_back(IVIncInsertPos);
  }

  // Then, climb up the immediate dominator tree as far as we can go while
  // still being dominated by the input positions.
  for (;;) {
    bool AllDominate = true;
    Instruction *BetterPos = 0;
    BasicBlock *IDom = getImmediateDominator(IP->getParent(), DT);
    if (!IDom) break;
    Instruction *Tentative = IDom->getTerminator();
    for (SmallVectorImpl<Instruction *>::const_iterator I = Inputs.begin(),
         E = Inputs.end(); I != E; ++I) {
      Instruction *Inst = *I;
      if (Inst == Tentative || !DT.dominates(Inst, Tentative)) {
        AllDominate = false;
        break;
      }
      if (IDom == Inst->getParent() &&
          (!BetterPos || DT.dominates(BetterPos, Inst)))
        BetterPos = next(BasicBlock::iterator(Inst));
    }
    if (!AllDominate)
      break;
    if (BetterPos)
      IP = BetterPos;
    else
      IP = Tentative;
  }
  while (isa<PHINode>(IP)) ++IP;
  while (isa<DbgInfoIntrinsic>(IP)) ++IP;
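
  // One motivation for the climb above: if every input is available in the
  // loop preheader, IP migrates out of the loop entirely, so the expansion
  // is emitted once in a dominating block, where SCEVExpander can also
  // reuse it for later fixups rather than re-emitting it at each use.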

  // Inform the Rewriter if we have a post-increment use, so that it can
  // perform an advantageous expansion.
  Rewriter.setPostInc(LF.PostIncLoop);

  // This is the type that the user actually needs.
  const Type *OpTy = LF.OperandValToReplace->getType();
  // This will be the type that we'll initially expand to.
  const Type *Ty = F.getType();
  if (!Ty)
    // No type known; just expand directly to the ultimate type.
    Ty = OpTy;
  else if (SE.getEffectiveSCEVType(Ty) == SE.getEffectiveSCEVType(OpTy))
    // Expand directly to the ultimate type if it's the right size.
    Ty = OpTy;
  // This is the type to do integer arithmetic in.
  const Type *IntTy = SE.getEffectiveSCEVType(Ty);

  // Build up a list of operands to add together to form the full base.
  SmallVector<const SCEV *, 8> Ops;

  // Expand the BaseRegs portion.
  for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
       E = F.BaseRegs.end(); I != E; ++I) {
    const SCEV *Reg = *I;
    assert(!Reg->isZero() && "Zero allocated in a base register!");

    // If we're expanding for a post-inc user of the add-rec's loop, make
    // the post-inc adjustment.
    const SCEV *Start = Reg;
    while (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Start)) {
      if (AR->getLoop() == LF.PostIncLoop) {
        Reg = SE.getAddExpr(Reg, AR->getStepRecurrence(SE));
        // If the user is inside the loop, insert the code after the
        // increment so that it is dominated by its operand. If the original
        // insert point was already dominated by the increment, keep it,
        // because there may be loop-variant operands that need to be
        // respected also.
        if (L->contains(LF.UserInst) && !DT.dominates(IVIncInsertPos, IP)) {
          IP = IVIncInsertPos;
          while (isa<DbgInfoIntrinsic>(IP)) ++IP;
        }
        break;
      }
      Start = AR->getStart();
    }

    Ops.push_back(SE.getUnknown(Rewriter.expandCodeFor(Reg, 0, IP)));
  }

  // Flush the operand list to suppress SCEVExpander hoisting.
  if (!Ops.empty()) {
    Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
    Ops.clear();
    Ops.push_back(SE.getUnknown(FullV));
  }

  // Expand the ScaledReg portion.
  Value *ICmpScaledV = 0;
  if (F.AM.Scale != 0) {
    const SCEV *ScaledS = F.ScaledReg;

    // If we're expanding for a post-inc user of the add-rec's loop, make
    // the post-inc adjustment.
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(ScaledS))
      if (AR->getLoop() == LF.PostIncLoop)
        ScaledS = SE.getAddExpr(ScaledS, AR->getStepRecurrence(SE));

    if (LU.Kind == LSRUse::ICmpZero) {
      // An interesting way of "folding" with an icmp is to use a negated
      // scale, which we'll implement by inserting it into the other operand
      // of the icmp.
      assert(F.AM.Scale == -1 &&
             "The only scale supported by ICmpZero uses is -1!");
      ICmpScaledV = Rewriter.expandCodeFor(ScaledS, 0, IP);
    } else {
      // Otherwise just expand the scaled register and an explicit scale,
      // which is expected to be matched as part of the address.
      ScaledS = SE.getUnknown(Rewriter.expandCodeFor(ScaledS, 0, IP));
      ScaledS = SE.getMulExpr(ScaledS,
                              SE.getIntegerSCEV(F.AM.Scale,
                                                ScaledS->getType()));
      Ops.push_back(ScaledS);

      // Flush the operand list to suppress SCEVExpander hoisting.
      Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
      Ops.clear();
      Ops.push_back(SE.getUnknown(FullV));
    }
  }
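
  // To illustrate the ICmpZero handling above: a formula standing for
  //   (%a + -1 * %b) == 0
  // is not expanded as a subtraction compared against zero. Instead the
  // negated scaled register is routed into the other icmp operand,
  // producing the equivalent and cheaper
  //   icmp eq %a, %b
  // The immediate-offset handling below plays the same trick, folding
  //   (%a + C) == 0   into   icmp eq %a, -C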

  // Expand the GV portion.
  if (F.AM.BaseGV) {
    Ops.push_back(SE.getUnknown(F.AM.BaseGV));

    // Flush the operand list to suppress SCEVExpander hoisting.
    Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
    Ops.clear();
    Ops.push_back(SE.getUnknown(FullV));
  }

  // Expand the immediate portion.
  int64_t Offset = (uint64_t)F.AM.BaseOffs + LF.Offset;
  if (Offset != 0) {
    if (LU.Kind == LSRUse::ICmpZero) {
      // The other interesting way of "folding" with an ICmpZero is to use a
      // negated immediate.
      if (!ICmpScaledV)
        ICmpScaledV = ConstantInt::get(IntTy, -Offset);
      else {
        Ops.push_back(SE.getUnknown(ICmpScaledV));
        ICmpScaledV = ConstantInt::get(IntTy, Offset);
      }
    } else {
      // Just add the immediate values. These again are expected to be
      // matched as part of the address.
      Ops.push_back(SE.getUnknown(ConstantInt::getSigned(IntTy, Offset)));
    }
  }

  // Emit instructions summing all the operands.
  const SCEV *FullS = Ops.empty() ?
                      SE.getIntegerSCEV(0, IntTy) :
                      SE.getAddExpr(Ops);
  Value *FullV = Rewriter.expandCodeFor(FullS, Ty, IP);

  // We're done expanding now, so reset the rewriter.
  Rewriter.setPostInc(0);

  // An ICmpZero Formula represents an ICmp which we're handling as a
  // comparison against zero. Now that we've expanded an expression for that
  // form, update the ICmp's other operand.
  if (LU.Kind == LSRUse::ICmpZero) {
    ICmpInst *CI = cast<ICmpInst>(LF.UserInst);
    DeadInsts.push_back(CI->getOperand(1));
    assert(!F.AM.BaseGV && "ICmp does not support folding a global value and "
                           "a scale at the same time!");
    if (F.AM.Scale == -1) {
      if (ICmpScaledV->getType() != OpTy) {
        Instruction *Cast =
          CastInst::Create(CastInst::getCastOpcode(ICmpScaledV, false,
                                                   OpTy, false),
                           ICmpScaledV, OpTy, "tmp", CI);
        ICmpScaledV = Cast;
      }
      CI->setOperand(1, ICmpScaledV);
    } else {
      assert(F.AM.Scale == 0 &&
             "ICmp does not support folding a global value and "
             "a scale at the same time!");
      Constant *C = ConstantInt::getSigned(SE.getEffectiveSCEVType(OpTy),
                                           -(uint64_t)Offset);
      if (C->getType() != OpTy)
        C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
                                                          OpTy, false),
                                  C, OpTy);

      CI->setOperand(1, C);
    }
  }

  return FullV;
}
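
// A sketch of Expand's overall output, for a hypothetical address formula
// with base register {%p,+,4}<%L>, a scale of 2 on register %x, and
// immediate offset 8 (names illustrative; the exact instructions depend on
// SCEVExpander): the components are expanded and summed left to right, with
// flushes in between, giving code shaped roughly like
//   %t0 = <expansion of {%p,+,4}<%L>>
//   %t1 = mul %x, 2
//   %t2 = add %t0, %t1
//   %t3 = add %t2, 8
// which the target is then expected to match as a single scaled-index
// addressing mode where it can.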

/// RewriteForPHI - Helper for Rewrite. PHI nodes are special because the use
/// of their operands effectively happens in their predecessor blocks, so the
/// expression may need to be expanded in multiple places.
void LSRInstance::RewriteForPHI(PHINode *PN,
                                const LSRFixup &LF,
                                const Formula &F,
                                SCEVExpander &Rewriter,
                                SmallVectorImpl<WeakVH> &DeadInsts,
                                Pass *P) const {
  DenseMap<BasicBlock *, Value *> Inserted;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
    if (PN->getIncomingValue(i) == LF.OperandValToReplace) {
      BasicBlock *BB = PN->getIncomingBlock(i);

      // If this is a critical edge, split the edge so that we do not insert
      // the code on all predecessor/successor paths. We do this unless this
      // is the canonical backedge for this loop, which complicates post-inc
      // users.
      if (e != 1 && BB->getTerminator()->getNumSuccessors() > 1 &&
          !isa<IndirectBrInst>(BB->getTerminator()) &&
          (PN->getParent() != L->getHeader() || !L->contains(BB))) {
        // Split the critical edge.
        BasicBlock *NewBB = SplitCriticalEdge(BB, PN->getParent(), P);

        // If PN is outside of the loop and BB is in the loop, we want to
        // move the block to be immediately before the PHI block, not
        // immediately after BB.
        if (L->contains(BB) && !L->contains(PN))
          NewBB->moveBefore(PN->getParent());

        // Splitting the edge can reduce the number of PHI entries we have.
        e = PN->getNumIncomingValues();
        BB = NewBB;
        i = PN->getBasicBlockIndex(BB);
      }

      std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> Pair =
        Inserted.insert(std::make_pair(BB, static_cast<Value *>(0)));
      if (!Pair.second)
        PN->setIncomingValue(i, Pair.first->second);
      else {
        Value *FullV = Expand(LF, F, BB->getTerminator(), Rewriter, DeadInsts);

        // If this is reuse-by-noop-cast, insert the noop cast.
        const Type *OpTy = LF.OperandValToReplace->getType();
        if (FullV->getType() != OpTy)
          FullV =
            CastInst::Create(CastInst::getCastOpcode(FullV, false,
                                                     OpTy, false),
                             FullV, LF.OperandValToReplace->getType(),
                             "tmp", BB->getTerminator());

        PN->setIncomingValue(i, FullV);
        Pair.first->second = FullV;
      }
    }
}
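
// To illustrate the critical-edge split above: given
//   %bb:     br i1 %c, label %target, label %other
//   %target: %phi = phi [ %v, %bb ], [ ... ]
// the expansion replacing %v must execute only on the %bb -> %target path.
// Since %bb has two successors and %target has multiple predecessors, the
// edge is critical; it is split, and the code is emitted in the new
// intermediate block rather than in %bb, where it would also run when
// control transfers to %other.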

/// Rewrite - Emit instructions for the leading candidate expression for this
/// LSRUse (this is called "expanding"), and update the UserInst to reference
/// the newly expanded value.
void LSRInstance::Rewrite(const LSRFixup &LF,
                          const Formula &F,
                          SCEVExpander &Rewriter,
                          SmallVectorImpl<WeakVH> &DeadInsts,
                          Pass *P) const {
  // First, find an insertion point that dominates UserInst. For PHI nodes,
  // find the nearest block which dominates all the relevant uses.
  if (PHINode *PN = dyn_cast<PHINode>(LF.UserInst)) {
    RewriteForPHI(PN, LF, F, Rewriter, DeadInsts, P);
  } else {
    Value *FullV = Expand(LF, F, LF.UserInst, Rewriter, DeadInsts);

    // If this is reuse-by-noop-cast, insert the noop cast.
    const Type *OpTy = LF.OperandValToReplace->getType();
    if (FullV->getType() != OpTy) {
      Instruction *Cast =
        CastInst::Create(CastInst::getCastOpcode(FullV, false, OpTy, false),
                         FullV, OpTy, "tmp", LF.UserInst);
      FullV = Cast;
    }

    // Update the user. ICmpZero is handled specially here (for now) because
    // Expand may have updated one of the operands of the icmp already, and
    // its new value may happen to be equal to LF.OperandValToReplace, in
    // which case doing replaceUsesOfWith leads to replacing both operands
    // with the same value. TODO: Reorganize this.
    if (Uses[LF.LUIdx].Kind == LSRUse::ICmpZero)
      LF.UserInst->setOperand(0, FullV);
    else
      LF.UserInst->replaceUsesOfWith(LF.OperandValToReplace, FullV);
  }

  DeadInsts.push_back(LF.OperandValToReplace);
}

void
LSRInstance::ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
                               Pass *P) {
  // Keep track of instructions we may have made dead, so that
  // we can remove them after we are done working.
  SmallVector<WeakVH, 16> DeadInsts;

  SCEVExpander Rewriter(SE);
  Rewriter.disableCanonicalMode();
  Rewriter.setIVIncInsertPos(L, IVIncInsertPos);

  // Expand the new value definitions and update the users.
  for (size_t i = 0, e = Fixups.size(); i != e; ++i) {
    size_t LUIdx = Fixups[i].LUIdx;

    Rewrite(Fixups[i], *Solution[LUIdx], Rewriter, DeadInsts, P);

    Changed = true;
  }

  // Clean up after ourselves. This must be done before deleting any
  // instructions.
  Rewriter.clear();

  Changed |= DeleteTriviallyDeadInstructions(DeadInsts);
}

LSRInstance::LSRInstance(const TargetLowering *tli, Loop *l, Pass *P)
  : IU(P->getAnalysis<IVUsers>()),
    SE(P->getAnalysis<ScalarEvolution>()),
    DT(P->getAnalysis<DominatorTree>()),
    TLI(tli), L(l), Changed(false), IVIncInsertPos(0) {

  // If LoopSimplify form is not available, stay out of trouble.
  if (!L->isLoopSimplifyForm()) return;

  // If there's no interesting work to be done, bail early.
  if (IU.empty()) return;

  DEBUG(dbgs() << "\nLSR on loop ";
        WriteAsOperand(dbgs(), L->getHeader(), /*PrintType=*/false);
        dbgs() << ":\n");

  // If the IV is used in an int-to-float cast inside the loop, try to
  // eliminate the cast operation.
  OptimizeShadowIV();

  // Change the loop-terminating condition to use the post-inc iv when
  // possible.
  Changed |= OptimizeLoopTermCond();

  CollectInterestingTypesAndFactors();
  CollectFixupsAndInitialFormulae();
  CollectLoopInvariantFixupsAndFormulae();

  DEBUG(dbgs() << "LSR found " << Uses.size() << " uses:\n";
        print_uses(dbgs()));

  // Now use the reuse data to generate a bunch of interesting ways
  // to formulate the values needed for the uses.
  GenerateAllReuseFormulae();

  DEBUG(dbgs() << "\n"
                  "After generating reuse formulae:\n";
        print_uses(dbgs()));

  FilterOutUndesirableDedicatedRegisters();
  NarrowSearchSpaceUsingHeuristics();

  SmallVector<const Formula *, 8> Solution;
  Solve(Solution);
  assert(Solution.size() == Uses.size() && "Malformed solution!");

  // Release memory that is no longer needed.
  Factors.clear();
  Types.clear();
  RegUses.clear();

#ifndef NDEBUG
  // Formulae should be legal.
  for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
       E = Uses.end(); I != E; ++I) {
    const LSRUse &LU = *I;
    for (SmallVectorImpl<Formula>::const_iterator J = LU.Formulae.begin(),
         JE = LU.Formulae.end(); J != JE; ++J)
      assert(isLegalUse(J->AM, LU.MinOffset, LU.MaxOffset,
                        LU.Kind, LU.AccessTy, TLI) &&
             "Illegal formula generated!");
  }
#endif

  // Now that we've decided what we want, make it so.
  ImplementSolution(Solution, P);
}
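
// To illustrate OptimizeShadowIV, invoked at the top of the pipeline above:
// in a loop resembling
//   for (i = 0; i < n; ++i) { ... (double)i ... }
// the int-to-float cast can be eliminated by maintaining a parallel
// floating-point "shadow" induction variable:
//   for (i = 0, d = 0.0; i < n; ++i, d += 1.0) { ... d ... }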

void LSRInstance::print_factors_and_types(raw_ostream &OS) const {
  if (Factors.empty() && Types.empty()) return;

  OS << "LSR has identified the following interesting factors and types: ";
  bool First = true;

  for (SmallSetVector<int64_t, 8>::const_iterator
       I = Factors.begin(), E = Factors.end(); I != E; ++I) {
    if (!First) OS << ", ";
    First = false;
    OS << '*' << *I;
  }

  for (SmallSetVector<const Type *, 4>::const_iterator
       I = Types.begin(), E = Types.end(); I != E; ++I) {
    if (!First) OS << ", ";
    First = false;
    OS << '(' << **I << ')';
  }
  OS << '\n';
}

void LSRInstance::print_fixups(raw_ostream &OS) const {
  OS << "LSR is examining the following fixup sites:\n";
  for (SmallVectorImpl<LSRFixup>::const_iterator I = Fixups.begin(),
       E = Fixups.end(); I != E; ++I) {
    const LSRFixup &LF = *I;
    OS << "  ";
    LF.print(OS);
    OS << '\n';
  }
}

void LSRInstance::print_uses(raw_ostream &OS) const {
  OS << "LSR is examining the following uses:\n";
  for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
       E = Uses.end(); I != E; ++I) {
    const LSRUse &LU = *I;
    OS << "  ";
    LU.print(OS);
    OS << '\n';
    for (SmallVectorImpl<Formula>::const_iterator J = LU.Formulae.begin(),
         JE = LU.Formulae.end(); J != JE; ++J) {
      OS << "    ";
      J->print(OS);
      OS << '\n';
    }
  }
}

void LSRInstance::print(raw_ostream &OS) const {
  print_factors_and_types(OS);
  print_fixups(OS);
  print_uses(OS);
}

void LSRInstance::dump() const {
  print(errs()); errs() << '\n';
}

namespace {

class LoopStrengthReduce : public LoopPass {
  /// TLI - Keep a pointer to a TargetLowering to consult for determining
  /// transformation profitability.
  const TargetLowering *const TLI;

public:
  static char ID; // Pass ID, replacement for typeid
  explicit LoopStrengthReduce(const TargetLowering *tli = 0);

private:
  bool runOnLoop(Loop *L, LPPassManager &LPM);
  void getAnalysisUsage(AnalysisUsage &AU) const;
};

}

char LoopStrengthReduce::ID = 0;
static RegisterPass<LoopStrengthReduce>
X("loop-reduce", "Loop Strength Reduction");

Pass *llvm::createLoopStrengthReducePass(const TargetLowering *TLI) {
  return new LoopStrengthReduce(TLI);
}

LoopStrengthReduce::LoopStrengthReduce(const TargetLowering *tli)
  : LoopPass(&ID), TLI(tli) {}

void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const {
  // We split critical edges, so we change the CFG. However, we do update
  // many analyses if they are around.
  AU.addPreservedID(LoopSimplifyID);
  AU.addPreserved<LoopInfo>();
  AU.addPreserved("domfrontier");

  AU.addRequiredID(LoopSimplifyID);
  AU.addRequired<DominatorTree>();
  AU.addPreserved<DominatorTree>();
  AU.addRequired<ScalarEvolution>();
  AU.addPreserved<ScalarEvolution>();
  AU.addRequired<IVUsers>();
  AU.addPreserved<IVUsers>();
}

bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) {
  bool Changed = false;

  // Run the main LSR transformation.
  Changed |= LSRInstance(TLI, L, this).getChanged();

  // At this point, it is worth checking to see if any recurrence PHIs are
  // also dead, so that we can remove them as well.
  Changed |= DeleteDeadPHIs(L->getHeader());

  return Changed;
}
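
// The pass is registered above under the name "loop-reduce", so it can be
// exercised in isolation with, for example:
//   opt -loop-reduce -S input.ll -o output.ll
// Note that a plain opt invocation constructs the pass with a null
// TargetLowering, so the target-dependent legality and profitability
// queries fall back to their conservative defaults.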