LoopStrengthReduce.cpp revision 206083
//===- LoopStrengthReduce.cpp - Strength Reduce IVs in Loops --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation analyzes and transforms the induction variables (and
// computations derived from them) into forms suitable for efficient execution
// on the target.
//
// This pass performs a strength reduction on array references inside loops that
// have as one or more of their components the loop induction variable; it
// rewrites expressions to take advantage of scaled-index addressing modes
// available on the target, and it performs a variety of other optimizations
// related to loop induction variables.
//
// Terminology note: this code has a lot of handling for "post-increment" or
// "post-inc" users. This is not talking about post-increment addressing modes;
// it is instead talking about code like this:
//
//   %i = phi [ 0, %entry ], [ %i.next, %latch ]
//   ...
//   %i.next = add %i, 1
//   %c = icmp eq %i.next, %n
//
// The SCEV for %i is {0,+,1}<%L>. The SCEV for %i.next is {1,+,1}<%L>, however
// it's useful to think about these as the same register, with some uses using
// the value of the register before the add and some using it after. In this
// example, the icmp is a post-increment user, since it uses %i.next, which is
// the value of the induction variable after the increment. The other common
// case of post-increment users is users outside the loop.
//
// TODO: More sophistication in the way Formulae are generated and filtered.
//
// TODO: Handle multiple loops at a time.
//
// TODO: Should TargetLowering::AddrMode::BaseGV be changed to a ConstantExpr
//       instead of a GlobalValue?
//
// TODO: When truncation is free, truncate ICmp users' operands to make it a
//       smaller encoding (on x86 at least).
//
// TODO: When a negated register is used by an add (such as in a list of
//       multiple base registers, or as the increment expression in an addrec),
//       we may not actually need both reg and (-1 * reg) in registers; the
//       negation can be implemented by using a sub instead of an add. The
//       lack of support for taking this into consideration when making
//       register pressure decisions is partly worked around by the "Special"
//       use kind.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "loop-reduce"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Analysis/IVUsers.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include <algorithm>
using namespace llvm;

namespace {

/// RegSortData - This class holds data which is used to order reuse candidates.
class RegSortData {
public:
  /// UsedByIndices - This represents the set of LSRUse indices which reference
  /// a particular register.
  SmallBitVector UsedByIndices;

  RegSortData() {}

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

void RegSortData::print(raw_ostream &OS) const {
  OS << "[NumUses=" << UsedByIndices.count() << ']';
}

void RegSortData::dump() const {
  print(errs()); errs() << '\n';
}

namespace {

/// RegUseTracker - Map register candidates to information about how they are
/// used.
class RegUseTracker {
  typedef DenseMap<const SCEV *, RegSortData> RegUsesTy;

  RegUsesTy RegUses;
  SmallVector<const SCEV *, 16> RegSequence;

public:
  void CountRegister(const SCEV *Reg, size_t LUIdx);

  bool isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const;

  const SmallBitVector &getUsedByIndices(const SCEV *Reg) const;

  void clear();

  typedef SmallVectorImpl<const SCEV *>::iterator iterator;
  typedef SmallVectorImpl<const SCEV *>::const_iterator const_iterator;
  iterator begin() { return RegSequence.begin(); }
  iterator end() { return RegSequence.end(); }
  const_iterator begin() const { return RegSequence.begin(); }
  const_iterator end() const { return RegSequence.end(); }
};

}

void
RegUseTracker::CountRegister(const SCEV *Reg, size_t LUIdx) {
  std::pair<RegUsesTy::iterator, bool> Pair =
    RegUses.insert(std::make_pair(Reg, RegSortData()));
  RegSortData &RSD = Pair.first->second;
  if (Pair.second)
    RegSequence.push_back(Reg);
  RSD.UsedByIndices.resize(std::max(RSD.UsedByIndices.size(), LUIdx + 1));
  RSD.UsedByIndices.set(LUIdx);
}

bool
RegUseTracker::isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const {
  if (!RegUses.count(Reg)) return false;
  const SmallBitVector &UsedByIndices =
    RegUses.find(Reg)->second.UsedByIndices;
  int i = UsedByIndices.find_first();
  if (i == -1) return false;
  if ((size_t)i != LUIdx) return true;
  return UsedByIndices.find_next(i) != -1;
}

const SmallBitVector &RegUseTracker::getUsedByIndices(const SCEV *Reg) const {
  RegUsesTy::const_iterator I = RegUses.find(Reg);
  assert(I != RegUses.end() && "Unknown register!");
  return I->second.UsedByIndices;
}

void RegUseTracker::clear() {
  RegUses.clear();
  RegSequence.clear();
}

namespace {

/// Formula - This class holds information that describes a formula for
/// computing a value that satisfies a use. It may include broken-out
/// immediates and scaled registers.
struct Formula {
  /// AM - This is used to represent complex addressing, as well as other kinds
  /// of interesting uses.
  TargetLowering::AddrMode AM;

  /// BaseRegs - The list of "base" registers for this use. When this is
  /// non-empty, AM.HasBaseReg should be set to true.
  SmallVector<const SCEV *, 2> BaseRegs;

  /// ScaledReg - The 'scaled' register for this use. This should be non-null
  /// when AM.Scale is not zero.
  const SCEV *ScaledReg;

  Formula() : ScaledReg(0) {}

  void InitialMatch(const SCEV *S, Loop *L,
                    ScalarEvolution &SE, DominatorTree &DT);

  unsigned getNumRegs() const;
  const Type *getType() const;

  bool referencesReg(const SCEV *S) const;
  bool hasRegsUsedByUsesOtherThan(size_t LUIdx,
                                  const RegUseTracker &RegUses) const;

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

/// DoInitialMatch - Recursion helper for InitialMatch.
static void DoInitialMatch(const SCEV *S, Loop *L,
                           SmallVectorImpl<const SCEV *> &Good,
                           SmallVectorImpl<const SCEV *> &Bad,
                           ScalarEvolution &SE, DominatorTree &DT) {
  // Collect expressions which properly dominate the loop header.
  if (S->properlyDominates(L->getHeader(), &DT)) {
    Good.push_back(S);
    return;
  }

  // Look at add operands.
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
         I != E; ++I)
      DoInitialMatch(*I, L, Good, Bad, SE, DT);
    return;
  }

  // Look at addrec operands.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
    if (!AR->getStart()->isZero()) {
      DoInitialMatch(AR->getStart(), L, Good, Bad, SE, DT);
      DoInitialMatch(SE.getAddRecExpr(SE.getIntegerSCEV(0, AR->getType()),
                                      AR->getStepRecurrence(SE),
                                      AR->getLoop()),
                     L, Good, Bad, SE, DT);
      return;
    }

  // Handle a multiplication by -1 (negation) if it didn't fold.
  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S))
    if (Mul->getOperand(0)->isAllOnesValue()) {
      SmallVector<const SCEV *, 4> Ops(Mul->op_begin()+1, Mul->op_end());
      const SCEV *NewMul = SE.getMulExpr(Ops);

      SmallVector<const SCEV *, 4> MyGood;
      SmallVector<const SCEV *, 4> MyBad;
      DoInitialMatch(NewMul, L, MyGood, MyBad, SE, DT);
      const SCEV *NegOne = SE.getSCEV(ConstantInt::getAllOnesValue(
        SE.getEffectiveSCEVType(NewMul->getType())));
      for (SmallVectorImpl<const SCEV *>::const_iterator I = MyGood.begin(),
           E = MyGood.end(); I != E; ++I)
        Good.push_back(SE.getMulExpr(NegOne, *I));
      for (SmallVectorImpl<const SCEV *>::const_iterator I = MyBad.begin(),
           E = MyBad.end(); I != E; ++I)
        Bad.push_back(SE.getMulExpr(NegOne, *I));
      return;
    }

  // Ok, we can't do anything interesting. Just stuff the whole thing into a
  // register and hope for the best.
  Bad.push_back(S);
}

/// InitialMatch - Incorporate loop-variant parts of S into this Formula,
/// attempting to keep all loop-invariant and loop-computable values in a
/// single base register.
void Formula::InitialMatch(const SCEV *S, Loop *L,
                           ScalarEvolution &SE, DominatorTree &DT) {
  SmallVector<const SCEV *, 4> Good;
  SmallVector<const SCEV *, 4> Bad;
  DoInitialMatch(S, L, Good, Bad, SE, DT);
  if (!Good.empty()) {
    BaseRegs.push_back(SE.getAddExpr(Good));
    AM.HasBaseReg = true;
  }
  if (!Bad.empty()) {
    BaseRegs.push_back(SE.getAddExpr(Bad));
    AM.HasBaseReg = true;
  }
}

/// getNumRegs - Return the total number of register operands used by this
/// formula. This does not include register uses implied by non-constant
/// addrec strides.
unsigned Formula::getNumRegs() const {
  return !!ScaledReg + BaseRegs.size();
}

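// As an illustration (a hypothetical expression, not taken from this file):
// given S = {(%base + 4),+,8}<%L>, DoInitialMatch places the loop-invariant
// start (%base + 4) in the Good list and the recurrence {0,+,8}<%L> in the
// Bad list, so InitialMatch produces a formula with
// BaseRegs = { (%base + 4), {0,+,8}<%L> } and AM.HasBaseReg set.
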
/// getType - Return the type of this formula, if it has one, or null
/// otherwise. This type is meaningless except for the bit size.
const Type *Formula::getType() const {
  return !BaseRegs.empty() ? BaseRegs.front()->getType() :
         ScaledReg ? ScaledReg->getType() :
         AM.BaseGV ? AM.BaseGV->getType() :
         0;
}

/// referencesReg - Test if this formula references the given register.
bool Formula::referencesReg(const SCEV *S) const {
  return S == ScaledReg ||
         std::find(BaseRegs.begin(), BaseRegs.end(), S) != BaseRegs.end();
}

/// hasRegsUsedByUsesOtherThan - Test whether this formula uses registers
/// which are used by uses other than the use with the given index.
bool Formula::hasRegsUsedByUsesOtherThan(size_t LUIdx,
                                         const RegUseTracker &RegUses) const {
  if (ScaledReg)
    if (RegUses.isRegUsedByUsesOtherThan(ScaledReg, LUIdx))
      return true;
  for (SmallVectorImpl<const SCEV *>::const_iterator I = BaseRegs.begin(),
       E = BaseRegs.end(); I != E; ++I)
    if (RegUses.isRegUsedByUsesOtherThan(*I, LUIdx))
      return true;
  return false;
}

void Formula::print(raw_ostream &OS) const {
  bool First = true;
  if (AM.BaseGV) {
    if (!First) OS << " + "; else First = false;
    WriteAsOperand(OS, AM.BaseGV, /*PrintType=*/false);
  }
  if (AM.BaseOffs != 0) {
    if (!First) OS << " + "; else First = false;
    OS << AM.BaseOffs;
  }
  for (SmallVectorImpl<const SCEV *>::const_iterator I = BaseRegs.begin(),
       E = BaseRegs.end(); I != E; ++I) {
    if (!First) OS << " + "; else First = false;
    OS << "reg(" << **I << ')';
  }
  if (AM.Scale != 0) {
    if (!First) OS << " + "; else First = false;
    OS << AM.Scale << "*reg(";
    if (ScaledReg)
      OS << *ScaledReg;
    else
      OS << "<unknown>";
    OS << ')';
  }
}

void Formula::dump() const {
  print(errs()); errs() << '\n';
}

/// isAddRecSExtable - Return true if the given addrec can be sign-extended
/// without changing its value.
static bool isAddRecSExtable(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
  const Type *WideTy =
    IntegerType::get(SE.getContext(),
                     SE.getTypeSizeInBits(AR->getType()) + 1);
  return isa<SCEVAddRecExpr>(SE.getSignExtendExpr(AR, WideTy));
}

/// isAddSExtable - Return true if the given add can be sign-extended
/// without changing its value.
static bool isAddSExtable(const SCEVAddExpr *A, ScalarEvolution &SE) {
  const Type *WideTy =
    IntegerType::get(SE.getContext(),
                     SE.getTypeSizeInBits(A->getType()) + 1);
  return isa<SCEVAddExpr>(SE.getSignExtendExpr(A, WideTy));
}

/// isMulSExtable - Return true if the given mul can be sign-extended
/// without changing its value.
static bool isMulSExtable(const SCEVMulExpr *A, ScalarEvolution &SE) {
  const Type *WideTy =
    IntegerType::get(SE.getContext(),
                     SE.getTypeSizeInBits(A->getType()) + 1);
  return isa<SCEVMulExpr>(SE.getSignExtendExpr(A, WideTy));
}

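// For illustration (hypothetical types, not from this file): these three
// helpers share one idiom: widen the expression by a single bit and ask
// ScalarEvolution to sign-extend it. If the extension folds back into the
// same kind of node (e.g. an i32 add becoming an i33 add of sign-extended
// operands), the operation provably cannot overflow its original type, so
// transformations like distributing a division over it are safe.
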
/// getExactSDiv - Return an expression for LHS /s RHS, if it can be determined
/// and if the remainder is known to be zero, or null otherwise. If
/// IgnoreSignificantBits is true, expressions like (X * Y) /s Y are simplified
/// to X, ignoring that the multiplication may overflow, which is useful when
/// the result will be used in a context where the most significant bits are
/// ignored.
static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
                                ScalarEvolution &SE,
                                bool IgnoreSignificantBits = false) {
  // Handle the trivial case, which works for any SCEV type.
  if (LHS == RHS)
    return SE.getIntegerSCEV(1, LHS->getType());

  // Handle x /s -1 as x * -1, to give ScalarEvolution a chance to do some
  // folding.
  if (RHS->isAllOnesValue())
    return SE.getMulExpr(LHS, RHS);

  // Check for a division of a constant by a constant.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(LHS)) {
    const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS);
    if (!RC)
      return 0;
    if (C->getValue()->getValue().srem(RC->getValue()->getValue()) != 0)
      return 0;
    return SE.getConstant(C->getValue()->getValue()
             .sdiv(RC->getValue()->getValue()));
  }

  // Distribute the sdiv over addrec operands, if the addrec doesn't overflow.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) {
    if (IgnoreSignificantBits || isAddRecSExtable(AR, SE)) {
      const SCEV *Start = getExactSDiv(AR->getStart(), RHS, SE,
                                       IgnoreSignificantBits);
      if (!Start) return 0;
      const SCEV *Step = getExactSDiv(AR->getStepRecurrence(SE), RHS, SE,
                                      IgnoreSignificantBits);
      if (!Step) return 0;
      return SE.getAddRecExpr(Start, Step, AR->getLoop());
    }
  }

  // Distribute the sdiv over add operands, if the add doesn't overflow.
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(LHS)) {
    if (IgnoreSignificantBits || isAddSExtable(Add, SE)) {
      SmallVector<const SCEV *, 8> Ops;
      for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
           I != E; ++I) {
        const SCEV *Op = getExactSDiv(*I, RHS, SE,
                                      IgnoreSignificantBits);
        if (!Op) return 0;
        Ops.push_back(Op);
      }
      return SE.getAddExpr(Ops);
    }
  }

  // Check for a multiply operand that we can pull RHS out of.
  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS))
    if (IgnoreSignificantBits || isMulSExtable(Mul, SE)) {
      SmallVector<const SCEV *, 4> Ops;
      bool Found = false;
      for (SCEVMulExpr::op_iterator I = Mul->op_begin(), E = Mul->op_end();
           I != E; ++I) {
        if (!Found)
          if (const SCEV *Q = getExactSDiv(*I, RHS, SE,
                                           IgnoreSignificantBits)) {
            Ops.push_back(Q);
            Found = true;
            continue;
          }
        Ops.push_back(*I);
      }
      return Found ? SE.getMulExpr(Ops) : 0;
    }

  // Otherwise we don't know.
  return 0;
}

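// For illustration (a hypothetical query, not from this file): assuming the
// addrec is known not to overflow, getExactSDiv({8,+,4}<%L>, 2, SE) would
// distribute over the operands and return {4,+,2}<%L>, whereas
// getExactSDiv({8,+,4}<%L>, 3, SE) returns null because 8 /s 3 leaves a
// remainder.
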
/// ExtractImmediate - If S involves the addition of a constant integer value,
/// return that integer value, and mutate S to point to a new SCEV with that
/// value excluded.
static int64_t ExtractImmediate(const SCEV *&S, ScalarEvolution &SE) {
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    if (C->getValue()->getValue().getMinSignedBits() <= 64) {
      S = SE.getIntegerSCEV(0, C->getType());
      return C->getValue()->getSExtValue();
    }
  } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end());
    int64_t Result = ExtractImmediate(NewOps.front(), SE);
    S = SE.getAddExpr(NewOps);
    return Result;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
    int64_t Result = ExtractImmediate(NewOps.front(), SE);
    S = SE.getAddRecExpr(NewOps, AR->getLoop());
    return Result;
  }
  return 0;
}

/// ExtractSymbol - If S involves the addition of a GlobalValue address,
/// return that symbol, and mutate S to point to a new SCEV with that
/// value excluded.
static GlobalValue *ExtractSymbol(const SCEV *&S, ScalarEvolution &SE) {
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue())) {
      S = SE.getIntegerSCEV(0, GV->getType());
      return GV;
    }
  } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end());
    GlobalValue *Result = ExtractSymbol(NewOps.back(), SE);
    S = SE.getAddExpr(NewOps);
    return Result;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
    GlobalValue *Result = ExtractSymbol(NewOps.front(), SE);
    S = SE.getAddRecExpr(NewOps, AR->getLoop());
    return Result;
  }
  return 0;
}

/// isAddressUse - Returns true if the specified instruction is using the
/// specified value as an address.
static bool isAddressUse(Instruction *Inst, Value *OperandVal) {
  bool isAddress = isa<LoadInst>(Inst);
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->getOperand(1) == OperandVal)
      isAddress = true;
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::prefetch:
    case Intrinsic::x86_sse2_loadu_dq:
    case Intrinsic::x86_sse2_loadu_pd:
    case Intrinsic::x86_sse_loadu_ps:
    case Intrinsic::x86_sse_storeu_ps:
    case Intrinsic::x86_sse2_storeu_pd:
    case Intrinsic::x86_sse2_storeu_dq:
    case Intrinsic::x86_sse2_storel_dq:
      if (II->getOperand(1) == OperandVal)
        isAddress = true;
      break;
    }
  }
  return isAddress;
}

/// getAccessType - Return the type of the memory being accessed.
static const Type *getAccessType(const Instruction *Inst) {
  const Type *AccessTy = Inst->getType();
  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst))
    AccessTy = SI->getOperand(0)->getType();
  else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::x86_sse_storeu_ps:
    case Intrinsic::x86_sse2_storeu_pd:
    case Intrinsic::x86_sse2_storeu_dq:
    case Intrinsic::x86_sse2_storel_dq:
      AccessTy = II->getOperand(1)->getType();
      break;
    }
  }

  // All pointers have the same requirements, so canonicalize them to an
  // arbitrary pointer type to minimize variation.
  if (const PointerType *PTy = dyn_cast<PointerType>(AccessTy))
    AccessTy = PointerType::get(IntegerType::get(PTy->getContext(), 1),
                                PTy->getAddressSpace());

  return AccessTy;
}

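// For illustration (a hypothetical instruction, not from this file): for
// "store i32 %v, i32* %p", isAddressUse(SI, %p) is true and getAccessType
// returns i32; pointer-typed access types are further canonicalized to an
// i1 pointer in the original address space so that equivalent address uses
// compare equal.
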
/// DeleteTriviallyDeadInstructions - If any of the instructions in the
/// specified set are trivially dead, delete them and see if this makes any of
/// their operands subsequently dead.
static bool
DeleteTriviallyDeadInstructions(SmallVectorImpl<WeakVH> &DeadInsts) {
  bool Changed = false;

  while (!DeadInsts.empty()) {
    Instruction *I = dyn_cast_or_null<Instruction>(DeadInsts.pop_back_val());

    if (I == 0 || !isInstructionTriviallyDead(I))
      continue;

    for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
      if (Instruction *U = dyn_cast<Instruction>(*OI)) {
        *OI = 0;
        if (U->use_empty())
          DeadInsts.push_back(U);
      }

    I->eraseFromParent();
    Changed = true;
  }

  return Changed;
}

namespace {

/// Cost - This class is used to measure and compare candidate formulae.
class Cost {
  /// TODO: Some of these could be merged. Also, a lexical ordering
  /// isn't always optimal.
  unsigned NumRegs;
  unsigned AddRecCost;
  unsigned NumIVMuls;
  unsigned NumBaseAdds;
  unsigned ImmCost;
  unsigned SetupCost;

public:
  Cost()
    : NumRegs(0), AddRecCost(0), NumIVMuls(0), NumBaseAdds(0), ImmCost(0),
      SetupCost(0) {}

  unsigned getNumRegs() const { return NumRegs; }

  bool operator<(const Cost &Other) const;

  void Loose();

  void RateFormula(const Formula &F,
                   SmallPtrSet<const SCEV *, 16> &Regs,
                   const DenseSet<const SCEV *> &VisitedRegs,
                   const Loop *L,
                   const SmallVectorImpl<int64_t> &Offsets,
                   ScalarEvolution &SE, DominatorTree &DT);

  void print(raw_ostream &OS) const;
  void dump() const;

private:
  void RateRegister(const SCEV *Reg,
                    SmallPtrSet<const SCEV *, 16> &Regs,
                    const Loop *L,
                    ScalarEvolution &SE, DominatorTree &DT);
  void RatePrimaryRegister(const SCEV *Reg,
                           SmallPtrSet<const SCEV *, 16> &Regs,
                           const Loop *L,
                           ScalarEvolution &SE, DominatorTree &DT);
};

}

/// RateRegister - Tally up interesting quantities from the given register.
void Cost::RateRegister(const SCEV *Reg,
                        SmallPtrSet<const SCEV *, 16> &Regs,
                        const Loop *L,
                        ScalarEvolution &SE, DominatorTree &DT) {
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Reg)) {
    if (AR->getLoop() == L)
      AddRecCost += 1; /// TODO: This should be a function of the stride.

    // If this is an addrec for a loop that's already been visited by LSR,
    // don't second-guess its addrec phi nodes. LSR isn't currently smart
    // enough to reason about more than one loop at a time. Consider these
    // registers free and leave them alone.
    else if (L->contains(AR->getLoop()) ||
             (!AR->getLoop()->contains(L) &&
              DT.dominates(L->getHeader(), AR->getLoop()->getHeader()))) {
      for (BasicBlock::iterator I = AR->getLoop()->getHeader()->begin();
           PHINode *PN = dyn_cast<PHINode>(I); ++I)
        if (SE.isSCEVable(PN->getType()) &&
            (SE.getEffectiveSCEVType(PN->getType()) ==
             SE.getEffectiveSCEVType(AR->getType())) &&
            SE.getSCEV(PN) == AR)
          return;

      // If this isn't one of the addrecs that the loop already has, it
      // would require a costly new phi and add. TODO: This isn't
      // precisely modeled right now.
      ++NumBaseAdds;
      if (!Regs.count(AR->getStart()))
        RateRegister(AR->getStart(), Regs, L, SE, DT);
    }

    // Add the step value register, if it needs one.
    // TODO: The non-affine case isn't precisely modeled here.
    if (!AR->isAffine() || !isa<SCEVConstant>(AR->getOperand(1)))
      if (!Regs.count(AR->getStart()))
        RateRegister(AR->getOperand(1), Regs, L, SE, DT);
  }
  ++NumRegs;

  // Rough heuristic; favor registers which don't require extra setup
  // instructions in the preheader.
  if (!isa<SCEVUnknown>(Reg) &&
      !isa<SCEVConstant>(Reg) &&
      !(isa<SCEVAddRecExpr>(Reg) &&
        (isa<SCEVUnknown>(cast<SCEVAddRecExpr>(Reg)->getStart()) ||
         isa<SCEVConstant>(cast<SCEVAddRecExpr>(Reg)->getStart()))))
    ++SetupCost;
}

/// RatePrimaryRegister - Record this register in the set. If we haven't seen it
/// before, rate it.
void Cost::RatePrimaryRegister(const SCEV *Reg,
                               SmallPtrSet<const SCEV *, 16> &Regs,
                               const Loop *L,
                               ScalarEvolution &SE, DominatorTree &DT) {
  if (Regs.insert(Reg))
    RateRegister(Reg, Regs, L, SE, DT);
}

void Cost::RateFormula(const Formula &F,
                       SmallPtrSet<const SCEV *, 16> &Regs,
                       const DenseSet<const SCEV *> &VisitedRegs,
                       const Loop *L,
                       const SmallVectorImpl<int64_t> &Offsets,
                       ScalarEvolution &SE, DominatorTree &DT) {
  // Tally up the registers.
  if (const SCEV *ScaledReg = F.ScaledReg) {
    if (VisitedRegs.count(ScaledReg)) {
      Loose();
      return;
    }
    RatePrimaryRegister(ScaledReg, Regs, L, SE, DT);
  }
  for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
       E = F.BaseRegs.end(); I != E; ++I) {
    const SCEV *BaseReg = *I;
    if (VisitedRegs.count(BaseReg)) {
      Loose();
      return;
    }
    RatePrimaryRegister(BaseReg, Regs, L, SE, DT);

    NumIVMuls += isa<SCEVMulExpr>(BaseReg) &&
                 BaseReg->hasComputableLoopEvolution(L);
  }

  if (F.BaseRegs.size() > 1)
    NumBaseAdds += F.BaseRegs.size() - 1;

  // Tally up the non-zero immediates.
  for (SmallVectorImpl<int64_t>::const_iterator I = Offsets.begin(),
       E = Offsets.end(); I != E; ++I) {
    int64_t Offset = (uint64_t)*I + F.AM.BaseOffs;
    if (F.AM.BaseGV)
      ImmCost += 64; // Handle symbolic values conservatively.
                     // TODO: This should probably be the pointer size.
    else if (Offset != 0)
      ImmCost += APInt(64, Offset, true).getMinSignedBits();
  }
}

/// Loose - Set this cost to a losing value.
void Cost::Loose() {
  NumRegs = ~0u;
  AddRecCost = ~0u;
  NumIVMuls = ~0u;
  NumBaseAdds = ~0u;
  ImmCost = ~0u;
  SetupCost = ~0u;
}

/// operator< - Choose the lower cost.
bool Cost::operator<(const Cost &Other) const {
  if (NumRegs != Other.NumRegs)
    return NumRegs < Other.NumRegs;
  if (AddRecCost != Other.AddRecCost)
    return AddRecCost < Other.AddRecCost;
  if (NumIVMuls != Other.NumIVMuls)
    return NumIVMuls < Other.NumIVMuls;
  if (NumBaseAdds != Other.NumBaseAdds)
    return NumBaseAdds < Other.NumBaseAdds;
  if (ImmCost != Other.ImmCost)
    return ImmCost < Other.ImmCost;
  if (SetupCost != Other.SetupCost)
    return SetupCost < Other.SetupCost;
  return false;
}

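// For illustration (hypothetical values): under this lexicographic order,
// a cost with (NumRegs=2, AddRecCost=3) beats one with (NumRegs=3,
// AddRecCost=0), because NumRegs is compared first; the later fields only
// break ties among solutions using the same number of registers.
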
"" : "s"); 761 if (ImmCost != 0) 762 OS << ", plus " << ImmCost << " imm cost"; 763 if (SetupCost != 0) 764 OS << ", plus " << SetupCost << " setup cost"; 765} 766 767void Cost::dump() const { 768 print(errs()); errs() << '\n'; 769} 770 771namespace { 772 773/// LSRFixup - An operand value in an instruction which is to be replaced 774/// with some equivalent, possibly strength-reduced, replacement. 775struct LSRFixup { 776 /// UserInst - The instruction which will be updated. 777 Instruction *UserInst; 778 779 /// OperandValToReplace - The operand of the instruction which will 780 /// be replaced. The operand may be used more than once; every instance 781 /// will be replaced. 782 Value *OperandValToReplace; 783 784 /// PostIncLoop - If this user is to use the post-incremented value of an 785 /// induction variable, this variable is non-null and holds the loop 786 /// associated with the induction variable. 787 const Loop *PostIncLoop; 788 789 /// LUIdx - The index of the LSRUse describing the expression which 790 /// this fixup needs, minus an offset (below). 791 size_t LUIdx; 792 793 /// Offset - A constant offset to be added to the LSRUse expression. 794 /// This allows multiple fixups to share the same LSRUse with different 795 /// offsets, for example in an unrolled loop. 796 int64_t Offset; 797 798 LSRFixup(); 799 800 void print(raw_ostream &OS) const; 801 void dump() const; 802}; 803 804} 805 806LSRFixup::LSRFixup() 807 : UserInst(0), OperandValToReplace(0), PostIncLoop(0), 808 LUIdx(~size_t(0)), Offset(0) {} 809 810void LSRFixup::print(raw_ostream &OS) const { 811 OS << "UserInst="; 812 // Store is common and interesting enough to be worth special-casing. 813 if (StoreInst *Store = dyn_cast<StoreInst>(UserInst)) { 814 OS << "store "; 815 WriteAsOperand(OS, Store->getOperand(0), /*PrintType=*/false); 816 } else if (UserInst->getType()->isVoidTy()) 817 OS << UserInst->getOpcodeName(); 818 else 819 WriteAsOperand(OS, UserInst, /*PrintType=*/false); 820 821 OS << ", OperandValToReplace="; 822 WriteAsOperand(OS, OperandValToReplace, /*PrintType=*/false); 823 824 if (PostIncLoop) { 825 OS << ", PostIncLoop="; 826 WriteAsOperand(OS, PostIncLoop->getHeader(), /*PrintType=*/false); 827 } 828 829 if (LUIdx != ~size_t(0)) 830 OS << ", LUIdx=" << LUIdx; 831 832 if (Offset != 0) 833 OS << ", Offset=" << Offset; 834} 835 836void LSRFixup::dump() const { 837 print(errs()); errs() << '\n'; 838} 839 840namespace { 841 842/// UniquifierDenseMapInfo - A DenseMapInfo implementation for holding 843/// DenseMaps and DenseSets of sorted SmallVectors of const SCEV*. 844struct UniquifierDenseMapInfo { 845 static SmallVector<const SCEV *, 2> getEmptyKey() { 846 SmallVector<const SCEV *, 2> V; 847 V.push_back(reinterpret_cast<const SCEV *>(-1)); 848 return V; 849 } 850 851 static SmallVector<const SCEV *, 2> getTombstoneKey() { 852 SmallVector<const SCEV *, 2> V; 853 V.push_back(reinterpret_cast<const SCEV *>(-2)); 854 return V; 855 } 856 857 static unsigned getHashValue(const SmallVector<const SCEV *, 2> &V) { 858 unsigned Result = 0; 859 for (SmallVectorImpl<const SCEV *>::const_iterator I = V.begin(), 860 E = V.end(); I != E; ++I) 861 Result ^= DenseMapInfo<const SCEV *>::getHashValue(*I); 862 return Result; 863 } 864 865 static bool isEqual(const SmallVector<const SCEV *, 2> &LHS, 866 const SmallVector<const SCEV *, 2> &RHS) { 867 return LHS == RHS; 868 } 869}; 870 871/// LSRUse - This class holds the state that LSR keeps for each use in 872/// IVUsers, as well as uses invented by LSR itself. 
/// LSRUse - This class holds the state that LSR keeps for each use in
/// IVUsers, as well as uses invented by LSR itself. It includes information
/// about what kinds of things can be folded into the user, information about
/// the user itself, and information about how the use may be satisfied.
/// TODO: Represent multiple users of the same expression in common?
class LSRUse {
  DenseSet<SmallVector<const SCEV *, 2>, UniquifierDenseMapInfo> Uniquifier;

public:
  /// KindType - An enum for a kind of use, indicating what types of
  /// scaled and immediate operands it might support.
  enum KindType {
    Basic,   ///< A normal use, with no folding.
    Special, ///< A special case of basic, allowing -1 scales.
    Address, ///< An address use; folding according to TargetLowering
    ICmpZero ///< An equality icmp with both operands folded into one.
    // TODO: Add a generic icmp too?
  };

  KindType Kind;
  const Type *AccessTy;

  SmallVector<int64_t, 8> Offsets;
  int64_t MinOffset;
  int64_t MaxOffset;

  /// AllFixupsOutsideLoop - This records whether all of the fixups using this
  /// LSRUse are outside of the loop, in which case some special-case heuristics
  /// may be used.
  bool AllFixupsOutsideLoop;

  /// Formulae - A list of ways to build a value that can satisfy this user.
  /// After the list is populated, one of these is selected heuristically and
  /// used to formulate a replacement for OperandValToReplace in UserInst.
  SmallVector<Formula, 12> Formulae;

  /// Regs - The set of register candidates used by all formulae in this LSRUse.
  SmallPtrSet<const SCEV *, 4> Regs;

  LSRUse(KindType K, const Type *T) : Kind(K), AccessTy(T),
                                      MinOffset(INT64_MAX),
                                      MaxOffset(INT64_MIN),
                                      AllFixupsOutsideLoop(true) {}

  bool InsertFormula(const Formula &F);

  void check() const;

  void print(raw_ostream &OS) const;
  void dump() const;
};

/// InsertFormula - If the given formula has not yet been inserted, add it to
/// the list, and return true. Return false otherwise.
bool LSRUse::InsertFormula(const Formula &F) {
  SmallVector<const SCEV *, 2> Key = F.BaseRegs;
  if (F.ScaledReg) Key.push_back(F.ScaledReg);
  // Unstable sort by host order ok, because this is only used for uniquifying.
  std::sort(Key.begin(), Key.end());

  if (!Uniquifier.insert(Key).second)
    return false;

  // Using a register to hold the value of 0 is not profitable.
  assert((!F.ScaledReg || !F.ScaledReg->isZero()) &&
         "Zero allocated in a scaled register!");
#ifndef NDEBUG
  for (SmallVectorImpl<const SCEV *>::const_iterator I =
       F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I)
    assert(!(*I)->isZero() && "Zero allocated in a base register!");
#endif

  // Add the formula to the list.
  Formulae.push_back(F);

  // Record registers now being used by this use.
  if (F.ScaledReg) Regs.insert(F.ScaledReg);
  Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());

  return true;
}

void LSRUse::print(raw_ostream &OS) const {
  OS << "LSR Use: Kind=";
  switch (Kind) {
  case Basic:    OS << "Basic"; break;
  case Special:  OS << "Special"; break;
  case ICmpZero: OS << "ICmpZero"; break;
  case Address:
    OS << "Address of ";
    if (AccessTy->isPointerTy())
      OS << "pointer"; // the full pointer type could be really verbose
    else
      OS << *AccessTy;
  }

  OS << ", Offsets={";
  for (SmallVectorImpl<int64_t>::const_iterator I = Offsets.begin(),
       E = Offsets.end(); I != E; ++I) {
    OS << *I;
    if (next(I) != E)
      OS << ',';
  }
  OS << '}';

  if (AllFixupsOutsideLoop)
    OS << ", all-fixups-outside-loop";
}

void LSRUse::dump() const {
  print(errs()); errs() << '\n';
}

/// isLegalUse - Test whether the use described by AM is "legal", meaning it can
/// be completely folded into the user instruction at isel time. This includes
/// address-mode folding and special icmp tricks.
static bool isLegalUse(const TargetLowering::AddrMode &AM,
                       LSRUse::KindType Kind, const Type *AccessTy,
                       const TargetLowering *TLI) {
  switch (Kind) {
  case LSRUse::Address:
    // If we have low-level target information, ask the target if it can
    // completely fold this address.
    if (TLI) return TLI->isLegalAddressingMode(AM, AccessTy);

    // Otherwise, just guess that reg+reg addressing is legal.
    return !AM.BaseGV && AM.BaseOffs == 0 && AM.Scale <= 1;

  case LSRUse::ICmpZero:
    // There's not even a target hook for querying whether it would be legal to
    // fold a GV into an ICmp.
    if (AM.BaseGV)
      return false;

    // ICmp only has two operands; don't allow more than two non-trivial parts.
    if (AM.Scale != 0 && AM.HasBaseReg && AM.BaseOffs != 0)
      return false;

    // ICmp only supports no scale or a -1 scale, as we can "fold" a -1 scale by
    // putting the scaled register in the other operand of the icmp.
    if (AM.Scale != 0 && AM.Scale != -1)
      return false;

    // If we have low-level target information, ask the target if it can fold an
    // integer immediate on an icmp.
    if (AM.BaseOffs != 0) {
      if (TLI) return TLI->isLegalICmpImmediate(-AM.BaseOffs);
      return false;
    }

    return true;

  case LSRUse::Basic:
    // Only handle single-register values.
    return !AM.BaseGV && AM.Scale == 0 && AM.BaseOffs == 0;

  case LSRUse::Special:
    // Only handle -1 scales, or no scale.
    return AM.Scale == 0 || AM.Scale == -1;
  }

  return false;
}

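// For illustration (hypothetical values): with no TargetLowering available,
// an Address use with {BaseGV=0, BaseOffs=0, HasBaseReg=true, Scale=1}
// passes the reg+reg guess above, while the same mode with BaseOffs=4 is
// rejected; an ICmpZero use with Scale=-1 and no immediate is accepted
// because the scaled register can be moved to the other side of the icmp.
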
static bool isLegalUse(TargetLowering::AddrMode AM,
                       int64_t MinOffset, int64_t MaxOffset,
                       LSRUse::KindType Kind, const Type *AccessTy,
                       const TargetLowering *TLI) {
  // Check for overflow.
  if (((int64_t)((uint64_t)AM.BaseOffs + MinOffset) > AM.BaseOffs) !=
      (MinOffset > 0))
    return false;
  AM.BaseOffs = (uint64_t)AM.BaseOffs + MinOffset;
  if (isLegalUse(AM, Kind, AccessTy, TLI)) {
    AM.BaseOffs = (uint64_t)AM.BaseOffs - MinOffset;
    // Check for overflow.
    if (((int64_t)((uint64_t)AM.BaseOffs + MaxOffset) > AM.BaseOffs) !=
        (MaxOffset > 0))
      return false;
    AM.BaseOffs = (uint64_t)AM.BaseOffs + MaxOffset;
    return isLegalUse(AM, Kind, AccessTy, TLI);
  }
  return false;
}

static bool isAlwaysFoldable(int64_t BaseOffs,
                             GlobalValue *BaseGV,
                             bool HasBaseReg,
                             LSRUse::KindType Kind, const Type *AccessTy,
                             const TargetLowering *TLI) {
  // Fast-path: zero is always foldable.
  if (BaseOffs == 0 && !BaseGV) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  TargetLowering::AddrMode AM;
  AM.BaseOffs = BaseOffs;
  AM.BaseGV = BaseGV;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Kind == LSRUse::ICmpZero ? -1 : 1;

  return isLegalUse(AM, Kind, AccessTy, TLI);
}

static bool isAlwaysFoldable(const SCEV *S,
                             int64_t MinOffset, int64_t MaxOffset,
                             bool HasBaseReg,
                             LSRUse::KindType Kind, const Type *AccessTy,
                             const TargetLowering *TLI,
                             ScalarEvolution &SE) {
  // Fast-path: zero is always foldable.
  if (S->isZero()) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  int64_t BaseOffs = ExtractImmediate(S, SE);
  GlobalValue *BaseGV = ExtractSymbol(S, SE);

  // If there's anything else involved, it's not foldable.
  if (!S->isZero()) return false;

  // Fast-path: zero is always foldable.
  if (BaseOffs == 0 && !BaseGV) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  TargetLowering::AddrMode AM;
  AM.BaseOffs = BaseOffs;
  AM.BaseGV = BaseGV;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Kind == LSRUse::ICmpZero ? -1 : 1;

  return isLegalUse(AM, MinOffset, MaxOffset, Kind, AccessTy, TLI);
}

/// FormulaSorter - This class implements an ordering for formulae which sorts
/// them by their standalone cost.
class FormulaSorter {
  /// These two sets are kept empty, so that we compute standalone costs.
  DenseSet<const SCEV *> VisitedRegs;
  SmallPtrSet<const SCEV *, 16> Regs;
  Loop *L;
  LSRUse *LU;
  ScalarEvolution &SE;
  DominatorTree &DT;

public:
  FormulaSorter(Loop *l, LSRUse &lu, ScalarEvolution &se, DominatorTree &dt)
    : L(l), LU(&lu), SE(se), DT(dt) {}

  bool operator()(const Formula &A, const Formula &B) {
    Cost CostA;
    CostA.RateFormula(A, Regs, VisitedRegs, L, LU->Offsets, SE, DT);
    Regs.clear();
    Cost CostB;
    CostB.RateFormula(B, Regs, VisitedRegs, L, LU->Offsets, SE, DT);
    Regs.clear();
    return CostA < CostB;
  }
};

/// LSRInstance - This class holds state for the main loop strength reduction
/// logic.
class LSRInstance {
  IVUsers &IU;
  ScalarEvolution &SE;
  DominatorTree &DT;
  const TargetLowering *const TLI;
  Loop *const L;
  bool Changed;

  /// IVIncInsertPos - This is the insert position that the current loop's
  /// induction variable increment should be placed. In simple loops, this is
  /// the latch block's terminator. But in more complicated cases, this is a
  /// position which will dominate all the in-loop post-increment users.
  Instruction *IVIncInsertPos;

  /// Factors - Interesting factors between use strides.
  SmallSetVector<int64_t, 8> Factors;

  /// Types - Interesting use types, to facilitate truncation reuse.
  SmallSetVector<const Type *, 4> Types;

  /// Fixups - The list of operands which are to be replaced.
  SmallVector<LSRFixup, 16> Fixups;

  /// Uses - The list of interesting uses.
  SmallVector<LSRUse, 16> Uses;

  /// RegUses - Track which uses use which register candidates.
  RegUseTracker RegUses;

  void OptimizeShadowIV();
  bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse);
  ICmpInst *OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse);
  bool OptimizeLoopTermCond();

  void CollectInterestingTypesAndFactors();
  void CollectFixupsAndInitialFormulae();

  LSRFixup &getNewFixup() {
    Fixups.push_back(LSRFixup());
    return Fixups.back();
  }

  // Support for sharing of LSRUses between LSRFixups.
  typedef DenseMap<const SCEV *, size_t> UseMapTy;
  UseMapTy UseMap;

  bool reconcileNewOffset(LSRUse &LU, int64_t NewOffset,
                          LSRUse::KindType Kind, const Type *AccessTy);

  std::pair<size_t, int64_t> getUse(const SCEV *&Expr,
                                    LSRUse::KindType Kind,
                                    const Type *AccessTy);

public:
  void InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
  void InsertSupplementalFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
  void CountRegisters(const Formula &F, size_t LUIdx);
  bool InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F);

  void CollectLoopInvariantFixupsAndFormulae();

  void GenerateReassociations(LSRUse &LU, unsigned LUIdx, Formula Base,
                              unsigned Depth = 0);
  void GenerateCombinations(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateCrossUseConstantOffsets();
  void GenerateAllReuseFormulae();

  void FilterOutUndesirableDedicatedRegisters();
  void NarrowSearchSpaceUsingHeuristics();

  void SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
                    Cost &SolutionCost,
                    SmallVectorImpl<const Formula *> &Workspace,
                    const Cost &CurCost,
                    const SmallPtrSet<const SCEV *, 16> &CurRegs,
                    DenseSet<const SCEV *> &VisitedRegs) const;
  void Solve(SmallVectorImpl<const Formula *> &Solution) const;

  Value *Expand(const LSRFixup &LF,
                const Formula &F,
                BasicBlock::iterator IP,
                SCEVExpander &Rewriter,
                SmallVectorImpl<WeakVH> &DeadInsts) const;
  void RewriteForPHI(PHINode *PN, const LSRFixup &LF,
                     const Formula &F,
                     SCEVExpander &Rewriter,
                     SmallVectorImpl<WeakVH> &DeadInsts,
                     Pass *P) const;
  void Rewrite(const LSRFixup &LF,
               const Formula &F,
               SCEVExpander &Rewriter,
               SmallVectorImpl<WeakVH> &DeadInsts,
               Pass *P) const;
  void ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
                         Pass *P);

  LSRInstance(const TargetLowering *tli, Loop *l, Pass *P);

  bool getChanged() const { return Changed; }

  void print_factors_and_types(raw_ostream &OS) const;
  void print_fixups(raw_ostream &OS) const;
  void print_uses(raw_ostream &OS) const;
  void print(raw_ostream &OS) const;
  void dump() const;
};

}

/// OptimizeShadowIV - If IV is used in an int-to-float cast
/// inside the loop then try to eliminate the cast operation.
void LSRInstance::OptimizeShadowIV() {
  const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return;

  for (IVUsers::const_iterator UI = IU.begin(), E = IU.end();
       UI != E; /* empty */) {
    IVUsers::const_iterator CandidateUI = UI;
    ++UI;
    Instruction *ShadowUse = CandidateUI->getUser();
    const Type *DestTy = NULL;

    /* If shadow use is a int->float cast then insert a second IV
       to eliminate this cast.

         for (unsigned i = 0; i < n; ++i)
           foo((double)i);

       is transformed into

         double d = 0.0;
         for (unsigned i = 0; i < n; ++i, ++d)
           foo(d);
    */
    if (UIToFPInst *UCast = dyn_cast<UIToFPInst>(CandidateUI->getUser()))
      DestTy = UCast->getDestTy();
    else if (SIToFPInst *SCast = dyn_cast<SIToFPInst>(CandidateUI->getUser()))
      DestTy = SCast->getDestTy();
    if (!DestTy) continue;

    if (TLI) {
      // If target does not support DestTy natively then do not apply
      // this transformation.
      EVT DVT = TLI->getValueType(DestTy);
      if (!TLI->isTypeLegal(DVT)) continue;
    }

    PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0));
    if (!PH) continue;
    if (PH->getNumIncomingValues() != 2) continue;

    const Type *SrcTy = PH->getType();
    int Mantissa = DestTy->getFPMantissaWidth();
    if (Mantissa == -1) continue;
    if ((int)SE.getTypeSizeInBits(SrcTy) > Mantissa)
      continue;

    unsigned Entry, Latch;
    if (PH->getIncomingBlock(0) == L->getLoopPreheader()) {
      Entry = 0;
      Latch = 1;
    } else {
      Entry = 1;
      Latch = 0;
    }

    ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry));
    if (!Init) continue;
    Constant *NewInit = ConstantFP::get(DestTy, Init->getZExtValue());

    BinaryOperator *Incr =
      dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch));
    if (!Incr) continue;
    if (Incr->getOpcode() != Instruction::Add
        && Incr->getOpcode() != Instruction::Sub)
      continue;

    /* Initialize new IV, double d = 0.0 in above example. */
    ConstantInt *C = NULL;
    if (Incr->getOperand(0) == PH)
      C = dyn_cast<ConstantInt>(Incr->getOperand(1));
    else if (Incr->getOperand(1) == PH)
      C = dyn_cast<ConstantInt>(Incr->getOperand(0));
    else
      continue;

    if (!C) continue;

    // Ignore negative constants, as the code below doesn't handle them
    // correctly. TODO: Remove this restriction.
    if (!C->getValue().isStrictlyPositive()) continue;

    /* Add new PHINode. */
    PHINode *NewPH = PHINode::Create(DestTy, "IV.S.", PH);

    /* create new increment. '++d' in above example. */
    Constant *CFP = ConstantFP::get(DestTy, C->getZExtValue());
    BinaryOperator *NewIncr =
      BinaryOperator::Create(Incr->getOpcode() == Instruction::Add ?
                               Instruction::FAdd : Instruction::FSub,
                             NewPH, CFP, "IV.S.next.", Incr);

    NewPH->addIncoming(NewInit, PH->getIncomingBlock(Entry));
    NewPH->addIncoming(NewIncr, PH->getIncomingBlock(Latch));

    /* Remove cast operation */
    ShadowUse->replaceAllUsesWith(NewPH);
    ShadowUse->eraseFromParent();
    break;
  }
}

/// FindIVUserForCond - If Cond has an operand that is an expression of an IV,
/// set the IV user and stride information and return true, otherwise return
/// false.
bool LSRInstance::FindIVUserForCond(ICmpInst *Cond,
                                    IVStrideUse *&CondUse) {
  for (IVUsers::iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI)
    if (UI->getUser() == Cond) {
      // NOTE: we could handle setcc instructions with multiple uses here, but
      // InstCombine does it as well for simple uses, it's not clear that it
      // occurs enough in real life to handle.
      CondUse = UI;
      return true;
    }
  return false;
}

/// OptimizeMax - Rewrite the loop's terminating condition if it uses
/// a max computation.
///
/// This is a narrow solution to a specific, but acute, problem. For loops
/// like this:
///
///   i = 0;
///   do {
///     p[i] = 0.0;
///   } while (++i < n);
///
/// the trip count isn't just 'n', because 'n' might not be positive. And
/// unfortunately this can come up even for loops where the user didn't use
/// a C do-while loop. For example, seemingly well-behaved top-test loops
/// will commonly be lowered like this:
///
///   if (n > 0) {
///     i = 0;
///     do {
///       p[i] = 0.0;
///     } while (++i < n);
///   }
///
/// and then it's possible for subsequent optimization to obscure the if
/// test in such a way that indvars can't find it.
///
/// When indvars can't find the if test in loops like this, it creates a
/// max expression, which allows it to give the loop a canonical
/// induction variable:
///
///   i = 0;
///   max = n < 1 ? 1 : n;
///   do {
///     p[i] = 0.0;
///   } while (++i != max);
///
/// Canonical induction variables are necessary because the loop passes
/// are designed around them. The most obvious example of this is the
/// LoopInfo analysis, which doesn't remember trip count values. It
/// expects to be able to rediscover the trip count each time it is
/// needed, and it does this using a simple analysis that only succeeds if
/// the loop has a canonical induction variable.
///
/// However, when it comes time to generate code, the maximum operation
/// can be quite costly, especially if it's inside of an outer loop.
///
/// This function solves this problem by detecting this type of loop and
/// rewriting their conditions from ICMP_NE back to ICMP_SLT, and deleting
/// the instructions for the maximum computation.
///
ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) {
  // Check that the loop matches the pattern we're looking for.
  if (Cond->getPredicate() != CmpInst::ICMP_EQ &&
      Cond->getPredicate() != CmpInst::ICMP_NE)
    return Cond;

  SelectInst *Sel = dyn_cast<SelectInst>(Cond->getOperand(1));
  if (!Sel || !Sel->hasOneUse()) return Cond;

  const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return Cond;
  const SCEV *One = SE.getIntegerSCEV(1, BackedgeTakenCount->getType());

  // Add one to the backedge-taken count to get the trip count.
  const SCEV *IterationCount = SE.getAddExpr(BackedgeTakenCount, One);

  // Check for a max calculation that matches the pattern.
  if (!isa<SCEVSMaxExpr>(IterationCount) && !isa<SCEVUMaxExpr>(IterationCount))
    return Cond;
  const SCEVNAryExpr *Max = cast<SCEVNAryExpr>(IterationCount);
  if (Max != SE.getSCEV(Sel)) return Cond;

  // To handle a max with more than two operands, this optimization would
  // require additional checking and setup.
  if (Max->getNumOperands() != 2)
    return Cond;

  const SCEV *MaxLHS = Max->getOperand(0);
  const SCEV *MaxRHS = Max->getOperand(1);
  if (!MaxLHS || MaxLHS != One) return Cond;
  // Check the relevant induction variable for conformance to
  // the pattern.
  const SCEV *IV = SE.getSCEV(Cond->getOperand(0));
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
  if (!AR || !AR->isAffine() ||
      AR->getStart() != One ||
      AR->getStepRecurrence(SE) != One)
    return Cond;

  assert(AR->getLoop() == L &&
         "Loop condition operand is an addrec in a different loop!");

  // Check the right operand of the select, and remember it, as it will
  // be used in the new comparison instruction.
  Value *NewRHS = 0;
  if (SE.getSCEV(Sel->getOperand(1)) == MaxRHS)
    NewRHS = Sel->getOperand(1);
  else if (SE.getSCEV(Sel->getOperand(2)) == MaxRHS)
    NewRHS = Sel->getOperand(2);
  if (!NewRHS) return Cond;

  // Determine the new comparison opcode. It may be signed or unsigned,
  // and the original comparison may be either equality or inequality.
  CmpInst::Predicate Pred =
    isa<SCEVSMaxExpr>(Max) ? CmpInst::ICMP_SLT : CmpInst::ICMP_ULT;
  if (Cond->getPredicate() == CmpInst::ICMP_EQ)
    Pred = CmpInst::getInversePredicate(Pred);

  // Ok, everything looks ok to change the condition into an SLT or SGE and
  // delete the max calculation.
  ICmpInst *NewCond =
    new ICmpInst(Cond, Pred, Cond->getOperand(0), NewRHS, "scmp");

  // Delete the max calculation instructions.
  Cond->replaceAllUsesWith(NewCond);
  CondUse->setUser(NewCond);
  Instruction *Cmp = cast<Instruction>(Sel->getOperand(0));
  Cond->eraseFromParent();
  Sel->eraseFromParent();
  if (Cmp->use_empty())
    Cmp->eraseFromParent();
  return NewCond;
}

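// For illustration (hypothetical IR, in the spirit of the comment above),
// the rewrite turns
//
//   %max = select i1 %cmp, i64 1, i64 %n
//   %exitcond = icmp ne i64 %i.next, %max
//
// into
//
//   %exitcond = icmp slt i64 %i.next, %n
//
// deleting the select (and its feeding compare) once they are unused.
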
/// OptimizeLoopTermCond - Change loop terminating condition to use the
/// postinc iv when possible.
bool
LSRInstance::OptimizeLoopTermCond() {
  SmallPtrSet<Instruction *, 4> PostIncs;

  BasicBlock *LatchBlock = L->getLoopLatch();
  SmallVector<BasicBlock*, 8> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
    BasicBlock *ExitingBlock = ExitingBlocks[i];

    // Get the terminating condition for the loop if possible. If we
    // can, we want to change it to use a post-incremented version of its
    // induction variable, to allow coalescing the live ranges for the IV into
    // one register value.

    BranchInst *TermBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
    if (!TermBr)
      continue;
    // FIXME: Overly conservative, termination condition could be an 'or' etc..
    if (TermBr->isUnconditional() || !isa<ICmpInst>(TermBr->getCondition()))
      continue;

    // Search IVUsesByStride to find Cond's IVUse if there is one.
    IVStrideUse *CondUse = 0;
    ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition());
    if (!FindIVUserForCond(Cond, CondUse))
      continue;

    // If the trip count is computed in terms of a max (due to ScalarEvolution
    // being unable to find a sufficient guard, for example), change the loop
    // comparison to use SLT or ULT instead of NE.
    // One consequence of doing this now is that it disrupts the count-down
    // optimization. That's not always a bad thing though, because in such
    // cases it may still be worthwhile to avoid a max.
    Cond = OptimizeMax(Cond, CondUse);

    // If this exiting block dominates the latch block, it may also use
    // the post-inc value if it won't be shared with other uses.
    // Check for dominance.
    if (!DT.dominates(ExitingBlock, LatchBlock))
      continue;

    // Conservatively avoid trying to use the post-inc value in non-latch
    // exits if there may be pre-inc users in intervening blocks.
    if (LatchBlock != ExitingBlock)
      for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI)
        // Test if the use is reachable from the exiting block. This dominator
        // query is a conservative approximation of reachability.
        if (&*UI != CondUse &&
            !DT.properlyDominates(UI->getUser()->getParent(), ExitingBlock)) {
          // Conservatively assume there may be reuse if the quotient of their
          // strides could be a legal scale.
          const SCEV *A = CondUse->getStride();
          const SCEV *B = UI->getStride();
          if (SE.getTypeSizeInBits(A->getType()) !=
              SE.getTypeSizeInBits(B->getType())) {
            if (SE.getTypeSizeInBits(A->getType()) >
                SE.getTypeSizeInBits(B->getType()))
              B = SE.getSignExtendExpr(B, A->getType());
            else
              A = SE.getSignExtendExpr(A, B->getType());
          }
          if (const SCEVConstant *D =
                dyn_cast_or_null<SCEVConstant>(getExactSDiv(B, A, SE))) {
            // Stride of one or negative one can have reuse with non-addresses.
            if (D->getValue()->isOne() ||
                D->getValue()->isAllOnesValue())
              goto decline_post_inc;
            // Avoid weird situations.
            if (D->getValue()->getValue().getMinSignedBits() >= 64 ||
                D->getValue()->getValue().isMinSignedValue())
              goto decline_post_inc;
            // Without TLI, assume that any stride might be valid, and so any
            // use might be shared.
            if (!TLI)
              goto decline_post_inc;
            // Check for possible scaled-address reuse.
            const Type *AccessTy = getAccessType(UI->getUser());
            TargetLowering::AddrMode AM;
            AM.Scale = D->getValue()->getSExtValue();
            if (TLI->isLegalAddressingMode(AM, AccessTy))
              goto decline_post_inc;
            AM.Scale = -AM.Scale;
            if (TLI->isLegalAddressingMode(AM, AccessTy))
              goto decline_post_inc;
          }
        }

    DEBUG(dbgs() << "  Change loop exiting icmp to use postinc iv: "
                 << *Cond << '\n');

    // It's possible for the setcc instruction to be anywhere in the loop, and
    // possible for it to have multiple users. If it is not immediately before
    // the exiting block branch, move it.
    if (&*++BasicBlock::iterator(Cond) != TermBr) {
      if (Cond->hasOneUse()) {
        Cond->moveBefore(TermBr);
      } else {
        // Clone the terminating condition and insert into the loopend.
        ICmpInst *OldCond = Cond;
        Cond = cast<ICmpInst>(Cond->clone());
        Cond->setName(L->getHeader()->getName() + ".termcond");
        ExitingBlock->getInstList().insert(TermBr, Cond);

        // Clone the IVUse, as the old use still exists!
        CondUse = &IU.AddUser(CondUse->getStride(), CondUse->getOffset(),
                              Cond, CondUse->getOperandValToReplace());
        TermBr->replaceUsesOfWith(OldCond, Cond);
      }
    }

    // If we get to here, we know that we can transform the setcc instruction to
    // use the post-incremented version of the IV, allowing us to coalesce the
    // live ranges for the IV correctly.
    CondUse->setOffset(SE.getMinusSCEV(CondUse->getOffset(),
                                       CondUse->getStride()));
    CondUse->setIsUseOfPostIncrementedValue(true);
    Changed = true;

    PostIncs.insert(Cond);
  decline_post_inc:;
  }

  // Determine an insertion point for the loop induction variable increment. It
  // must dominate all the post-inc comparisons we just set up, and it must
  // dominate the loop latch edge.
  IVIncInsertPos = L->getLoopLatch()->getTerminator();
  for (SmallPtrSet<Instruction *, 4>::const_iterator I = PostIncs.begin(),
       E = PostIncs.end(); I != E; ++I) {
    BasicBlock *BB =
      DT.findNearestCommonDominator(IVIncInsertPos->getParent(),
                                    (*I)->getParent());
    if (BB == (*I)->getParent())
      IVIncInsertPos = *I;
    else if (BB != IVIncInsertPos->getParent())
      IVIncInsertPos = BB->getTerminator();
  }

  return Changed;
}

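// For illustration (a hypothetical use): when the condition is switched to
// the post-incremented value above, CondUse's offset is reduced by the
// stride so that the SCEV seen by later phases still describes the same
// sequence of compared values; only the insertion point and register
// lifetime change, not the trip count.
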
  if (Kind == LSRUse::Address && AccessTy != LU.AccessTy)
    NewAccessTy = Type::getVoidTy(AccessTy->getContext());

  // Update the use.
  LU.MinOffset = NewMinOffset;
  LU.MaxOffset = NewMaxOffset;
  LU.AccessTy = NewAccessTy;
  if (NewOffset != LU.Offsets.back())
    LU.Offsets.push_back(NewOffset);
  return true;
}

/// getUse - Return an LSRUse index and an offset value for a fixup which
/// needs the given expression, with the given kind and optional access type.
/// Either reuse an existing use or create a new one, as needed.
std::pair<size_t, int64_t>
LSRInstance::getUse(const SCEV *&Expr,
                    LSRUse::KindType Kind, const Type *AccessTy) {
  const SCEV *Copy = Expr;
  int64_t Offset = ExtractImmediate(Expr, SE);

  // Basic uses can't accept any offset, for example.
  if (!isAlwaysFoldable(Offset, 0, /*HasBaseReg=*/true, Kind, AccessTy, TLI)) {
    Expr = Copy;
    Offset = 0;
  }

  std::pair<UseMapTy::iterator, bool> P =
    UseMap.insert(std::make_pair(Expr, 0));
  if (!P.second) {
    // A use already existed with this base.
    size_t LUIdx = P.first->second;
    LSRUse &LU = Uses[LUIdx];
    if (reconcileNewOffset(LU, Offset, Kind, AccessTy))
      // Reuse this use.
      return std::make_pair(LUIdx, Offset);
  }

  // Create a new use.
  size_t LUIdx = Uses.size();
  P.first->second = LUIdx;
  Uses.push_back(LSRUse(Kind, AccessTy));
  LSRUse &LU = Uses[LUIdx];

  // We don't need to track redundant offsets, but we don't need to go out
  // of our way here to avoid them.
  if (LU.Offsets.empty() || Offset != LU.Offsets.back())
    LU.Offsets.push_back(Offset);

  LU.MinOffset = Offset;
  LU.MaxOffset = Offset;
  return std::make_pair(LUIdx, Offset);
}

void LSRInstance::CollectInterestingTypesAndFactors() {
  SmallSetVector<const SCEV *, 4> Strides;

  // Collect interesting types and strides.
  for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) {
    const SCEV *Stride = UI->getStride();

    // Collect interesting types.
    Types.insert(SE.getEffectiveSCEVType(Stride->getType()));

    // Add the stride for this loop.
    Strides.insert(Stride);

    // Add strides for other mentioned loops.
    for (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(UI->getOffset());
         AR; AR = dyn_cast<SCEVAddRecExpr>(AR->getStart()))
      Strides.insert(AR->getStepRecurrence(SE));
  }

  // Compute interesting factors from the set of interesting strides.
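  // Illustrative example: strides 4 and 12 yield the factor 3, since their
  // exact quotient is constant; a pair like 4 and 6 contributes no factor,
  // since neither stride exactly divides the other.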
  for (SmallSetVector<const SCEV *, 4>::const_iterator
       I = Strides.begin(), E = Strides.end(); I != E; ++I)
    for (SmallSetVector<const SCEV *, 4>::const_iterator NewStrideIter =
         next(I); NewStrideIter != E; ++NewStrideIter) {
      const SCEV *OldStride = *I;
      const SCEV *NewStride = *NewStrideIter;

      if (SE.getTypeSizeInBits(OldStride->getType()) !=
          SE.getTypeSizeInBits(NewStride->getType())) {
        if (SE.getTypeSizeInBits(OldStride->getType()) >
            SE.getTypeSizeInBits(NewStride->getType()))
          NewStride = SE.getSignExtendExpr(NewStride, OldStride->getType());
        else
          OldStride = SE.getSignExtendExpr(OldStride, NewStride->getType());
      }
      if (const SCEVConstant *Factor =
            dyn_cast_or_null<SCEVConstant>(getExactSDiv(NewStride, OldStride,
                                                        SE, true))) {
        if (Factor->getValue()->getValue().getMinSignedBits() <= 64)
          Factors.insert(Factor->getValue()->getValue().getSExtValue());
      } else if (const SCEVConstant *Factor =
                   dyn_cast_or_null<SCEVConstant>(getExactSDiv(OldStride,
                                                               NewStride,
                                                               SE, true))) {
        if (Factor->getValue()->getValue().getMinSignedBits() <= 64)
          Factors.insert(Factor->getValue()->getValue().getSExtValue());
      }
    }

  // If all uses use the same type, don't bother looking for truncation-based
  // reuse.
  if (Types.size() == 1)
    Types.clear();

  DEBUG(print_factors_and_types(dbgs()));
}

void LSRInstance::CollectFixupsAndInitialFormulae() {
  for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) {
    // Record the uses.
    LSRFixup &LF = getNewFixup();
    LF.UserInst = UI->getUser();
    LF.OperandValToReplace = UI->getOperandValToReplace();
    if (UI->isUseOfPostIncrementedValue())
      LF.PostIncLoop = L;

    LSRUse::KindType Kind = LSRUse::Basic;
    const Type *AccessTy = 0;
    if (isAddressUse(LF.UserInst, LF.OperandValToReplace)) {
      Kind = LSRUse::Address;
      AccessTy = getAccessType(LF.UserInst);
    }

    const SCEV *S = IU.getCanonicalExpr(*UI);

    // Equality (== and !=) ICmps are special. We can rewrite (i == N) as
    // (N - i == 0), and this allows (N - i) to be the expression that we work
    // with rather than just N or i, so we can consider the register
    // requirements for both N and i at the same time. Limiting this code to
    // equality icmps is not a problem because all interesting loops use
    // equality icmps, thanks to IndVarSimplify.
    if (ICmpInst *CI = dyn_cast<ICmpInst>(LF.UserInst))
      if (CI->isEquality()) {
        // Swap the operands if needed to put the OperandValToReplace on the
        // left, for consistency.
        Value *NV = CI->getOperand(1);
        if (NV == LF.OperandValToReplace) {
          CI->setOperand(1, CI->getOperand(0));
          CI->setOperand(0, NV);
        }

        // x == y  -->  x - y == 0
        const SCEV *N = SE.getSCEV(NV);
        if (N->isLoopInvariant(L)) {
          Kind = LSRUse::ICmpZero;
          S = SE.getMinusSCEV(N, S);
        }

        // -1 and the negations of all interesting strides (except the negation
        // of -1) are now also interesting.
        for (size_t i = 0, e = Factors.size(); i != e; ++i)
          if (Factors[i] != -1)
            Factors.insert(-(uint64_t)Factors[i]);
        Factors.insert(-1);
      }

    // Set up the initial formula for this use.
    std::pair<size_t, int64_t> P = getUse(S, Kind, AccessTy);
    LF.LUIdx = P.first;
    LF.Offset = P.second;
    LSRUse &LU = Uses[LF.LUIdx];
    LU.AllFixupsOutsideLoop &= !L->contains(LF.UserInst);

    // If this is the first use of this LSRUse, give it a formula.
    if (LU.Formulae.empty()) {
      InsertInitialFormula(S, LU, LF.LUIdx);
      CountRegisters(LU.Formulae.back(), LF.LUIdx);
    }
  }

  DEBUG(print_fixups(dbgs()));
}

void
LSRInstance::InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx) {
  Formula F;
  F.InitialMatch(S, L, SE, DT);
  bool Inserted = InsertFormula(LU, LUIdx, F);
  assert(Inserted && "Initial formula already exists!"); (void)Inserted;
}

void
LSRInstance::InsertSupplementalFormula(const SCEV *S,
                                       LSRUse &LU, size_t LUIdx) {
  Formula F;
  F.BaseRegs.push_back(S);
  F.AM.HasBaseReg = true;
  bool Inserted = InsertFormula(LU, LUIdx, F);
  assert(Inserted && "Supplemental formula already exists!"); (void)Inserted;
}

/// CountRegisters - Note which registers are used by the given formula,
/// updating RegUses.
void LSRInstance::CountRegisters(const Formula &F, size_t LUIdx) {
  if (F.ScaledReg)
    RegUses.CountRegister(F.ScaledReg, LUIdx);
  for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
       E = F.BaseRegs.end(); I != E; ++I)
    RegUses.CountRegister(*I, LUIdx);
}

/// InsertFormula - If the given formula has not yet been inserted, add it to
/// the list, and return true. Return false otherwise.
bool LSRInstance::InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F) {
  if (!LU.InsertFormula(F))
    return false;

  CountRegisters(F, LUIdx);
  return true;
}

/// CollectLoopInvariantFixupsAndFormulae - Check for other uses of
/// loop-invariant values which we're tracking. These other uses will pin these
/// values in registers, making them less profitable for elimination.
/// TODO: This currently misses non-constant addrec step registers.
/// TODO: Should this give more weight to users inside the loop?
void
LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
  SmallVector<const SCEV *, 8> Worklist(RegUses.begin(), RegUses.end());
  SmallPtrSet<const SCEV *, 8> Inserted;

  while (!Worklist.empty()) {
    const SCEV *S = Worklist.pop_back_val();

    if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S))
      Worklist.insert(Worklist.end(), N->op_begin(), N->op_end());
    else if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S))
      Worklist.push_back(C->getOperand());
    else if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
      Worklist.push_back(D->getLHS());
      Worklist.push_back(D->getRHS());
    } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
      if (!Inserted.insert(U)) continue;
      const Value *V = U->getValue();
      if (const Instruction *Inst = dyn_cast<Instruction>(V))
        if (L->contains(Inst)) continue;
      for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end();
           UI != UE; ++UI) {
        const Instruction *UserInst = dyn_cast<Instruction>(*UI);
        // Ignore non-instructions.
        if (!UserInst)
          continue;
        // Ignore instructions in other functions (as can happen with
        // Constants).
        if (UserInst->getParent()->getParent() != L->getHeader()->getParent())
          continue;
        // Ignore instructions not dominated by the loop.
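        // For a PHI, the use effectively occurs in the incoming block
        // corresponding to the operand, not in the PHI's own block, so that
        // is the block tested for dominance below.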
        const BasicBlock *UseBB = !isa<PHINode>(UserInst) ?
          UserInst->getParent() :
          cast<PHINode>(UserInst)->getIncomingBlock(
            PHINode::getIncomingValueNumForOperand(UI.getOperandNo()));
        if (!DT.dominates(L->getHeader(), UseBB))
          continue;
        // Ignore uses which are part of other SCEV expressions, to avoid
        // analyzing them multiple times.
        if (SE.isSCEVable(UserInst->getType()) &&
            !isa<SCEVUnknown>(SE.getSCEV(const_cast<Instruction *>(UserInst))))
          continue;
        // Ignore icmp instructions which are already being analyzed.
        if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UserInst)) {
          unsigned OtherIdx = !UI.getOperandNo();
          Value *OtherOp = const_cast<Value *>(ICI->getOperand(OtherIdx));
          if (SE.getSCEV(OtherOp)->hasComputableLoopEvolution(L))
            continue;
        }

        LSRFixup &LF = getNewFixup();
        LF.UserInst = const_cast<Instruction *>(UserInst);
        LF.OperandValToReplace = UI.getUse();
        std::pair<size_t, int64_t> P = getUse(S, LSRUse::Basic, 0);
        LF.LUIdx = P.first;
        LF.Offset = P.second;
        LSRUse &LU = Uses[LF.LUIdx];
        LU.AllFixupsOutsideLoop &= !L->contains(LF.UserInst);
        InsertSupplementalFormula(U, LU, LF.LUIdx);
        CountRegisters(LU.Formulae.back(), Uses.size() - 1);
        break;
      }
    }
  }
}

/// CollectSubexprs - Split S into subexpressions which can be pulled out into
/// separate registers. If C is non-null, multiply each subexpression by C.
static void CollectSubexprs(const SCEV *S, const SCEVConstant *C,
                            SmallVectorImpl<const SCEV *> &Ops,
                            ScalarEvolution &SE) {
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    // Break out add operands.
    for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
         I != E; ++I)
      CollectSubexprs(*I, C, Ops, SE);
    return;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    // Split a non-zero base out of an addrec.
    if (!AR->getStart()->isZero()) {
      CollectSubexprs(SE.getAddRecExpr(SE.getIntegerSCEV(0, AR->getType()),
                                       AR->getStepRecurrence(SE),
                                       AR->getLoop()), C, Ops, SE);
      CollectSubexprs(AR->getStart(), C, Ops, SE);
      return;
    }
  } else if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    // Break (C * (a + b + c)) into C*a + C*b + C*c.
    if (Mul->getNumOperands() == 2)
      if (const SCEVConstant *Op0 =
            dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
        CollectSubexprs(Mul->getOperand(1),
                        C ? cast<SCEVConstant>(SE.getMulExpr(C, Op0)) : Op0,
                        Ops, SE);
        return;
      }
  }

  // Otherwise use the value itself.
  Ops.push_back(C ? SE.getMulExpr(C, S) : S);
}

/// GenerateReassociations - Split out subexpressions from adds and the bases of
/// addrecs.
void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx,
                                         Formula Base,
                                         unsigned Depth) {
  // Arbitrarily cap recursion to protect compile time.
  if (Depth >= 3) return;

  for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
    const SCEV *BaseReg = Base.BaseRegs[i];

    SmallVector<const SCEV *, 8> AddOps;
    CollectSubexprs(BaseReg, 0, AddOps, SE);
    if (AddOps.size() == 1) continue;

    for (SmallVectorImpl<const SCEV *>::const_iterator J = AddOps.begin(),
         JE = AddOps.end(); J != JE; ++J) {
      // Don't pull a constant into a register if the constant could be folded
      // into an immediate field.
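      // Illustrative example: for an address use with base register
      // (reg + 4), breaking the 4 out into its own register is a loss if
      // the target could instead fold 4 as an immediate displacement.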
      if (isAlwaysFoldable(*J, LU.MinOffset, LU.MaxOffset,
                           Base.getNumRegs() > 1,
                           LU.Kind, LU.AccessTy, TLI, SE))
        continue;

      // Collect all operands except *J.
      SmallVector<const SCEV *, 8> InnerAddOps;
      for (SmallVectorImpl<const SCEV *>::const_iterator K = AddOps.begin(),
           KE = AddOps.end(); K != KE; ++K)
        if (K != J)
          InnerAddOps.push_back(*K);

      // Don't leave just a constant behind in a register if the constant could
      // be folded into an immediate field.
      if (InnerAddOps.size() == 1 &&
          isAlwaysFoldable(InnerAddOps[0], LU.MinOffset, LU.MaxOffset,
                           Base.getNumRegs() > 1,
                           LU.Kind, LU.AccessTy, TLI, SE))
        continue;

      Formula F = Base;
      F.BaseRegs[i] = SE.getAddExpr(InnerAddOps);
      F.BaseRegs.push_back(*J);
      if (InsertFormula(LU, LUIdx, F))
        // If that formula hadn't been seen before, recurse to find more like
        // it.
        GenerateReassociations(LU, LUIdx, LU.Formulae.back(), Depth+1);
    }
  }
}

/// GenerateCombinations - Generate a formula consisting of all of the
/// loop-dominating registers added into a single register.
void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx,
                                       Formula Base) {
  // This method is only interesting with more than one base register.
  if (Base.BaseRegs.size() <= 1) return;

  Formula F = Base;
  F.BaseRegs.clear();
  SmallVector<const SCEV *, 4> Ops;
  for (SmallVectorImpl<const SCEV *>::const_iterator
       I = Base.BaseRegs.begin(), E = Base.BaseRegs.end(); I != E; ++I) {
    const SCEV *BaseReg = *I;
    if (BaseReg->properlyDominates(L->getHeader(), &DT) &&
        !BaseReg->hasComputableLoopEvolution(L))
      Ops.push_back(BaseReg);
    else
      F.BaseRegs.push_back(BaseReg);
  }
  if (Ops.size() > 1) {
    const SCEV *Sum = SE.getAddExpr(Ops);
    // TODO: If Sum is zero, it probably means ScalarEvolution missed an
    // opportunity to fold something. For now, just ignore such cases
    // rather than proceed with zero in a register.
    if (!Sum->isZero()) {
      F.BaseRegs.push_back(Sum);
      (void)InsertFormula(LU, LUIdx, F);
    }
  }
}

/// GenerateSymbolicOffsets - Generate reuse formulae using symbolic offsets.
void LSRInstance::GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx,
                                          Formula Base) {
  // We can't add a symbolic offset if the address already contains one.
  if (Base.AM.BaseGV) return;

  for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
    const SCEV *G = Base.BaseRegs[i];
    GlobalValue *GV = ExtractSymbol(G, SE);
    if (G->isZero() || !GV)
      continue;
    Formula F = Base;
    F.AM.BaseGV = GV;
    if (!isLegalUse(F.AM, LU.MinOffset, LU.MaxOffset,
                    LU.Kind, LU.AccessTy, TLI))
      continue;
    F.BaseRegs[i] = G;
    (void)InsertFormula(LU, LUIdx, F);
  }
}

/// GenerateConstantOffsets - Generate reuse formulae using constant offsets.
void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx,
                                          Formula Base) {
  // TODO: For now, just add the min and max offset, because it usually isn't
  // worthwhile looking at everything in between.
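  // Illustrative example: if this use's fixup offsets span [0, 64], only the
  // endpoints are tried as shifts; shifting by 64 yields a base register of
  // (reg + 64) with a compensating -64 folded into the immediate.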
  SmallVector<int64_t, 4> Worklist;
  Worklist.push_back(LU.MinOffset);
  if (LU.MaxOffset != LU.MinOffset)
    Worklist.push_back(LU.MaxOffset);

  for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
    const SCEV *G = Base.BaseRegs[i];

    for (SmallVectorImpl<int64_t>::const_iterator I = Worklist.begin(),
         E = Worklist.end(); I != E; ++I) {
      Formula F = Base;
      F.AM.BaseOffs = (uint64_t)Base.AM.BaseOffs - *I;
      if (isLegalUse(F.AM, LU.MinOffset - *I, LU.MaxOffset - *I,
                     LU.Kind, LU.AccessTy, TLI)) {
        F.BaseRegs[i] = SE.getAddExpr(G, SE.getIntegerSCEV(*I, G->getType()));

        (void)InsertFormula(LU, LUIdx, F);
      }
    }

    int64_t Imm = ExtractImmediate(G, SE);
    if (G->isZero() || Imm == 0)
      continue;
    Formula F = Base;
    F.AM.BaseOffs = (uint64_t)F.AM.BaseOffs + Imm;
    if (!isLegalUse(F.AM, LU.MinOffset, LU.MaxOffset,
                    LU.Kind, LU.AccessTy, TLI))
      continue;
    F.BaseRegs[i] = G;
    (void)InsertFormula(LU, LUIdx, F);
  }
}

/// GenerateICmpZeroScales - For ICmpZero, check to see if we can scale up
/// the comparison. For example, x == y -> x*c == y*c.
void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx,
                                         Formula Base) {
  if (LU.Kind != LSRUse::ICmpZero) return;

  // Determine the integer type for the base formula.
  const Type *IntTy = Base.getType();
  if (!IntTy) return;
  if (SE.getTypeSizeInBits(IntTy) > 64) return;

  // Don't do this if there is more than one offset.
  if (LU.MinOffset != LU.MaxOffset) return;

  assert(!Base.AM.BaseGV && "ICmpZero use is not legal!");

  // Check each interesting stride.
  for (SmallSetVector<int64_t, 8>::const_iterator
       I = Factors.begin(), E = Factors.end(); I != E; ++I) {
    int64_t Factor = *I;
    Formula F = Base;

    // Check that the multiplication doesn't overflow.
    if (F.AM.BaseOffs == INT64_MIN && Factor == -1)
      continue;
    F.AM.BaseOffs = (uint64_t)Base.AM.BaseOffs * Factor;
    if (F.AM.BaseOffs / Factor != Base.AM.BaseOffs)
      continue;

    // Check that multiplying with the use offset doesn't overflow.
    int64_t Offset = LU.MinOffset;
    if (Offset == INT64_MIN && Factor == -1)
      continue;
    Offset = (uint64_t)Offset * Factor;
    if (Offset / Factor != LU.MinOffset)
      continue;

    // Check that this scale is legal.
    if (!isLegalUse(F.AM, Offset, Offset, LU.Kind, LU.AccessTy, TLI))
      continue;

    // Compensate for the use having MinOffset built into it.
    F.AM.BaseOffs = (uint64_t)F.AM.BaseOffs + Offset - LU.MinOffset;

    const SCEV *FactorS = SE.getIntegerSCEV(Factor, IntTy);

    // Check that multiplying with each base register doesn't overflow.
    for (size_t i = 0, e = F.BaseRegs.size(); i != e; ++i) {
      F.BaseRegs[i] = SE.getMulExpr(F.BaseRegs[i], FactorS);
      if (getExactSDiv(F.BaseRegs[i], FactorS, SE) != Base.BaseRegs[i])
        goto next;
    }

    // Check that multiplying with the scaled register doesn't overflow.
    if (F.ScaledReg) {
      F.ScaledReg = SE.getMulExpr(F.ScaledReg, FactorS);
      if (getExactSDiv(F.ScaledReg, FactorS, SE) != Base.ScaledReg)
        continue;
    }

    // If we make it here and it's legal, add it.
    (void)InsertFormula(LU, LUIdx, F);
  next:;
  }
}

/// GenerateScales - Generate stride factor reuse formulae by making use of
/// scaled-offset address modes, for example.
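/// Illustrative example: a base register {0,+,4}<%L> with factor 4 can be
/// re-expressed as the register {0,+,1}<%L> with a scale of 4, which a
/// target supporting reg + reg*4 addressing can fold into the address.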
void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx,
                                 Formula Base) {
  // Determine the integer type for the base formula.
  const Type *IntTy = Base.getType();
  if (!IntTy) return;

  // If this Formula already has a scaled register, we can't add another one.
  if (Base.AM.Scale != 0) return;

  // Check each interesting stride.
  for (SmallSetVector<int64_t, 8>::const_iterator
       I = Factors.begin(), E = Factors.end(); I != E; ++I) {
    int64_t Factor = *I;

    Base.AM.Scale = Factor;
    Base.AM.HasBaseReg = Base.BaseRegs.size() > 1;
    // Check whether this scale is going to be legal.
    if (!isLegalUse(Base.AM, LU.MinOffset, LU.MaxOffset,
                    LU.Kind, LU.AccessTy, TLI)) {
      // As a special case, handle out-of-loop Basic users with the Special
      // use kind.
      // TODO: Reconsider this special case.
      if (LU.Kind == LSRUse::Basic &&
          isLegalUse(Base.AM, LU.MinOffset, LU.MaxOffset,
                     LSRUse::Special, LU.AccessTy, TLI) &&
          LU.AllFixupsOutsideLoop)
        LU.Kind = LSRUse::Special;
      else
        continue;
    }
    // For an ICmpZero, negating a solitary base register won't lead to
    // new solutions.
    if (LU.Kind == LSRUse::ICmpZero &&
        !Base.AM.HasBaseReg && Base.AM.BaseOffs == 0 && !Base.AM.BaseGV)
      continue;
    // For each addrec base reg, apply the scale, if possible.
    for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
      if (const SCEVAddRecExpr *AR =
            dyn_cast<SCEVAddRecExpr>(Base.BaseRegs[i])) {
        const SCEV *FactorS = SE.getIntegerSCEV(Factor, IntTy);
        if (FactorS->isZero())
          continue;
        // Divide out the factor, ignoring high bits, since we'll be
        // scaling the value back up in the end.
        if (const SCEV *Quotient = getExactSDiv(AR, FactorS, SE, true)) {
          // TODO: This could be optimized to avoid all the copying.
          Formula F = Base;
          F.ScaledReg = Quotient;
          std::swap(F.BaseRegs[i], F.BaseRegs.back());
          F.BaseRegs.pop_back();
          (void)InsertFormula(LU, LUIdx, F);
        }
      }
  }
}

/// GenerateTruncates - Generate reuse formulae from different IV types.
void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx,
                                    Formula Base) {
  // This requires TargetLowering to tell us which truncates are free.
  if (!TLI) return;

  // Don't bother truncating symbolic values.
  if (Base.AM.BaseGV) return;

  // Determine the integer type for the base formula.
  const Type *DstTy = Base.getType();
  if (!DstTy) return;
  DstTy = SE.getEffectiveSCEVType(DstTy);

  for (SmallSetVector<const Type *, 4>::const_iterator
       I = Types.begin(), E = Types.end(); I != E; ++I) {
    const Type *SrcTy = *I;
    if (SrcTy != DstTy && TLI->isTruncateFree(SrcTy, DstTy)) {
      Formula F = Base;

      if (F.ScaledReg) F.ScaledReg = SE.getAnyExtendExpr(F.ScaledReg, *I);
      for (SmallVectorImpl<const SCEV *>::iterator J = F.BaseRegs.begin(),
           JE = F.BaseRegs.end(); J != JE; ++J)
        *J = SE.getAnyExtendExpr(*J, SrcTy);

      // TODO: This assumes we've done basic processing on all uses and
      // have an idea what the register usage is.
      if (!F.hasRegsUsedByUsesOtherThan(LUIdx, RegUses))
        continue;

      (void)InsertFormula(LU, LUIdx, F);
    }
  }
}

namespace {

/// WorkItem - Helper class for GenerateCrossUseConstantOffsets. It's used to
/// defer modifications so that the search phase doesn't have to worry about
/// the data structures moving underneath it.
struct WorkItem {
  size_t LUIdx;
  int64_t Imm;
  const SCEV *OrigReg;

  WorkItem(size_t LI, int64_t I, const SCEV *R)
    : LUIdx(LI), Imm(I), OrigReg(R) {}

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

void WorkItem::print(raw_ostream &OS) const {
  OS << "in formulae referencing " << *OrigReg << " in use " << LUIdx
     << ", add offset " << Imm;
}

void WorkItem::dump() const {
  print(errs()); errs() << '\n';
}

/// GenerateCrossUseConstantOffsets - Look for registers which are a constant
/// distance apart and try to form reuse opportunities between them.
void LSRInstance::GenerateCrossUseConstantOffsets() {
  // Group the registers by their value without any added constant offset.
  typedef std::map<int64_t, const SCEV *> ImmMapTy;
  typedef DenseMap<const SCEV *, ImmMapTy> RegMapTy;
  RegMapTy Map;
  DenseMap<const SCEV *, SmallBitVector> UsedByIndicesMap;
  SmallVector<const SCEV *, 8> Sequence;
  for (RegUseTracker::const_iterator I = RegUses.begin(), E = RegUses.end();
       I != E; ++I) {
    const SCEV *Reg = *I;
    int64_t Imm = ExtractImmediate(Reg, SE);
    std::pair<RegMapTy::iterator, bool> Pair =
      Map.insert(std::make_pair(Reg, ImmMapTy()));
    if (Pair.second)
      Sequence.push_back(Reg);
    Pair.first->second.insert(std::make_pair(Imm, *I));
    UsedByIndicesMap[Reg] |= RegUses.getUsedByIndices(*I);
  }

  // Now examine each set of registers with the same base value. Build up
  // a list of work to do and do the work in a separate step so that we're
  // not adding formulae and register counts while we're searching.
  SmallVector<WorkItem, 32> WorkItems;
  SmallSet<std::pair<size_t, int64_t>, 32> UniqueItems;
  for (SmallVectorImpl<const SCEV *>::const_iterator I = Sequence.begin(),
       E = Sequence.end(); I != E; ++I) {
    const SCEV *Reg = *I;
    const ImmMapTy &Imms = Map.find(Reg)->second;

    // It's not worthwhile looking for reuse if there's only one offset.
    if (Imms.size() == 1)
      continue;

    DEBUG(dbgs() << "Generating cross-use offsets for " << *Reg << ':';
          for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end();
               J != JE; ++J)
            dbgs() << ' ' << J->first;
          dbgs() << '\n');

    // Examine each offset.
    for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end();
         J != JE; ++J) {
      const SCEV *OrigReg = J->second;

      int64_t JImm = J->first;
      const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(OrigReg);

      if (!isa<SCEVConstant>(OrigReg) &&
          UsedByIndicesMap[Reg].count() == 1) {
        DEBUG(dbgs() << "Skipping cross-use reuse for " << *OrigReg << '\n');
        continue;
      }

      // Conservatively examine offsets between this orig reg and a few
      // selected other orig regs.
      ImmMapTy::const_iterator OtherImms[] = {
        Imms.begin(), prior(Imms.end()),
        Imms.upper_bound((Imms.begin()->first + prior(Imms.end())->first) / 2)
      };
      for (size_t i = 0, e = array_lengthof(OtherImms); i != e; ++i) {
        ImmMapTy::const_iterator M = OtherImms[i];
        if (M == J || M == JE) continue;

        // Compute the difference between the two.
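        // Illustrative example: if registers (A + 4) and (A + 20) both
        // appear, the difference 16 is recorded so that formulae using
        // (A + 20) can be rewritten as (A + 4) plus an immediate of 16.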
        int64_t Imm = (uint64_t)JImm - M->first;
        for (int LUIdx = UsedByIndices.find_first(); LUIdx != -1;
             LUIdx = UsedByIndices.find_next(LUIdx))
          // Make a memo of this use, offset, and register tuple.
          if (UniqueItems.insert(std::make_pair(LUIdx, Imm)))
            WorkItems.push_back(WorkItem(LUIdx, Imm, OrigReg));
      }
    }
  }

  Map.clear();
  Sequence.clear();
  UsedByIndicesMap.clear();
  UniqueItems.clear();

  // Now iterate through the worklist and add new formulae.
  for (SmallVectorImpl<WorkItem>::const_iterator I = WorkItems.begin(),
       E = WorkItems.end(); I != E; ++I) {
    const WorkItem &WI = *I;
    size_t LUIdx = WI.LUIdx;
    LSRUse &LU = Uses[LUIdx];
    int64_t Imm = WI.Imm;
    const SCEV *OrigReg = WI.OrigReg;

    const Type *IntTy = SE.getEffectiveSCEVType(OrigReg->getType());
    const SCEV *NegImmS = SE.getSCEV(ConstantInt::get(IntTy, -(uint64_t)Imm));
    unsigned BitWidth = SE.getTypeSizeInBits(IntTy);

    // TODO: Use a more targeted data structure.
    for (size_t L = 0, LE = LU.Formulae.size(); L != LE; ++L) {
      Formula F = LU.Formulae[L];
      // Use the immediate in the scaled register.
      if (F.ScaledReg == OrigReg) {
        int64_t Offs = (uint64_t)F.AM.BaseOffs +
                       Imm * (uint64_t)F.AM.Scale;
        // Don't create 50 + reg(-50).
        if (F.referencesReg(SE.getSCEV(
                   ConstantInt::get(IntTy, -(uint64_t)Offs))))
          continue;
        Formula NewF = F;
        NewF.AM.BaseOffs = Offs;
        if (!isLegalUse(NewF.AM, LU.MinOffset, LU.MaxOffset,
                        LU.Kind, LU.AccessTy, TLI))
          continue;
        NewF.ScaledReg = SE.getAddExpr(NegImmS, NewF.ScaledReg);

        // If the new scale is a constant in a register, and adding the constant
        // value to the immediate would produce a value closer to zero than the
        // immediate itself, then the formula isn't worthwhile.
        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(NewF.ScaledReg))
          if (C->getValue()->getValue().isNegative() !=
                (NewF.AM.BaseOffs < 0) &&
              (C->getValue()->getValue().abs() * APInt(BitWidth, F.AM.Scale))
                .ule(APInt(BitWidth, NewF.AM.BaseOffs).abs()))
            continue;

        // OK, looks good.
        (void)InsertFormula(LU, LUIdx, NewF);
      } else {
        // Use the immediate in a base register.
        for (size_t N = 0, NE = F.BaseRegs.size(); N != NE; ++N) {
          const SCEV *BaseReg = F.BaseRegs[N];
          if (BaseReg != OrigReg)
            continue;
          Formula NewF = F;
          NewF.AM.BaseOffs = (uint64_t)NewF.AM.BaseOffs + Imm;
          if (!isLegalUse(NewF.AM, LU.MinOffset, LU.MaxOffset,
                          LU.Kind, LU.AccessTy, TLI))
            continue;
          NewF.BaseRegs[N] = SE.getAddExpr(NegImmS, BaseReg);

          // If the new formula has a constant in a register, and adding the
          // constant value to the immediate would produce a value closer to
          // zero than the immediate itself, then the formula isn't worthwhile.
          for (SmallVectorImpl<const SCEV *>::const_iterator
               J = NewF.BaseRegs.begin(), JE = NewF.BaseRegs.end();
               J != JE; ++J)
            if (const SCEVConstant *C = dyn_cast<SCEVConstant>(*J))
              if (C->getValue()->getValue().isNegative() !=
                    (NewF.AM.BaseOffs < 0) &&
                  C->getValue()->getValue().abs()
                    .ule(APInt(BitWidth, NewF.AM.BaseOffs).abs()))
                goto skip_formula;

          // Ok, looks good.
          (void)InsertFormula(LU, LUIdx, NewF);
          break;
        skip_formula:;
        }
      }
    }
  }
}

/// GenerateAllReuseFormulae - Generate formulae for each use.
void
LSRInstance::GenerateAllReuseFormulae() {
  // This is split into multiple loops so that hasRegsUsedByUsesOtherThan
  // queries are more precise.
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateReassociations(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateCombinations(LU, LUIdx, LU.Formulae[i]);
  }
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateSymbolicOffsets(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateConstantOffsets(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateICmpZeroScales(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateScales(LU, LUIdx, LU.Formulae[i]);
  }
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateTruncates(LU, LUIdx, LU.Formulae[i]);
  }

  GenerateCrossUseConstantOffsets();
}

/// FilterOutUndesirableDedicatedRegisters - If there are multiple formulae
/// with the same set of registers used by other uses, pick the best one and
/// delete the others.
void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
#ifndef NDEBUG
  bool Changed = false;
#endif

  // Collect the best formula for each unique set of shared registers. This
  // is reset for each use.
  typedef DenseMap<SmallVector<const SCEV *, 2>, size_t, UniquifierDenseMapInfo>
    BestFormulaeTy;
  BestFormulaeTy BestFormulae;

  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    FormulaSorter Sorter(L, LU, SE, DT);

    // Clear out the set of used regs; it will be recomputed.
    LU.Regs.clear();

    for (size_t FIdx = 0, NumForms = LU.Formulae.size();
         FIdx != NumForms; ++FIdx) {
      Formula &F = LU.Formulae[FIdx];

      SmallVector<const SCEV *, 2> Key;
      for (SmallVectorImpl<const SCEV *>::const_iterator J = F.BaseRegs.begin(),
           JE = F.BaseRegs.end(); J != JE; ++J) {
        const SCEV *Reg = *J;
        if (RegUses.isRegUsedByUsesOtherThan(Reg, LUIdx))
          Key.push_back(Reg);
      }
      if (F.ScaledReg &&
          RegUses.isRegUsedByUsesOtherThan(F.ScaledReg, LUIdx))
        Key.push_back(F.ScaledReg);
      // An unstable sort in host pointer order is OK here, because this is
      // only used for uniquifying.
      std::sort(Key.begin(), Key.end());

      std::pair<BestFormulaeTy::const_iterator, bool> P =
        BestFormulae.insert(std::make_pair(Key, FIdx));
      if (!P.second) {
        Formula &Best = LU.Formulae[P.first->second];
        if (Sorter(F, Best))
          std::swap(F, Best);
        DEBUG(dbgs() << "Filtering out "; F.print(dbgs());
              dbgs() << "\n"
                        "  in favor of "; Best.print(dbgs());
              dbgs() << '\n');
#ifndef NDEBUG
        Changed = true;
#endif
        std::swap(F, LU.Formulae.back());
        LU.Formulae.pop_back();
        --FIdx;
        --NumForms;
        continue;
      }
      if (F.ScaledReg) LU.Regs.insert(F.ScaledReg);
      LU.Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
    }
    BestFormulae.clear();
  }

  DEBUG(if (Changed) {
          dbgs() << "\n"
                    "After filtering out undesirable candidates:\n";
          print_uses(dbgs());
        });
}

/// NarrowSearchSpaceUsingHeuristics - If there are an extraordinary number of
/// formulae to choose from, use some rough heuristics to prune down the number
/// of formulae. This keeps the main solver from taking an extraordinary amount
/// of time in some worst-case scenarios.
void LSRInstance::NarrowSearchSpaceUsingHeuristics() {
  // This is a rough guess that seems to work fairly well.
  const size_t Limit = UINT16_MAX;

  SmallPtrSet<const SCEV *, 4> Taken;
  for (;;) {
    // Estimate the worst-case number of solutions we might consider. We almost
    // never consider this many solutions because we prune the search space,
    // but the pruning isn't always sufficient.
    uint32_t Power = 1;
    for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
         E = Uses.end(); I != E; ++I) {
      size_t FSize = I->Formulae.size();
      if (FSize >= Limit) {
        Power = Limit;
        break;
      }
      Power *= FSize;
      if (Power >= Limit)
        break;
    }
    if (Power < Limit)
      break;

    // Ok, we have too many formulae on our hands to conveniently handle.
    // Use a rough heuristic to thin out the list.

    // Pick the register which is used by the most LSRUses, which is likely
    // to be a good reuse register candidate.
    const SCEV *Best = 0;
    unsigned BestNum = 0;
    for (RegUseTracker::const_iterator I = RegUses.begin(), E = RegUses.end();
         I != E; ++I) {
      const SCEV *Reg = *I;
      if (Taken.count(Reg))
        continue;
      if (!Best)
        Best = Reg;
      else {
        unsigned Count = RegUses.getUsedByIndices(Reg).count();
        if (Count > BestNum) {
          Best = Reg;
          BestNum = Count;
        }
      }
    }

    DEBUG(dbgs() << "Narrowing the search space by assuming " << *Best
                 << " will yield profitable reuse.\n");
    Taken.insert(Best);

    // In any use with formulae which reference this register, delete formulae
    // which don't reference it.
    for (SmallVectorImpl<LSRUse>::iterator I = Uses.begin(),
         E = Uses.end(); I != E; ++I) {
      LSRUse &LU = *I;
      if (!LU.Regs.count(Best)) continue;

      // Clear out the set of used regs; it will be recomputed.
      LU.Regs.clear();

      for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
        Formula &F = LU.Formulae[i];
        if (!F.referencesReg(Best)) {
          DEBUG(dbgs() << "  Deleting "; F.print(dbgs()); dbgs() << '\n');
          std::swap(LU.Formulae.back(), F);
          LU.Formulae.pop_back();
          --e;
          --i;
          continue;
        }

        if (F.ScaledReg) LU.Regs.insert(F.ScaledReg);
        LU.Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
      }
    }

    DEBUG(dbgs() << "After pre-selection:\n";
          print_uses(dbgs()));
  }
}

/// SolveRecurse - This is the recursive solver.
void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
                               Cost &SolutionCost,
                               SmallVectorImpl<const Formula *> &Workspace,
                               const Cost &CurCost,
                               const SmallPtrSet<const SCEV *, 16> &CurRegs,
                               DenseSet<const SCEV *> &VisitedRegs) const {
  // Some ideas:
  //  - prune more:
  //    - use more aggressive filtering
  //    - sort the formulae so that the most profitable solutions are found
  //      first
  //    - sort the uses too
  //  - search faster:
  //    - don't compute a cost, and then compare. Compare while computing a
  //      cost and bail early.
  //    - track register sets with SmallBitVector

  const LSRUse &LU = Uses[Workspace.size()];

  // If this use references any register that's already a part of the
  // in-progress solution, consider it a requirement that a formula must
  // reference that register in order to be considered. This prunes out
  // unprofitable searching.
  SmallSetVector<const SCEV *, 4> ReqRegs;
  for (SmallPtrSet<const SCEV *, 16>::const_iterator I = CurRegs.begin(),
       E = CurRegs.end(); I != E; ++I)
    if (LU.Regs.count(*I))
      ReqRegs.insert(*I);

  bool AnySatisfiedReqRegs = false;
  SmallPtrSet<const SCEV *, 16> NewRegs;
  Cost NewCost;
retry:
  for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(),
       E = LU.Formulae.end(); I != E; ++I) {
    const Formula &F = *I;

    // Ignore formulae which do not use any of the required registers.
    for (SmallSetVector<const SCEV *, 4>::const_iterator J = ReqRegs.begin(),
         JE = ReqRegs.end(); J != JE; ++J) {
      const SCEV *Reg = *J;
      if ((!F.ScaledReg || F.ScaledReg != Reg) &&
          std::find(F.BaseRegs.begin(), F.BaseRegs.end(), Reg) ==
          F.BaseRegs.end())
        goto skip;
    }
    AnySatisfiedReqRegs = true;

    // Evaluate the cost of the current formula. If it's already worse than
    // the current best, prune the search at that point.
    NewCost = CurCost;
    NewRegs = CurRegs;
    NewCost.RateFormula(F, NewRegs, VisitedRegs, L, LU.Offsets, SE, DT);
    if (NewCost < SolutionCost) {
      Workspace.push_back(&F);
      if (Workspace.size() != Uses.size()) {
        SolveRecurse(Solution, SolutionCost, Workspace, NewCost,
                     NewRegs, VisitedRegs);
        if (F.getNumRegs() == 1 && Workspace.size() == 1)
          VisitedRegs.insert(F.ScaledReg ? F.ScaledReg : F.BaseRegs[0]);
      } else {
        DEBUG(dbgs() << "New best at "; NewCost.print(dbgs());
              dbgs() << ". Regs:";
              for (SmallPtrSet<const SCEV *, 16>::const_iterator
                   I = NewRegs.begin(), E = NewRegs.end(); I != E; ++I)
                dbgs() << ' ' << **I;
              dbgs() << '\n');

        SolutionCost = NewCost;
        Solution = Workspace;
      }
      Workspace.pop_back();
    }
  skip:;
  }

  // If none of the formulae had all of the required registers, relax the
  // constraint so that we don't exclude all formulae.
  if (!AnySatisfiedReqRegs) {
    ReqRegs.clear();
    goto retry;
  }
}

void LSRInstance::Solve(SmallVectorImpl<const Formula *> &Solution) const {
  SmallVector<const Formula *, 8> Workspace;
  Cost SolutionCost;
  SolutionCost.Loose();
  Cost CurCost;
  SmallPtrSet<const SCEV *, 16> CurRegs;
  DenseSet<const SCEV *> VisitedRegs;
  Workspace.reserve(Uses.size());

  SolveRecurse(Solution, SolutionCost, Workspace, CurCost,
               CurRegs, VisitedRegs);

  // Ok, we've now made all our decisions.
  DEBUG(dbgs() << "\n"
                  "The chosen solution requires "; SolutionCost.print(dbgs());
        dbgs() << ":\n";
        for (size_t i = 0, e = Uses.size(); i != e; ++i) {
          dbgs() << "  ";
          Uses[i].print(dbgs());
          dbgs() << "\n"
                    "    ";
          Solution[i]->print(dbgs());
          dbgs() << '\n';
        });
}

/// getImmediateDominator - A handy utility for the specific DominatorTree
/// query that we need here.
///
static BasicBlock *getImmediateDominator(BasicBlock *BB, DominatorTree &DT) {
  DomTreeNode *Node = DT.getNode(BB);
  if (!Node) return 0;
  Node = Node->getIDom();
  if (!Node) return 0;
  return Node->getBlock();
}

Value *LSRInstance::Expand(const LSRFixup &LF,
                           const Formula &F,
                           BasicBlock::iterator IP,
                           SCEVExpander &Rewriter,
                           SmallVectorImpl<WeakVH> &DeadInsts) const {
  const LSRUse &LU = Uses[LF.LUIdx];

  // Then, collect some instructions which we will remain dominated by when
  // expanding the replacement. They produce values (or mark positions)
  // that the expansion will require.
  SmallVector<Instruction *, 4> Inputs;
  if (Instruction *I = dyn_cast<Instruction>(LF.OperandValToReplace))
    Inputs.push_back(I);
  if (LU.Kind == LSRUse::ICmpZero)
    if (Instruction *I =
          dyn_cast<Instruction>(cast<ICmpInst>(LF.UserInst)->getOperand(1)))
      Inputs.push_back(I);
  if (LF.PostIncLoop) {
    if (!L->contains(LF.UserInst))
      Inputs.push_back(L->getLoopLatch()->getTerminator());
    else
      Inputs.push_back(IVIncInsertPos);
  }

  // Then, climb up the immediate dominator tree as far as we can go while
  // still being dominated by the input positions.
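  // Expanding at the highest point still dominated by all of the inputs
  // (hoisting as far as legality allows, roughly) gives later expansions
  // the best chance of reusing the emitted code.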
  for (;;) {
    bool AllDominate = true;
    Instruction *BetterPos = 0;
    BasicBlock *IDom = getImmediateDominator(IP->getParent(), DT);
    if (!IDom) break;
    Instruction *Tentative = IDom->getTerminator();
    for (SmallVectorImpl<Instruction *>::const_iterator I = Inputs.begin(),
         E = Inputs.end(); I != E; ++I) {
      Instruction *Inst = *I;
      if (Inst == Tentative || !DT.dominates(Inst, Tentative)) {
        AllDominate = false;
        break;
      }
      if (IDom == Inst->getParent() &&
          (!BetterPos || DT.dominates(BetterPos, Inst)))
        BetterPos = next(BasicBlock::iterator(Inst));
    }
    if (!AllDominate)
      break;
    if (BetterPos)
      IP = BetterPos;
    else
      IP = Tentative;
  }
  while (isa<PHINode>(IP)) ++IP;
  while (isa<DbgInfoIntrinsic>(IP)) ++IP;

  // Inform the Rewriter if we have a post-increment use, so that it can
  // perform an advantageous expansion.
  Rewriter.setPostInc(LF.PostIncLoop);

  // This is the type that the user actually needs.
  const Type *OpTy = LF.OperandValToReplace->getType();
  // This will be the type that we'll initially expand to.
  const Type *Ty = F.getType();
  if (!Ty)
    // No type known; just expand directly to the ultimate type.
    Ty = OpTy;
  else if (SE.getEffectiveSCEVType(Ty) == SE.getEffectiveSCEVType(OpTy))
    // Expand directly to the ultimate type if it's the right size.
    Ty = OpTy;
  // This is the type to do integer arithmetic in.
  const Type *IntTy = SE.getEffectiveSCEVType(Ty);

  // Build up a list of operands to add together to form the full base.
  SmallVector<const SCEV *, 8> Ops;

  // Expand the BaseRegs portion.
  for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
       E = F.BaseRegs.end(); I != E; ++I) {
    const SCEV *Reg = *I;
    assert(!Reg->isZero() && "Zero allocated in a base register!");

    // If we're expanding for a post-inc user of the add-rec's loop, make the
    // post-inc adjustment.
    const SCEV *Start = Reg;
    while (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Start)) {
      if (AR->getLoop() == LF.PostIncLoop) {
        Reg = SE.getAddExpr(Reg, AR->getStepRecurrence(SE));
        // If the user is inside the loop, insert the code after the increment
        // so that it is dominated by its operand. If the original insert point
        // was already dominated by the increment, keep it, because there may
        // be loop-variant operands that need to be respected also.
        if (L->contains(LF.UserInst) && !DT.dominates(IVIncInsertPos, IP)) {
          IP = IVIncInsertPos;
          while (isa<DbgInfoIntrinsic>(IP)) ++IP;
        }
        break;
      }
      Start = AR->getStart();
    }

    Ops.push_back(SE.getUnknown(Rewriter.expandCodeFor(Reg, 0, IP)));
  }

  // Flush the operand list to suppress SCEVExpander hoisting.
  if (!Ops.empty()) {
    Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
    Ops.clear();
    Ops.push_back(SE.getUnknown(FullV));
  }

  // Expand the ScaledReg portion.
  Value *ICmpScaledV = 0;
  if (F.AM.Scale != 0) {
    const SCEV *ScaledS = F.ScaledReg;

    // If we're expanding for a post-inc user of the add-rec's loop, make the
    // post-inc adjustment.
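    // As with the base registers above, the adjustment is one step of the
    // recurrence: the scaled register Reg becomes Reg + Step, the value the
    // register holds after the increment.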
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(ScaledS))
      if (AR->getLoop() == LF.PostIncLoop)
        ScaledS = SE.getAddExpr(ScaledS, AR->getStepRecurrence(SE));

    if (LU.Kind == LSRUse::ICmpZero) {
      // An interesting way of "folding" with an icmp is to use a negated
      // scale, which we'll implement by inserting it into the other operand
      // of the icmp.
      assert(F.AM.Scale == -1 &&
             "The only scale supported by ICmpZero uses is -1!");
      ICmpScaledV = Rewriter.expandCodeFor(ScaledS, 0, IP);
    } else {
      // Otherwise just expand the scaled register and an explicit scale,
      // which is expected to be matched as part of the address.
      ScaledS = SE.getUnknown(Rewriter.expandCodeFor(ScaledS, 0, IP));
      ScaledS = SE.getMulExpr(ScaledS,
                              SE.getIntegerSCEV(F.AM.Scale,
                                                ScaledS->getType()));
      Ops.push_back(ScaledS);

      // Flush the operand list to suppress SCEVExpander hoisting.
      Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
      Ops.clear();
      Ops.push_back(SE.getUnknown(FullV));
    }
  }

  // Expand the GV portion.
  if (F.AM.BaseGV) {
    Ops.push_back(SE.getUnknown(F.AM.BaseGV));

    // Flush the operand list to suppress SCEVExpander hoisting.
    Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
    Ops.clear();
    Ops.push_back(SE.getUnknown(FullV));
  }

  // Expand the immediate portion.
  int64_t Offset = (uint64_t)F.AM.BaseOffs + LF.Offset;
  if (Offset != 0) {
    if (LU.Kind == LSRUse::ICmpZero) {
      // The other interesting way of "folding" with an ICmpZero is to use a
      // negated immediate.
      if (!ICmpScaledV)
        ICmpScaledV = ConstantInt::get(IntTy, -Offset);
      else {
        Ops.push_back(SE.getUnknown(ICmpScaledV));
        ICmpScaledV = ConstantInt::get(IntTy, Offset);
      }
    } else {
      // Just add the immediate values. These again are expected to be matched
      // as part of the address.
      Ops.push_back(SE.getUnknown(ConstantInt::getSigned(IntTy, Offset)));
    }
  }

  // Emit instructions summing all the operands.
  const SCEV *FullS = Ops.empty() ?
                      SE.getIntegerSCEV(0, IntTy) :
                      SE.getAddExpr(Ops);
  Value *FullV = Rewriter.expandCodeFor(FullS, Ty, IP);

  // We're done expanding now, so reset the rewriter.
  Rewriter.setPostInc(0);

  // An ICmpZero Formula represents an ICmp which we're handling as a
  // comparison against zero. Now that we've expanded an expression for that
  // form, update the ICmp's other operand.
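  // Roughly: a use (icmp eq %x, %n) handled as ICmpZero stands for
  // (%n - %x) == 0, and any scale or immediate that was folded across the
  // comparison now has to reappear, negated, in the ICmp's other operand.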
  if (LU.Kind == LSRUse::ICmpZero) {
    ICmpInst *CI = cast<ICmpInst>(LF.UserInst);
    DeadInsts.push_back(CI->getOperand(1));
    assert(!F.AM.BaseGV && "ICmp does not support folding a global value and "
                           "a scale at the same time!");
    if (F.AM.Scale == -1) {
      if (ICmpScaledV->getType() != OpTy) {
        Instruction *Cast =
          CastInst::Create(CastInst::getCastOpcode(ICmpScaledV, false,
                                                   OpTy, false),
                           ICmpScaledV, OpTy, "tmp", CI);
        ICmpScaledV = Cast;
      }
      CI->setOperand(1, ICmpScaledV);
    } else {
      assert(F.AM.Scale == 0 &&
             "ICmp does not support folding a global value and "
             "a scale at the same time!");
      Constant *C = ConstantInt::getSigned(SE.getEffectiveSCEVType(OpTy),
                                           -(uint64_t)Offset);
      if (C->getType() != OpTy)
        C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
                                                          OpTy, false),
                                  C, OpTy);

      CI->setOperand(1, C);
    }
  }

  return FullV;
}

/// RewriteForPHI - Helper for Rewrite. PHI nodes are special because the use
/// of their operands effectively happens in their predecessor blocks, so the
/// expression may need to be expanded in multiple places.
void LSRInstance::RewriteForPHI(PHINode *PN,
                                const LSRFixup &LF,
                                const Formula &F,
                                SCEVExpander &Rewriter,
                                SmallVectorImpl<WeakVH> &DeadInsts,
                                Pass *P) const {
  DenseMap<BasicBlock *, Value *> Inserted;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
    if (PN->getIncomingValue(i) == LF.OperandValToReplace) {
      BasicBlock *BB = PN->getIncomingBlock(i);

      // If this is a critical edge, split the edge so that we do not insert
      // the code on all predecessor/successor paths. We do this unless this
      // is the canonical backedge for this loop, which complicates post-inc
      // users.
      if (e != 1 && BB->getTerminator()->getNumSuccessors() > 1 &&
          !isa<IndirectBrInst>(BB->getTerminator()) &&
          (PN->getParent() != L->getHeader() || !L->contains(BB))) {
        // Split the critical edge.
        BasicBlock *NewBB = SplitCriticalEdge(BB, PN->getParent(), P);

        // If PN is outside of the loop and BB is in the loop, we want to
        // move the block to be immediately before the PHI block, not
        // immediately after BB.
        if (L->contains(BB) && !L->contains(PN))
          NewBB->moveBefore(PN->getParent());

        // Splitting the edge can reduce the number of PHI entries we have.
        e = PN->getNumIncomingValues();
        BB = NewBB;
        i = PN->getBasicBlockIndex(BB);
      }

      std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> Pair =
        Inserted.insert(std::make_pair(BB, static_cast<Value *>(0)));
      if (!Pair.second)
        PN->setIncomingValue(i, Pair.first->second);
      else {
        Value *FullV = Expand(LF, F, BB->getTerminator(), Rewriter, DeadInsts);

        // If this is reuse-by-noop-cast, insert the noop cast.
        const Type *OpTy = LF.OperandValToReplace->getType();
        if (FullV->getType() != OpTy)
          FullV =
            CastInst::Create(CastInst::getCastOpcode(FullV, false,
                                                     OpTy, false),
                             FullV, LF.OperandValToReplace->getType(),
                             "tmp", BB->getTerminator());

        PN->setIncomingValue(i, FullV);
        Pair.first->second = FullV;
      }
    }
}

/// Rewrite - Emit instructions for the leading candidate expression for this
/// LSRUse (this is called "expanding"), and update the UserInst to reference
/// the newly expanded value.
void LSRInstance::Rewrite(const LSRFixup &LF,
                          const Formula &F,
                          SCEVExpander &Rewriter,
                          SmallVectorImpl<WeakVH> &DeadInsts,
                          Pass *P) const {
  // First, find an insertion point that dominates UserInst. For PHI nodes,
  // find the nearest block which dominates all the relevant uses.
  if (PHINode *PN = dyn_cast<PHINode>(LF.UserInst)) {
    RewriteForPHI(PN, LF, F, Rewriter, DeadInsts, P);
  } else {
    Value *FullV = Expand(LF, F, LF.UserInst, Rewriter, DeadInsts);

    // If this is reuse-by-noop-cast, insert the noop cast.
    const Type *OpTy = LF.OperandValToReplace->getType();
    if (FullV->getType() != OpTy) {
      Instruction *Cast =
        CastInst::Create(CastInst::getCastOpcode(FullV, false, OpTy, false),
                         FullV, OpTy, "tmp", LF.UserInst);
      FullV = Cast;
    }

    // Update the user. ICmpZero is handled specially here (for now) because
    // Expand may have updated one of the operands of the icmp already, and
    // its new value may happen to be equal to LF.OperandValToReplace, in
    // which case doing replaceUsesOfWith leads to replacing both operands
    // with the same value. TODO: Reorganize this.
    if (Uses[LF.LUIdx].Kind == LSRUse::ICmpZero)
      LF.UserInst->setOperand(0, FullV);
    else
      LF.UserInst->replaceUsesOfWith(LF.OperandValToReplace, FullV);
  }

  DeadInsts.push_back(LF.OperandValToReplace);
}

void
LSRInstance::ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
                               Pass *P) {
  // Keep track of instructions we may have made dead, so that
  // we can remove them after we are done working.
  SmallVector<WeakVH, 16> DeadInsts;

  SCEVExpander Rewriter(SE);
  Rewriter.disableCanonicalMode();
  Rewriter.setIVIncInsertPos(L, IVIncInsertPos);

  // Expand the new value definitions and update the users.
  for (size_t i = 0, e = Fixups.size(); i != e; ++i) {
    size_t LUIdx = Fixups[i].LUIdx;

    Rewrite(Fixups[i], *Solution[LUIdx], Rewriter, DeadInsts, P);

    Changed = true;
  }

  // Clean up after ourselves. This must be done before deleting any
  // instructions.
  Rewriter.clear();

  Changed |= DeleteTriviallyDeadInstructions(DeadInsts);
}

LSRInstance::LSRInstance(const TargetLowering *tli, Loop *l, Pass *P)
  : IU(P->getAnalysis<IVUsers>()),
    SE(P->getAnalysis<ScalarEvolution>()),
    DT(P->getAnalysis<DominatorTree>()),
    TLI(tli), L(l), Changed(false), IVIncInsertPos(0) {

  // If LoopSimplify form is not available, stay out of trouble.
  if (!L->isLoopSimplifyForm()) return;

  // If there's no interesting work to be done, bail early.
  if (IU.empty()) return;

  DEBUG(dbgs() << "\nLSR on loop ";
        WriteAsOperand(dbgs(), L->getHeader(), /*PrintType=*/false);
        dbgs() << ":\n");

  // If the IV is used in an int-to-float cast inside the loop, then try to
  // eliminate the cast operation.
  OptimizeShadowIV();

  // Change loop terminating condition to use the postinc iv when possible.
  Changed |= OptimizeLoopTermCond();

  CollectInterestingTypesAndFactors();
  CollectFixupsAndInitialFormulae();
  CollectLoopInvariantFixupsAndFormulae();

  DEBUG(dbgs() << "LSR found " << Uses.size() << " uses:\n";
        print_uses(dbgs()));

  // Now use the reuse data to generate a bunch of interesting ways
  // to formulate the values needed for the uses.
  GenerateAllReuseFormulae();

  DEBUG(dbgs() << "\n"
                  "After generating reuse formulae:\n";
        print_uses(dbgs()));

  FilterOutUndesirableDedicatedRegisters();
  NarrowSearchSpaceUsingHeuristics();

  SmallVector<const Formula *, 8> Solution;
  Solve(Solution);
  assert(Solution.size() == Uses.size() && "Malformed solution!");

  // Release memory that is no longer needed.
  Factors.clear();
  Types.clear();
  RegUses.clear();

#ifndef NDEBUG
  // Formulae should be legal.
  for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
       E = Uses.end(); I != E; ++I) {
    const LSRUse &LU = *I;
    for (SmallVectorImpl<Formula>::const_iterator J = LU.Formulae.begin(),
         JE = LU.Formulae.end(); J != JE; ++J)
      assert(isLegalUse(J->AM, LU.MinOffset, LU.MaxOffset,
                        LU.Kind, LU.AccessTy, TLI) &&
             "Illegal formula generated!");
  }
#endif

  // Now that we've decided what we want, make it so.
  ImplementSolution(Solution, P);
}

void LSRInstance::print_factors_and_types(raw_ostream &OS) const {
  if (Factors.empty() && Types.empty()) return;

  OS << "LSR has identified the following interesting factors and types: ";
  bool First = true;

  for (SmallSetVector<int64_t, 8>::const_iterator
       I = Factors.begin(), E = Factors.end(); I != E; ++I) {
    if (!First) OS << ", ";
    First = false;
    OS << '*' << *I;
  }

  for (SmallSetVector<const Type *, 4>::const_iterator
       I = Types.begin(), E = Types.end(); I != E; ++I) {
    if (!First) OS << ", ";
    First = false;
    OS << '(' << **I << ')';
  }
  OS << '\n';
}

void LSRInstance::print_fixups(raw_ostream &OS) const {
  OS << "LSR is examining the following fixup sites:\n";
  for (SmallVectorImpl<LSRFixup>::const_iterator I = Fixups.begin(),
       E = Fixups.end(); I != E; ++I) {
    const LSRFixup &LF = *I;
    OS << "  ";
    LF.print(OS);
    OS << '\n';
  }
}

void LSRInstance::print_uses(raw_ostream &OS) const {
  OS << "LSR is examining the following uses:\n";
  for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
       E = Uses.end(); I != E; ++I) {
    const LSRUse &LU = *I;
    OS << "  ";
    LU.print(OS);
    OS << '\n';
    for (SmallVectorImpl<Formula>::const_iterator J = LU.Formulae.begin(),
         JE = LU.Formulae.end(); J != JE; ++J) {
      OS << "    ";
      J->print(OS);
      OS << '\n';
    }
  }
}

void LSRInstance::print(raw_ostream &OS) const {
  print_factors_and_types(OS);
  print_fixups(OS);
  print_uses(OS);
}

void LSRInstance::dump() const {
  print(errs()); errs() << '\n';
}

namespace {

class LoopStrengthReduce : public LoopPass {
  /// TLI - Keep a pointer to a TargetLowering to consult for determining
  /// transformation profitability.
  const TargetLowering *const TLI;

public:
  static char ID; // Pass ID, replacement for typeid
  explicit LoopStrengthReduce(const TargetLowering *tli = 0);

private:
  bool runOnLoop(Loop *L, LPPassManager &LPM);
  void getAnalysisUsage(AnalysisUsage &AU) const;
};

}

char LoopStrengthReduce::ID = 0;
static RegisterPass<LoopStrengthReduce>
X("loop-reduce", "Loop Strength Reduction");

Pass *llvm::createLoopStrengthReducePass(const TargetLowering *TLI) {
  return new LoopStrengthReduce(TLI);
}

LoopStrengthReduce::LoopStrengthReduce(const TargetLowering *tli)
  : LoopPass(&ID), TLI(tli) {}

void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const {
  // We split critical edges, so we change the CFG. However, we do update
  // many analyses if they are around.
  AU.addPreservedID(LoopSimplifyID);
  AU.addPreserved<LoopInfo>();
  AU.addPreserved("domfrontier");

  AU.addRequiredID(LoopSimplifyID);
  AU.addRequired<DominatorTree>();
  AU.addPreserved<DominatorTree>();
  AU.addRequired<ScalarEvolution>();
  AU.addPreserved<ScalarEvolution>();
  AU.addRequired<IVUsers>();
  AU.addPreserved<IVUsers>();
}

bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) {
  bool Changed = false;

  // Run the main LSR transformation.
  Changed |= LSRInstance(TLI, L, this).getChanged();

  // At this point, it is worth checking to see if any recurrence PHIs are also
  // dead, so that we can remove them as well.
  Changed |= DeleteDeadPHIs(L->getHeader());

  return Changed;
}