LoopStrengthReduce.cpp revision 210299
//===- LoopStrengthReduce.cpp - Strength Reduce IVs in Loops --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation analyzes and transforms the induction variables (and
// computations derived from them) into forms suitable for efficient execution
// on the target.
//
// This pass performs a strength reduction on array references inside loops that
// have as one or more of their components the loop induction variable; it
// rewrites expressions to take advantage of scaled-index addressing modes
// available on the target, and it performs a variety of other optimizations
// related to loop induction variables.
//
// Terminology note: this code has a lot of handling for "post-increment" or
// "post-inc" users. This is not talking about post-increment addressing modes;
// it is instead talking about code like this:
//
//   %i = phi [ 0, %entry ], [ %i.next, %latch ]
//   ...
//   %i.next = add %i, 1
//   %c = icmp eq %i.next, %n
//
// The SCEV for %i is {0,+,1}<%L>. The SCEV for %i.next is {1,+,1}<%L>, however
// it's useful to think about these as the same register, with some uses using
// the value of the register before the add and some using it after. In this
// example, the icmp is a post-increment user, since it uses %i.next, which is
// the value of the induction variable after the increment. The other common
// case of post-increment users is users outside the loop.
//
// TODO: More sophistication in the way Formulae are generated and filtered.
//
// TODO: Handle multiple loops at a time.
//
// TODO: Should TargetLowering::AddrMode::BaseGV be changed to a ConstantExpr
//       instead of a GlobalValue?
//
// TODO: When truncation is free, truncate ICmp users' operands to make it a
//       smaller encoding (on x86 at least).
//
// TODO: When a negated register is used by an add (such as in a list of
//       multiple base registers, or as the increment expression in an addrec),
//       we may not actually need both reg and (-1 * reg) in registers; the
//       negation can be implemented by using a sub instead of an add. The
//       lack of support for taking this into consideration when making
//       register pressure decisions is partly worked around by the "Special"
//       use kind.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "loop-reduce"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Analysis/IVUsers.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include <algorithm>
using namespace llvm;

namespace {

/// RegSortData - This class holds data which is used to order reuse candidates.
class RegSortData {
public:
  /// UsedByIndices - This represents the set of LSRUse indices which reference
  /// a particular register.
  SmallBitVector UsedByIndices;

  RegSortData() {}

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

void RegSortData::print(raw_ostream &OS) const {
  OS << "[NumUses=" << UsedByIndices.count() << ']';
}

void RegSortData::dump() const {
  print(errs()); errs() << '\n';
}

namespace {

/// RegUseTracker - Map register candidates to information about how they are
/// used.
class RegUseTracker {
  typedef DenseMap<const SCEV *, RegSortData> RegUsesTy;

  RegUsesTy RegUsesMap;
  SmallVector<const SCEV *, 16> RegSequence;

public:
  void CountRegister(const SCEV *Reg, size_t LUIdx);
  void DropRegister(const SCEV *Reg, size_t LUIdx);
  void DropUse(size_t LUIdx);

  bool isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const;

  const SmallBitVector &getUsedByIndices(const SCEV *Reg) const;

  void clear();

  typedef SmallVectorImpl<const SCEV *>::iterator iterator;
  typedef SmallVectorImpl<const SCEV *>::const_iterator const_iterator;
  iterator begin() { return RegSequence.begin(); }
  iterator end()   { return RegSequence.end(); }
  const_iterator begin() const { return RegSequence.begin(); }
  const_iterator end() const   { return RegSequence.end(); }
};

}

void
RegUseTracker::CountRegister(const SCEV *Reg, size_t LUIdx) {
  std::pair<RegUsesTy::iterator, bool> Pair =
    RegUsesMap.insert(std::make_pair(Reg, RegSortData()));
  RegSortData &RSD = Pair.first->second;
  if (Pair.second)
    RegSequence.push_back(Reg);
  RSD.UsedByIndices.resize(std::max(RSD.UsedByIndices.size(), LUIdx + 1));
  RSD.UsedByIndices.set(LUIdx);
}

void
RegUseTracker::DropRegister(const SCEV *Reg, size_t LUIdx) {
  RegUsesTy::iterator It = RegUsesMap.find(Reg);
  assert(It != RegUsesMap.end());
  RegSortData &RSD = It->second;
  assert(RSD.UsedByIndices.size() > LUIdx);
  RSD.UsedByIndices.reset(LUIdx);
}

void
RegUseTracker::DropUse(size_t LUIdx) {
  // Remove the use index from every register's use list.
  for (RegUsesTy::iterator I = RegUsesMap.begin(), E = RegUsesMap.end();
       I != E; ++I)
    I->second.UsedByIndices.reset(LUIdx);
}

bool
RegUseTracker::isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const {
  if (!RegUsesMap.count(Reg)) return false;
  const SmallBitVector &UsedByIndices =
    RegUsesMap.find(Reg)->second.UsedByIndices;
  int i = UsedByIndices.find_first();
  if (i == -1) return false;
  if ((size_t)i != LUIdx) return true;
  return UsedByIndices.find_next(i) != -1;
}

const SmallBitVector &RegUseTracker::getUsedByIndices(const SCEV *Reg) const {
  RegUsesTy::const_iterator I = RegUsesMap.find(Reg);
  assert(I != RegUsesMap.end() && "Unknown register!");
  return I->second.UsedByIndices;
}

void RegUseTracker::clear() {
  RegUsesMap.clear();
  RegSequence.clear();
}
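
// Illustrative sketch (not part of the pass): a register shared by several
// uses is tracked by setting the corresponding LSRUse-index bits. Assuming
// hypothetical SCEVs RegA/RegB and use indices 0 and 1:
//
//   RegUseTracker RegUses;
//   RegUses.CountRegister(RegA, 0);             // RegA used by LSRUse #0
//   RegUses.CountRegister(RegA, 1);             // ...and by LSRUse #1
//   RegUses.CountRegister(RegB, 1);             // RegB used only by LSRUse #1
//   RegUses.isRegUsedByUsesOtherThan(RegA, 0);  // true: also used by #1
//   RegUses.isRegUsedByUsesOtherThan(RegB, 1);  // false: #1 is its only user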

namespace {

/// Formula - This class holds information that describes a formula for
/// computing a value satisfying a use. It may include broken-out immediates
/// and scaled registers.
struct Formula {
  /// AM - This is used to represent complex addressing, as well as other kinds
  /// of interesting uses.
  TargetLowering::AddrMode AM;

  /// BaseRegs - The list of "base" registers for this use. When this is
  /// non-empty, AM.HasBaseReg should be set to true.
  SmallVector<const SCEV *, 2> BaseRegs;

  /// ScaledReg - The 'scaled' register for this use. This should be non-null
  /// when AM.Scale is not zero.
  const SCEV *ScaledReg;

  Formula() : ScaledReg(0) {}

  void InitialMatch(const SCEV *S, Loop *L,
                    ScalarEvolution &SE, DominatorTree &DT);

  unsigned getNumRegs() const;
  const Type *getType() const;

  void DeleteBaseReg(const SCEV *&S);

  bool referencesReg(const SCEV *S) const;
  bool hasRegsUsedByUsesOtherThan(size_t LUIdx,
                                  const RegUseTracker &RegUses) const;

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

/// DoInitialMatch - Recursion helper for InitialMatch.
static void DoInitialMatch(const SCEV *S, Loop *L,
                           SmallVectorImpl<const SCEV *> &Good,
                           SmallVectorImpl<const SCEV *> &Bad,
                           ScalarEvolution &SE, DominatorTree &DT) {
  // Collect expressions which properly dominate the loop header.
  if (S->properlyDominates(L->getHeader(), &DT)) {
    Good.push_back(S);
    return;
  }

  // Look at add operands.
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
         I != E; ++I)
      DoInitialMatch(*I, L, Good, Bad, SE, DT);
    return;
  }

  // Look at addrec operands.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
    if (!AR->getStart()->isZero()) {
      DoInitialMatch(AR->getStart(), L, Good, Bad, SE, DT);
      DoInitialMatch(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0),
                                      AR->getStepRecurrence(SE),
                                      AR->getLoop()),
                     L, Good, Bad, SE, DT);
      return;
    }

  // Handle a multiplication by -1 (negation) if it didn't fold.
  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S))
    if (Mul->getOperand(0)->isAllOnesValue()) {
      SmallVector<const SCEV *, 4> Ops(Mul->op_begin()+1, Mul->op_end());
      const SCEV *NewMul = SE.getMulExpr(Ops);

      SmallVector<const SCEV *, 4> MyGood;
      SmallVector<const SCEV *, 4> MyBad;
      DoInitialMatch(NewMul, L, MyGood, MyBad, SE, DT);
      const SCEV *NegOne = SE.getSCEV(ConstantInt::getAllOnesValue(
        SE.getEffectiveSCEVType(NewMul->getType())));
      for (SmallVectorImpl<const SCEV *>::const_iterator I = MyGood.begin(),
           E = MyGood.end(); I != E; ++I)
        Good.push_back(SE.getMulExpr(NegOne, *I));
      for (SmallVectorImpl<const SCEV *>::const_iterator I = MyBad.begin(),
           E = MyBad.end(); I != E; ++I)
        Bad.push_back(SE.getMulExpr(NegOne, *I));
      return;
    }

  // Ok, we can't do anything interesting. Just stuff the whole thing into a
  // register and hope for the best.
  Bad.push_back(S);
}
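
// Worked example (illustrative): in a loop %L, an addrec with a non-zero
// start is split so the loop-invariant part can be matched separately:
//
//   S = {%n,+,4}<%L>
//     => DoInitialMatch(%n)           --> Good (properly dominates the header)
//     => DoInitialMatch({0,+,4}<%L>)  --> Bad  (loop-variant)
//
// InitialMatch below then ends up with two base registers, keeping the
// loop-invariant %n where later phases can fold or cheaply rematerialize it.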
/// InitialMatch - Incorporate loop-variant parts of S into this Formula,
/// attempting to keep all loop-invariant and loop-computable values in a
/// single base register.
void Formula::InitialMatch(const SCEV *S, Loop *L,
                           ScalarEvolution &SE, DominatorTree &DT) {
  SmallVector<const SCEV *, 4> Good;
  SmallVector<const SCEV *, 4> Bad;
  DoInitialMatch(S, L, Good, Bad, SE, DT);
  if (!Good.empty()) {
    const SCEV *Sum = SE.getAddExpr(Good);
    if (!Sum->isZero())
      BaseRegs.push_back(Sum);
    AM.HasBaseReg = true;
  }
  if (!Bad.empty()) {
    const SCEV *Sum = SE.getAddExpr(Bad);
    if (!Sum->isZero())
      BaseRegs.push_back(Sum);
    AM.HasBaseReg = true;
  }
}

/// getNumRegs - Return the total number of register operands used by this
/// formula. This does not include register uses implied by non-constant
/// addrec strides.
unsigned Formula::getNumRegs() const {
  return !!ScaledReg + BaseRegs.size();
}

/// getType - Return the type of this formula, if it has one, or null
/// otherwise. This type is meaningless except for the bit size.
const Type *Formula::getType() const {
  return !BaseRegs.empty() ? BaseRegs.front()->getType() :
         ScaledReg ? ScaledReg->getType() :
         AM.BaseGV ? AM.BaseGV->getType() :
         0;
}

/// DeleteBaseReg - Delete the given base reg from the BaseRegs list.
void Formula::DeleteBaseReg(const SCEV *&S) {
  if (&S != &BaseRegs.back())
    std::swap(S, BaseRegs.back());
  BaseRegs.pop_back();
}

/// referencesReg - Test if this formula references the given register.
bool Formula::referencesReg(const SCEV *S) const {
  return S == ScaledReg ||
         std::find(BaseRegs.begin(), BaseRegs.end(), S) != BaseRegs.end();
}

/// hasRegsUsedByUsesOtherThan - Test whether this formula uses registers
/// which are used by uses other than the use with the given index.
bool Formula::hasRegsUsedByUsesOtherThan(size_t LUIdx,
                                         const RegUseTracker &RegUses) const {
  if (ScaledReg)
    if (RegUses.isRegUsedByUsesOtherThan(ScaledReg, LUIdx))
      return true;
  for (SmallVectorImpl<const SCEV *>::const_iterator I = BaseRegs.begin(),
       E = BaseRegs.end(); I != E; ++I)
    if (RegUses.isRegUsedByUsesOtherThan(*I, LUIdx))
      return true;
  return false;
}

void Formula::print(raw_ostream &OS) const {
  bool First = true;
  if (AM.BaseGV) {
    if (!First) OS << " + "; else First = false;
    WriteAsOperand(OS, AM.BaseGV, /*PrintType=*/false);
  }
  if (AM.BaseOffs != 0) {
    if (!First) OS << " + "; else First = false;
    OS << AM.BaseOffs;
  }
  for (SmallVectorImpl<const SCEV *>::const_iterator I = BaseRegs.begin(),
       E = BaseRegs.end(); I != E; ++I) {
    if (!First) OS << " + "; else First = false;
    OS << "reg(" << **I << ')';
  }
  if (AM.HasBaseReg && BaseRegs.empty()) {
    if (!First) OS << " + "; else First = false;
    OS << "**error: HasBaseReg**";
  } else if (!AM.HasBaseReg && !BaseRegs.empty()) {
    if (!First) OS << " + "; else First = false;
    OS << "**error: !HasBaseReg**";
  }
  if (AM.Scale != 0) {
    if (!First) OS << " + "; else First = false;
    OS << AM.Scale << "*reg(";
    if (ScaledReg)
      OS << *ScaledReg;
    else
      OS << "<unknown>";
    OS << ')';
  }
}

void Formula::dump() const {
  print(errs()); errs() << '\n';
}
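
// For reference, a formula with a global base, an immediate, one base
// register, and a scaled register prints roughly as (hypothetical values):
//
//   GV + 16 + reg(%n) + 4*reg({0,+,1}<%L>)
//
// i.e. the materialized address would be GV + 16 + %n + 4*IV, which is what
// the AM (addressing mode) fields plus BaseRegs/ScaledReg jointly describe.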
/// isAddRecSExtable - Return true if the given addrec can be sign-extended
/// without changing its value.
static bool isAddRecSExtable(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
  const Type *WideTy =
    IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(AR->getType()) + 1);
  return isa<SCEVAddRecExpr>(SE.getSignExtendExpr(AR, WideTy));
}

/// isAddSExtable - Return true if the given add can be sign-extended
/// without changing its value.
static bool isAddSExtable(const SCEVAddExpr *A, ScalarEvolution &SE) {
  const Type *WideTy =
    IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(A->getType()) + 1);
  return isa<SCEVAddExpr>(SE.getSignExtendExpr(A, WideTy));
}

/// isMulSExtable - Return true if the given mul can be sign-extended
/// without changing its value.
static bool isMulSExtable(const SCEVMulExpr *M, ScalarEvolution &SE) {
  const Type *WideTy =
    IntegerType::get(SE.getContext(),
                     SE.getTypeSizeInBits(M->getType()) * M->getNumOperands());
  return isa<SCEVMulExpr>(SE.getSignExtendExpr(M, WideTy));
}
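
// The trick in the helpers above: widen by one bit (or, for mul, by enough
// bits to hold any product) and ask ScalarEvolution to sign-extend. If the
// extension folds back into an expression of the same kind, the operation
// provably cannot overflow at the original width. Illustrative sketch:
//
//   sext i32 {0,+,1}<nsw><%L> to i33  ==>  {0,+,1}<%L>            (still an addrec)
//   sext i32 {0,+,%n}<%L>     to i33  ==>  (sext i32 {0,+,%n}<%L>) (opaque wrapper)
//
// so the first addrec is sign-extendable and the second is not.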
/// getExactSDiv - Return an expression for LHS /s RHS, if it can be determined
/// and if the remainder is known to be zero, or null otherwise. If
/// IgnoreSignificantBits is true, expressions like (X * Y) /s Y are simplified
/// to Y, ignoring that the multiplication may overflow, which is useful when
/// the result will be used in a context where the most significant bits are
/// ignored.
static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
                                ScalarEvolution &SE,
                                bool IgnoreSignificantBits = false) {
  // Handle the trivial case, which works for any SCEV type.
  if (LHS == RHS)
    return SE.getConstant(LHS->getType(), 1);

  // Handle a few RHS special cases.
  const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS);
  if (RC) {
    const APInt &RA = RC->getValue()->getValue();
    // Handle x /s -1 as x * -1, to give ScalarEvolution a chance to do
    // some folding.
    if (RA.isAllOnesValue())
      return SE.getMulExpr(LHS, RC);
    // Handle x /s 1 as x.
    if (RA == 1)
      return LHS;
  }

  // Check for a division of a constant by a constant.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(LHS)) {
    if (!RC)
      return 0;
    const APInt &LA = C->getValue()->getValue();
    const APInt &RA = RC->getValue()->getValue();
    if (LA.srem(RA) != 0)
      return 0;
    return SE.getConstant(LA.sdiv(RA));
  }

  // Distribute the sdiv over addrec operands, if the addrec doesn't overflow.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) {
    if (IgnoreSignificantBits || isAddRecSExtable(AR, SE)) {
      const SCEV *Start = getExactSDiv(AR->getStart(), RHS, SE,
                                       IgnoreSignificantBits);
      if (!Start) return 0;
      const SCEV *Step = getExactSDiv(AR->getStepRecurrence(SE), RHS, SE,
                                      IgnoreSignificantBits);
      if (!Step) return 0;
      return SE.getAddRecExpr(Start, Step, AR->getLoop());
    }
    return 0;
  }

  // Distribute the sdiv over add operands, if the add doesn't overflow.
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(LHS)) {
    if (IgnoreSignificantBits || isAddSExtable(Add, SE)) {
      SmallVector<const SCEV *, 8> Ops;
      for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
           I != E; ++I) {
        const SCEV *Op = getExactSDiv(*I, RHS, SE,
                                      IgnoreSignificantBits);
        if (!Op) return 0;
        Ops.push_back(Op);
      }
      return SE.getAddExpr(Ops);
    }
    return 0;
  }

  // Check for a multiply operand that we can pull RHS out of.
  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS)) {
    if (IgnoreSignificantBits || isMulSExtable(Mul, SE)) {
      SmallVector<const SCEV *, 4> Ops;
      bool Found = false;
      for (SCEVMulExpr::op_iterator I = Mul->op_begin(), E = Mul->op_end();
           I != E; ++I) {
        const SCEV *S = *I;
        if (!Found)
          if (const SCEV *Q = getExactSDiv(S, RHS, SE,
                                           IgnoreSignificantBits)) {
            S = Q;
            Found = true;
          }
        Ops.push_back(S);
      }
      return Found ? SE.getMulExpr(Ops) : 0;
    }
    return 0;
  }

  // Otherwise we don't know.
  return 0;
}

/// ExtractImmediate - If S involves the addition of a constant integer value,
/// return that integer value, and mutate S to point to a new SCEV with that
/// value excluded.
static int64_t ExtractImmediate(const SCEV *&S, ScalarEvolution &SE) {
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    if (C->getValue()->getValue().getMinSignedBits() <= 64) {
      S = SE.getConstant(C->getType(), 0);
      return C->getValue()->getSExtValue();
    }
  } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end());
    int64_t Result = ExtractImmediate(NewOps.front(), SE);
    S = SE.getAddExpr(NewOps);
    return Result;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
    int64_t Result = ExtractImmediate(NewOps.front(), SE);
    S = SE.getAddRecExpr(NewOps, AR->getLoop());
    return Result;
  }
  return 0;
}

/// ExtractSymbol - If S involves the addition of a GlobalValue address,
/// return that symbol, and mutate S to point to a new SCEV with that
/// value excluded.
static GlobalValue *ExtractSymbol(const SCEV *&S, ScalarEvolution &SE) {
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue())) {
      S = SE.getConstant(GV->getType(), 0);
      return GV;
    }
  } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end());
    GlobalValue *Result = ExtractSymbol(NewOps.back(), SE);
    S = SE.getAddExpr(NewOps);
    return Result;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
    GlobalValue *Result = ExtractSymbol(NewOps.front(), SE);
    S = SE.getAddRecExpr(NewOps, AR->getLoop());
    return Result;
  }
  return 0;
}
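
// Example of the in-place mutation the two extractors perform (illustrative
// SCEVs; @g is a hypothetical global):
//
//   const SCEV *S = /* (8 + {0,+,4}<%L> + @g) */;
//   int64_t Imm = ExtractImmediate(S, SE);   // Imm == 8,  S == ({0,+,4}<%L> + @g)
//   GlobalValue *GV = ExtractSymbol(S, SE);  // GV == @g,  S == {0,+,4}<%L>
//
// This is how formulae get their AM.BaseOffs and AM.BaseGV broken out of a
// use's expression before the remainder is assigned to registers.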
/// isAddressUse - Returns true if the specified instruction is using the
/// specified value as an address.
static bool isAddressUse(Instruction *Inst, Value *OperandVal) {
  bool isAddress = isa<LoadInst>(Inst);
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->getOperand(1) == OperandVal)
      isAddress = true;
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::prefetch:
      case Intrinsic::x86_sse2_loadu_dq:
      case Intrinsic::x86_sse2_loadu_pd:
      case Intrinsic::x86_sse_loadu_ps:
      case Intrinsic::x86_sse_storeu_ps:
      case Intrinsic::x86_sse2_storeu_pd:
      case Intrinsic::x86_sse2_storeu_dq:
      case Intrinsic::x86_sse2_storel_dq:
        if (II->getArgOperand(0) == OperandVal)
          isAddress = true;
        break;
    }
  }
  return isAddress;
}

/// getAccessType - Return the type of the memory being accessed.
static const Type *getAccessType(const Instruction *Inst) {
  const Type *AccessTy = Inst->getType();
  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst))
    AccessTy = SI->getOperand(0)->getType();
  else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::x86_sse_storeu_ps:
    case Intrinsic::x86_sse2_storeu_pd:
    case Intrinsic::x86_sse2_storeu_dq:
    case Intrinsic::x86_sse2_storel_dq:
      AccessTy = II->getArgOperand(0)->getType();
      break;
    }
  }

  // All pointers have the same requirements, so canonicalize them to an
  // arbitrary pointer type to minimize variation.
  if (const PointerType *PTy = dyn_cast<PointerType>(AccessTy))
    AccessTy = PointerType::get(IntegerType::get(PTy->getContext(), 1),
                                PTy->getAddressSpace());

  return AccessTy;
}

/// DeleteTriviallyDeadInstructions - If any of the instructions in the
/// specified set are trivially dead, delete them and see if this makes any of
/// their operands subsequently dead.
static bool
DeleteTriviallyDeadInstructions(SmallVectorImpl<WeakVH> &DeadInsts) {
  bool Changed = false;

  while (!DeadInsts.empty()) {
    Instruction *I = dyn_cast_or_null<Instruction>(DeadInsts.pop_back_val());

    if (I == 0 || !isInstructionTriviallyDead(I))
      continue;

    for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
      if (Instruction *U = dyn_cast<Instruction>(*OI)) {
        *OI = 0;
        if (U->use_empty())
          DeadInsts.push_back(U);
      }

    I->eraseFromParent();
    Changed = true;
  }

  return Changed;
}
namespace {

/// Cost - This class is used to measure and compare candidate formulae.
class Cost {
  /// TODO: Some of these could be merged. Also, a lexical ordering
  /// isn't always optimal.
  unsigned NumRegs;
  unsigned AddRecCost;
  unsigned NumIVMuls;
  unsigned NumBaseAdds;
  unsigned ImmCost;
  unsigned SetupCost;

public:
  Cost()
    : NumRegs(0), AddRecCost(0), NumIVMuls(0), NumBaseAdds(0), ImmCost(0),
      SetupCost(0) {}

  unsigned getNumRegs() const { return NumRegs; }

  bool operator<(const Cost &Other) const;

  void Loose();

  void RateFormula(const Formula &F,
                   SmallPtrSet<const SCEV *, 16> &Regs,
                   const DenseSet<const SCEV *> &VisitedRegs,
                   const Loop *L,
                   const SmallVectorImpl<int64_t> &Offsets,
                   ScalarEvolution &SE, DominatorTree &DT);

  void print(raw_ostream &OS) const;
  void dump() const;

private:
  void RateRegister(const SCEV *Reg,
                    SmallPtrSet<const SCEV *, 16> &Regs,
                    const Loop *L,
                    ScalarEvolution &SE, DominatorTree &DT);
  void RatePrimaryRegister(const SCEV *Reg,
                           SmallPtrSet<const SCEV *, 16> &Regs,
                           const Loop *L,
                           ScalarEvolution &SE, DominatorTree &DT);
};

}

/// RateRegister - Tally up interesting quantities from the given register.
void Cost::RateRegister(const SCEV *Reg,
                        SmallPtrSet<const SCEV *, 16> &Regs,
                        const Loop *L,
                        ScalarEvolution &SE, DominatorTree &DT) {
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Reg)) {
    if (AR->getLoop() == L)
      AddRecCost += 1; /// TODO: This should be a function of the stride.

    // If this is an addrec for a loop that's already been visited by LSR,
    // don't second-guess its addrec phi nodes. LSR isn't currently smart
    // enough to reason about more than one loop at a time. Consider these
    // registers free and leave them alone.
    else if (L->contains(AR->getLoop()) ||
             (!AR->getLoop()->contains(L) &&
              DT.dominates(L->getHeader(), AR->getLoop()->getHeader()))) {
      for (BasicBlock::iterator I = AR->getLoop()->getHeader()->begin();
           PHINode *PN = dyn_cast<PHINode>(I); ++I)
        if (SE.isSCEVable(PN->getType()) &&
            (SE.getEffectiveSCEVType(PN->getType()) ==
             SE.getEffectiveSCEVType(AR->getType())) &&
            SE.getSCEV(PN) == AR)
          return;

      // If this isn't one of the addrecs that the loop already has, it
      // would require a costly new phi and add. TODO: This isn't
      // precisely modeled right now.
      ++NumBaseAdds;
      if (!Regs.count(AR->getStart()))
        RateRegister(AR->getStart(), Regs, L, SE, DT);
    }

    // Add the step value register, if it needs one.
    // TODO: The non-affine case isn't precisely modeled here.
    if (!AR->isAffine() || !isa<SCEVConstant>(AR->getOperand(1)))
      if (!Regs.count(AR->getStart()))
        RateRegister(AR->getOperand(1), Regs, L, SE, DT);
  }
  ++NumRegs;

  // Rough heuristic; favor registers which don't require extra setup
  // instructions in the preheader.
  if (!isa<SCEVUnknown>(Reg) &&
      !isa<SCEVConstant>(Reg) &&
      !(isa<SCEVAddRecExpr>(Reg) &&
        (isa<SCEVUnknown>(cast<SCEVAddRecExpr>(Reg)->getStart()) ||
         isa<SCEVConstant>(cast<SCEVAddRecExpr>(Reg)->getStart()))))
    ++SetupCost;
}
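
// Rough illustration of the tallies (hypothetical formula): rating
// F = reg({0,+,1}<%L>) + reg(%n) for loop %L yields
//
//   NumRegs    = 2   (one per primary register)
//   AddRecCost = 1   (the {0,+,1}<%L> recurrence)
//   SetupCost  = 0   (%n is a SCEVUnknown; the addrec has a constant start)
//
// and RateFormula below adds NumBaseAdds = 1 because the two base registers
// must be summed to form the value.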
/// RatePrimaryRegister - Record this register in the set. If we haven't seen it
/// before, rate it.
void Cost::RatePrimaryRegister(const SCEV *Reg,
                               SmallPtrSet<const SCEV *, 16> &Regs,
                               const Loop *L,
                               ScalarEvolution &SE, DominatorTree &DT) {
  if (Regs.insert(Reg))
    RateRegister(Reg, Regs, L, SE, DT);
}

void Cost::RateFormula(const Formula &F,
                       SmallPtrSet<const SCEV *, 16> &Regs,
                       const DenseSet<const SCEV *> &VisitedRegs,
                       const Loop *L,
                       const SmallVectorImpl<int64_t> &Offsets,
                       ScalarEvolution &SE, DominatorTree &DT) {
  // Tally up the registers.
  if (const SCEV *ScaledReg = F.ScaledReg) {
    if (VisitedRegs.count(ScaledReg)) {
      Loose();
      return;
    }
    RatePrimaryRegister(ScaledReg, Regs, L, SE, DT);
  }
  for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
       E = F.BaseRegs.end(); I != E; ++I) {
    const SCEV *BaseReg = *I;
    if (VisitedRegs.count(BaseReg)) {
      Loose();
      return;
    }
    RatePrimaryRegister(BaseReg, Regs, L, SE, DT);

    NumIVMuls += isa<SCEVMulExpr>(BaseReg) &&
                 BaseReg->hasComputableLoopEvolution(L);
  }

  if (F.BaseRegs.size() > 1)
    NumBaseAdds += F.BaseRegs.size() - 1;

  // Tally up the non-zero immediates.
  for (SmallVectorImpl<int64_t>::const_iterator I = Offsets.begin(),
       E = Offsets.end(); I != E; ++I) {
    int64_t Offset = (uint64_t)*I + F.AM.BaseOffs;
    if (F.AM.BaseGV)
      ImmCost += 64; // Handle symbolic values conservatively.
                     // TODO: This should probably be the pointer size.
    else if (Offset != 0)
      ImmCost += APInt(64, Offset, true).getMinSignedBits();
  }
}

/// Loose - Set this cost to a losing value.
void Cost::Loose() {
  NumRegs = ~0u;
  AddRecCost = ~0u;
  NumIVMuls = ~0u;
  NumBaseAdds = ~0u;
  ImmCost = ~0u;
  SetupCost = ~0u;
}

/// operator< - Choose the lower cost.
bool Cost::operator<(const Cost &Other) const {
  if (NumRegs != Other.NumRegs)
    return NumRegs < Other.NumRegs;
  if (AddRecCost != Other.AddRecCost)
    return AddRecCost < Other.AddRecCost;
  if (NumIVMuls != Other.NumIVMuls)
    return NumIVMuls < Other.NumIVMuls;
  if (NumBaseAdds != Other.NumBaseAdds)
    return NumBaseAdds < Other.NumBaseAdds;
  if (ImmCost != Other.ImmCost)
    return ImmCost < Other.ImmCost;
  if (SetupCost != Other.SetupCost)
    return SetupCost < Other.SetupCost;
  return false;
}

void Cost::print(raw_ostream &OS) const {
  OS << NumRegs << " reg" << (NumRegs == 1 ? "" : "s");
  if (AddRecCost != 0)
    OS << ", with addrec cost " << AddRecCost;
  if (NumIVMuls != 0)
    OS << ", plus " << NumIVMuls << " IV mul" << (NumIVMuls == 1 ? "" : "s");
  if (NumBaseAdds != 0)
    OS << ", plus " << NumBaseAdds << " base add"
       << (NumBaseAdds == 1 ? "" : "s");
  if (ImmCost != 0)
    OS << ", plus " << ImmCost << " imm cost";
  if (SetupCost != 0)
    OS << ", plus " << SetupCost << " setup cost";
}

void Cost::dump() const {
  print(errs()); errs() << '\n';
}
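
// Because operator< compares the components lexicographically, register
// pressure dominates everything else. E.g. (hypothetical costs):
//
//   A: 2 regs, with addrec cost 1, plus 12 imm cost
//   B: 3 regs
//
// A < B regardless of A's immediate cost, since NumRegs differs first.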
namespace {

/// LSRFixup - An operand value in an instruction which is to be replaced
/// with some equivalent, possibly strength-reduced, replacement.
struct LSRFixup {
  /// UserInst - The instruction which will be updated.
  Instruction *UserInst;

  /// OperandValToReplace - The operand of the instruction which will
  /// be replaced. The operand may be used more than once; every instance
  /// will be replaced.
  Value *OperandValToReplace;

  /// PostIncLoops - If this user is to use the post-incremented value of an
  /// induction variable, this variable is non-null and holds the loop
  /// associated with the induction variable.
  PostIncLoopSet PostIncLoops;

  /// LUIdx - The index of the LSRUse describing the expression which
  /// this fixup needs, minus an offset (below).
  size_t LUIdx;

  /// Offset - A constant offset to be added to the LSRUse expression.
  /// This allows multiple fixups to share the same LSRUse with different
  /// offsets, for example in an unrolled loop.
  int64_t Offset;

  bool isUseFullyOutsideLoop(const Loop *L) const;

  LSRFixup();

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

LSRFixup::LSRFixup()
  : UserInst(0), OperandValToReplace(0), LUIdx(~size_t(0)), Offset(0) {}

/// isUseFullyOutsideLoop - Test whether this fixup always uses its
/// value outside of the given loop.
bool LSRFixup::isUseFullyOutsideLoop(const Loop *L) const {
  // PHI nodes use their value in their incoming blocks.
  if (const PHINode *PN = dyn_cast<PHINode>(UserInst)) {
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (PN->getIncomingValue(i) == OperandValToReplace &&
          L->contains(PN->getIncomingBlock(i)))
        return false;
    return true;
  }

  return !L->contains(UserInst);
}

void LSRFixup::print(raw_ostream &OS) const {
  OS << "UserInst=";
  // Store is common and interesting enough to be worth special-casing.
  if (StoreInst *Store = dyn_cast<StoreInst>(UserInst)) {
    OS << "store ";
    WriteAsOperand(OS, Store->getOperand(0), /*PrintType=*/false);
  } else if (UserInst->getType()->isVoidTy())
    OS << UserInst->getOpcodeName();
  else
    WriteAsOperand(OS, UserInst, /*PrintType=*/false);

  OS << ", OperandValToReplace=";
  WriteAsOperand(OS, OperandValToReplace, /*PrintType=*/false);

  for (PostIncLoopSet::const_iterator I = PostIncLoops.begin(),
       E = PostIncLoops.end(); I != E; ++I) {
    OS << ", PostIncLoop=";
    WriteAsOperand(OS, (*I)->getHeader(), /*PrintType=*/false);
  }

  if (LUIdx != ~size_t(0))
    OS << ", LUIdx=" << LUIdx;

  if (Offset != 0)
    OS << ", Offset=" << Offset;
}

void LSRFixup::dump() const {
  print(errs()); errs() << '\n';
}

namespace {

/// UniquifierDenseMapInfo - A DenseMapInfo implementation for holding
/// DenseMaps and DenseSets of sorted SmallVectors of const SCEV*.
struct UniquifierDenseMapInfo {
  static SmallVector<const SCEV *, 2> getEmptyKey() {
    SmallVector<const SCEV *, 2> V;
    V.push_back(reinterpret_cast<const SCEV *>(-1));
    return V;
  }

  static SmallVector<const SCEV *, 2> getTombstoneKey() {
    SmallVector<const SCEV *, 2> V;
    V.push_back(reinterpret_cast<const SCEV *>(-2));
    return V;
  }

  static unsigned getHashValue(const SmallVector<const SCEV *, 2> &V) {
    unsigned Result = 0;
    for (SmallVectorImpl<const SCEV *>::const_iterator I = V.begin(),
         E = V.end(); I != E; ++I)
      Result ^= DenseMapInfo<const SCEV *>::getHashValue(*I);
    return Result;
  }

  static bool isEqual(const SmallVector<const SCEV *, 2> &LHS,
                      const SmallVector<const SCEV *, 2> &RHS) {
    return LHS == RHS;
  }
};
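
// Sharing illustration (hypothetical dump): after 4x unrolling of stores to
// p[i], the four fixups can all reference one LSRUse whose expression is the
// common stride, with per-fixup constant deltas:
//
//   UserInst=store ..., LUIdx=0, Offset=0
//   UserInst=store ..., LUIdx=0           (Offset=0 is not printed)
//   UserInst=store ..., LUIdx=0, Offset=4
//   UserInst=store ..., LUIdx=0, Offset=8
//
// keeping the formula search per-use rather than per-operand.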

/// LSRUse - This class holds the state that LSR keeps for each use in
/// IVUsers, as well as uses invented by LSR itself. It includes information
/// about what kinds of things can be folded into the user, information about
/// the user itself, and information about how the use may be satisfied.
/// TODO: Represent multiple users of the same expression in common?
class LSRUse {
  DenseSet<SmallVector<const SCEV *, 2>, UniquifierDenseMapInfo> Uniquifier;

public:
  /// KindType - An enum for a kind of use, indicating what types of
  /// scaled and immediate operands it might support.
  enum KindType {
    Basic,   ///< A normal use, with no folding.
    Special, ///< A special case of basic, allowing -1 scales.
    Address, ///< An address use; folding according to TargetLowering
    ICmpZero ///< An equality icmp with both operands folded into one.
    // TODO: Add a generic icmp too?
  };

  KindType Kind;
  const Type *AccessTy;

  SmallVector<int64_t, 8> Offsets;
  int64_t MinOffset;
  int64_t MaxOffset;

  /// AllFixupsOutsideLoop - This records whether all of the fixups using this
  /// LSRUse are outside of the loop, in which case some special-case heuristics
  /// may be used.
  bool AllFixupsOutsideLoop;

  /// Formulae - A list of ways to build a value that can satisfy this user.
  /// After the list is populated, one of these is selected heuristically and
  /// used to formulate a replacement for OperandValToReplace in UserInst.
  SmallVector<Formula, 12> Formulae;

  /// Regs - The set of register candidates used by all formulae in this LSRUse.
  SmallPtrSet<const SCEV *, 4> Regs;

  LSRUse(KindType K, const Type *T) : Kind(K), AccessTy(T),
                                      MinOffset(INT64_MAX),
                                      MaxOffset(INT64_MIN),
                                      AllFixupsOutsideLoop(true) {}

  bool HasFormulaWithSameRegs(const Formula &F) const;
  bool InsertFormula(const Formula &F);
  void DeleteFormula(Formula &F);
  void RecomputeRegs(size_t LUIdx, RegUseTracker &RegUses);

  void check() const;

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

/// HasFormulaWithSameRegs - Test whether this use has a formula which has the
/// same registers as the given formula.
bool LSRUse::HasFormulaWithSameRegs(const Formula &F) const {
  SmallVector<const SCEV *, 2> Key = F.BaseRegs;
  if (F.ScaledReg) Key.push_back(F.ScaledReg);
  // Unstable sort by host order ok, because this is only used for uniquifying.
  std::sort(Key.begin(), Key.end());
  return Uniquifier.count(Key);
}
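
// The uniquifying key is just the register multiset, so two formulae that
// differ only in immediates map to the same key (illustrative):
//
//   F1: 8 + reg(%a) + reg(%b)     Key: {%a, %b}  (sorted by host pointer order)
//   F2: 16 + reg(%b) + reg(%a)    Key: {%a, %b}
//
// HasFormulaWithSameRegs(F2) returns true once F1 has been inserted below.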

/// InsertFormula - If the given formula has not yet been inserted, add it to
/// the list, and return true. Return false otherwise.
bool LSRUse::InsertFormula(const Formula &F) {
  SmallVector<const SCEV *, 2> Key = F.BaseRegs;
  if (F.ScaledReg) Key.push_back(F.ScaledReg);
  // Unstable sort by host order ok, because this is only used for uniquifying.
  std::sort(Key.begin(), Key.end());

  if (!Uniquifier.insert(Key).second)
    return false;

  // Using a register to hold the value of 0 is not profitable.
  assert((!F.ScaledReg || !F.ScaledReg->isZero()) &&
         "Zero allocated in a scaled register!");
#ifndef NDEBUG
  for (SmallVectorImpl<const SCEV *>::const_iterator I =
       F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I)
    assert(!(*I)->isZero() && "Zero allocated in a base register!");
#endif

  // Add the formula to the list.
  Formulae.push_back(F);

  // Record registers now being used by this use.
  if (F.ScaledReg) Regs.insert(F.ScaledReg);
  Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());

  return true;
}

/// DeleteFormula - Remove the given formula from this use's list.
void LSRUse::DeleteFormula(Formula &F) {
  if (&F != &Formulae.back())
    std::swap(F, Formulae.back());
  Formulae.pop_back();
  assert(!Formulae.empty() && "LSRUse has no formulae left!");
}

/// RecomputeRegs - Recompute the Regs field, and update RegUses.
void LSRUse::RecomputeRegs(size_t LUIdx, RegUseTracker &RegUses) {
  // Now that we've filtered out some formulae, recompute the Regs set.
  SmallPtrSet<const SCEV *, 4> OldRegs = Regs;
  Regs.clear();
  for (SmallVectorImpl<Formula>::const_iterator I = Formulae.begin(),
       E = Formulae.end(); I != E; ++I) {
    const Formula &F = *I;
    if (F.ScaledReg) Regs.insert(F.ScaledReg);
    Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
  }

  // Update the RegTracker.
  for (SmallPtrSet<const SCEV *, 4>::iterator I = OldRegs.begin(),
       E = OldRegs.end(); I != E; ++I)
    if (!Regs.count(*I))
      RegUses.DropRegister(*I, LUIdx);
}

void LSRUse::print(raw_ostream &OS) const {
  OS << "LSR Use: Kind=";
  switch (Kind) {
  case Basic:    OS << "Basic"; break;
  case Special:  OS << "Special"; break;
  case ICmpZero: OS << "ICmpZero"; break;
  case Address:
    OS << "Address of ";
    if (AccessTy->isPointerTy())
      OS << "pointer"; // the full pointer type could be really verbose
    else
      OS << *AccessTy;
  }

  OS << ", Offsets={";
  for (SmallVectorImpl<int64_t>::const_iterator I = Offsets.begin(),
       E = Offsets.end(); I != E; ++I) {
    OS << *I;
    if (next(I) != E)
      OS << ',';
  }
  OS << '}';

  if (AllFixupsOutsideLoop)
    OS << ", all-fixups-outside-loop";
}

void LSRUse::dump() const {
  print(errs()); errs() << '\n';
}

/// isLegalUse - Test whether the use described by AM is "legal", meaning it can
/// be completely folded into the user instruction at isel time. This includes
/// address-mode folding and special icmp tricks.
static bool isLegalUse(const TargetLowering::AddrMode &AM,
                       LSRUse::KindType Kind, const Type *AccessTy,
                       const TargetLowering *TLI) {
  switch (Kind) {
  case LSRUse::Address:
    // If we have low-level target information, ask the target if it can
    // completely fold this address.
    if (TLI) return TLI->isLegalAddressingMode(AM, AccessTy);

    // Otherwise, just guess that reg+reg addressing is legal.
    return !AM.BaseGV && AM.BaseOffs == 0 && AM.Scale <= 1;

  case LSRUse::ICmpZero:
    // There's not even a target hook for querying whether it would be legal to
    // fold a GV into an ICmp.
    if (AM.BaseGV)
      return false;

    // ICmp only has two operands; don't allow more than two non-trivial parts.
    if (AM.Scale != 0 && AM.HasBaseReg && AM.BaseOffs != 0)
      return false;

    // ICmp only supports no scale or a -1 scale, as we can "fold" a -1 scale by
    // putting the scaled register in the other operand of the icmp.
    if (AM.Scale != 0 && AM.Scale != -1)
      return false;

    // If we have low-level target information, ask the target if it can fold an
    // integer immediate on an icmp.
    if (AM.BaseOffs != 0) {
      if (TLI) return TLI->isLegalICmpImmediate(-AM.BaseOffs);
      return false;
    }

    return true;

  case LSRUse::Basic:
    // Only handle single-register values.
    return !AM.BaseGV && AM.Scale == 0 && AM.BaseOffs == 0;

  case LSRUse::Special:
    // Only handle -1 scales, or no scale.
    return AM.Scale == 0 || AM.Scale == -1;
  }

  return false;
}

static bool isLegalUse(TargetLowering::AddrMode AM,
                       int64_t MinOffset, int64_t MaxOffset,
                       LSRUse::KindType Kind, const Type *AccessTy,
                       const TargetLowering *TLI) {
  // Check for overflow.
  if (((int64_t)((uint64_t)AM.BaseOffs + MinOffset) > AM.BaseOffs) !=
      (MinOffset > 0))
    return false;
  AM.BaseOffs = (uint64_t)AM.BaseOffs + MinOffset;
  if (isLegalUse(AM, Kind, AccessTy, TLI)) {
    AM.BaseOffs = (uint64_t)AM.BaseOffs - MinOffset;
    // Check for overflow.
    if (((int64_t)((uint64_t)AM.BaseOffs + MaxOffset) > AM.BaseOffs) !=
        (MaxOffset > 0))
      return false;
    AM.BaseOffs = (uint64_t)AM.BaseOffs + MaxOffset;
    return isLegalUse(AM, Kind, AccessTy, TLI);
  }
  return false;
}

static bool isAlwaysFoldable(int64_t BaseOffs,
                             GlobalValue *BaseGV,
                             bool HasBaseReg,
                             LSRUse::KindType Kind, const Type *AccessTy,
                             const TargetLowering *TLI) {
  // Fast-path: zero is always foldable.
  if (BaseOffs == 0 && !BaseGV) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  TargetLowering::AddrMode AM;
  AM.BaseOffs = BaseOffs;
  AM.BaseGV = BaseGV;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Kind == LSRUse::ICmpZero ? -1 : 1;

  // Canonicalize a scale of 1 to a base register if the formula doesn't
  // already have a base register.
  if (!AM.HasBaseReg && AM.Scale == 1) {
    AM.Scale = 0;
    AM.HasBaseReg = true;
  }

  return isLegalUse(AM, Kind, AccessTy, TLI);
}

static bool isAlwaysFoldable(const SCEV *S,
                             int64_t MinOffset, int64_t MaxOffset,
                             bool HasBaseReg,
                             LSRUse::KindType Kind, const Type *AccessTy,
                             const TargetLowering *TLI,
                             ScalarEvolution &SE) {
  // Fast-path: zero is always foldable.
  if (S->isZero()) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  int64_t BaseOffs = ExtractImmediate(S, SE);
  GlobalValue *BaseGV = ExtractSymbol(S, SE);

  // If there's anything else involved, it's not foldable.
  if (!S->isZero()) return false;

  // Fast-path: zero is always foldable.
  if (BaseOffs == 0 && !BaseGV) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  TargetLowering::AddrMode AM;
  AM.BaseOffs = BaseOffs;
  AM.BaseGV = BaseGV;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Kind == LSRUse::ICmpZero ? -1 : 1;

  return isLegalUse(AM, MinOffset, MaxOffset, Kind, AccessTy, TLI);
}
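
// The range form above only probes the two extremes. For an Address use with
// Offsets={-8,0,16} (so MinOffset=-8, MaxOffset=16) and a candidate formula
// with AM.BaseOffs=4, the checks performed are (illustrative):
//
//   isLegalUse(AM with BaseOffs = 4 + (-8) = -4, ...)  // MinOffset extreme
//   isLegalUse(AM with BaseOffs = 4 + 16   = 20, ...)  // MaxOffset extreme
//
// on the assumption that targets accept contiguous immediate ranges, so the
// interior offsets need not be tested individually.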

namespace {

/// UseMapDenseMapInfo - A DenseMapInfo implementation for holding
/// DenseMaps and DenseSets of pairs of const SCEV* and LSRUse::Kind.
struct UseMapDenseMapInfo {
  static std::pair<const SCEV *, LSRUse::KindType> getEmptyKey() {
    return std::make_pair(reinterpret_cast<const SCEV *>(-1), LSRUse::Basic);
  }

  static std::pair<const SCEV *, LSRUse::KindType> getTombstoneKey() {
    return std::make_pair(reinterpret_cast<const SCEV *>(-2), LSRUse::Basic);
  }

  static unsigned
  getHashValue(const std::pair<const SCEV *, LSRUse::KindType> &V) {
    unsigned Result = DenseMapInfo<const SCEV *>::getHashValue(V.first);
    Result ^= DenseMapInfo<unsigned>::getHashValue(unsigned(V.second));
    return Result;
  }

  static bool isEqual(const std::pair<const SCEV *, LSRUse::KindType> &LHS,
                      const std::pair<const SCEV *, LSRUse::KindType> &RHS) {
    return LHS == RHS;
  }
};

/// FormulaSorter - This class implements an ordering for formulae which sorts
/// them by their standalone cost.
class FormulaSorter {
  /// These two sets are kept empty, so that we compute standalone costs.
  DenseSet<const SCEV *> VisitedRegs;
  SmallPtrSet<const SCEV *, 16> Regs;
  Loop *L;
  LSRUse *LU;
  ScalarEvolution &SE;
  DominatorTree &DT;

public:
  FormulaSorter(Loop *l, LSRUse &lu, ScalarEvolution &se, DominatorTree &dt)
    : L(l), LU(&lu), SE(se), DT(dt) {}

  bool operator()(const Formula &A, const Formula &B) {
    Cost CostA;
    CostA.RateFormula(A, Regs, VisitedRegs, L, LU->Offsets, SE, DT);
    Regs.clear();
    Cost CostB;
    CostB.RateFormula(B, Regs, VisitedRegs, L, LU->Offsets, SE, DT);
    Regs.clear();
    return CostA < CostB;
  }
};

/// LSRInstance - This class holds state for the main loop strength reduction
/// logic.
class LSRInstance {
  IVUsers &IU;
  ScalarEvolution &SE;
  DominatorTree &DT;
  LoopInfo &LI;
  const TargetLowering *const TLI;
  Loop *const L;
  bool Changed;

  /// IVIncInsertPos - This is the insert position that the current loop's
  /// induction variable increment should be placed. In simple loops, this is
  /// the latch block's terminator. But in more complicated cases, this is a
  /// position which will dominate all the in-loop post-increment users.
  Instruction *IVIncInsertPos;

  /// Factors - Interesting factors between use strides.
  SmallSetVector<int64_t, 8> Factors;

  /// Types - Interesting use types, to facilitate truncation reuse.
  SmallSetVector<const Type *, 4> Types;

  /// Fixups - The list of operands which are to be replaced.
  SmallVector<LSRFixup, 16> Fixups;

  /// Uses - The list of interesting uses.
  SmallVector<LSRUse, 16> Uses;

  /// RegUses - Track which uses use which register candidates.
  RegUseTracker RegUses;

  void OptimizeShadowIV();
  bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse);
  ICmpInst *OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse);
  void OptimizeLoopTermCond();

  void CollectInterestingTypesAndFactors();
  void CollectFixupsAndInitialFormulae();

  LSRFixup &getNewFixup() {
    Fixups.push_back(LSRFixup());
    return Fixups.back();
  }

  // Support for sharing of LSRUses between LSRFixups.
  typedef DenseMap<std::pair<const SCEV *, LSRUse::KindType>,
                   size_t,
                   UseMapDenseMapInfo> UseMapTy;
  UseMapTy UseMap;

  bool reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
                          LSRUse::KindType Kind, const Type *AccessTy);

  std::pair<size_t, int64_t> getUse(const SCEV *&Expr,
                                    LSRUse::KindType Kind,
                                    const Type *AccessTy);

  void DeleteUse(LSRUse &LU);

  LSRUse *FindUseWithSimilarFormula(const Formula &F, const LSRUse &OrigLU);

public:
  void InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
  void InsertSupplementalFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
  void CountRegisters(const Formula &F, size_t LUIdx);
  bool InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F);

  void CollectLoopInvariantFixupsAndFormulae();

  void GenerateReassociations(LSRUse &LU, unsigned LUIdx, Formula Base,
                              unsigned Depth = 0);
  void GenerateCombinations(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateCrossUseConstantOffsets();
  void GenerateAllReuseFormulae();

  void FilterOutUndesirableDedicatedRegisters();

  size_t EstimateSearchSpaceComplexity() const;
  void NarrowSearchSpaceUsingHeuristics();

  void SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
                    Cost &SolutionCost,
                    SmallVectorImpl<const Formula *> &Workspace,
                    const Cost &CurCost,
                    const SmallPtrSet<const SCEV *, 16> &CurRegs,
                    DenseSet<const SCEV *> &VisitedRegs) const;
  void Solve(SmallVectorImpl<const Formula *> &Solution) const;

  BasicBlock::iterator
    HoistInsertPosition(BasicBlock::iterator IP,
                        const SmallVectorImpl<Instruction *> &Inputs) const;
  BasicBlock::iterator AdjustInsertPositionForExpand(BasicBlock::iterator IP,
                                                     const LSRFixup &LF,
                                                     const LSRUse &LU) const;

  Value *Expand(const LSRFixup &LF,
                const Formula &F,
                BasicBlock::iterator IP,
                SCEVExpander &Rewriter,
                SmallVectorImpl<WeakVH> &DeadInsts) const;
  void RewriteForPHI(PHINode *PN, const LSRFixup &LF,
                     const Formula &F,
                     SCEVExpander &Rewriter,
                     SmallVectorImpl<WeakVH> &DeadInsts,
                     Pass *P) const;
  void Rewrite(const LSRFixup &LF,
               const Formula &F,
               SCEVExpander &Rewriter,
               SmallVectorImpl<WeakVH> &DeadInsts,
               Pass *P) const;
  void ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
                         Pass *P);

  LSRInstance(const TargetLowering *tli, Loop *l, Pass *P);

  bool getChanged() const { return Changed; }

  void print_factors_and_types(raw_ostream &OS) const;
  void print_fixups(raw_ostream &OS) const;
  void print_uses(raw_ostream &OS) const;
  void print(raw_ostream &OS) const;
  void dump() const;
};

}
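
// Reading the interface above top to bottom roughly gives the phase order the
// constructor drives (a sketch, inferred from the declarations):
//
//   OptimizeShadowIV / OptimizeLoopTermCond   // early IR cleanups
//   CollectInterestingTypesAndFactors
//   CollectFixupsAndInitialFormulae
//   GenerateAllReuseFormulae                  // grow each use's formula list
//   FilterOutUndesirableDedicatedRegisters
//   NarrowSearchSpaceUsingHeuristics
//   Solve                                     // pick one formula per use
//   ImplementSolution                         // expand and rewrite the users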
/// OptimizeShadowIV - If IV is used in an int-to-float cast
/// inside the loop then try to eliminate the cast operation.
void LSRInstance::OptimizeShadowIV() {
  const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return;

  for (IVUsers::const_iterator UI = IU.begin(), E = IU.end();
       UI != E; /* empty */) {
    IVUsers::const_iterator CandidateUI = UI;
    ++UI;
    Instruction *ShadowUse = CandidateUI->getUser();
    const Type *DestTy = NULL;

    /* If shadow use is an int->float cast then insert a second IV
       to eliminate this cast.

         for (unsigned i = 0; i < n; ++i)
           foo((double)i);

       is transformed into

         double d = 0.0;
         for (unsigned i = 0; i < n; ++i, ++d)
           foo(d);
    */
    if (UIToFPInst *UCast = dyn_cast<UIToFPInst>(CandidateUI->getUser()))
      DestTy = UCast->getDestTy();
    else if (SIToFPInst *SCast = dyn_cast<SIToFPInst>(CandidateUI->getUser()))
      DestTy = SCast->getDestTy();
    if (!DestTy) continue;

    if (TLI) {
      // If target does not support DestTy natively then do not apply
      // this transformation.
      EVT DVT = TLI->getValueType(DestTy);
      if (!TLI->isTypeLegal(DVT)) continue;
    }

    PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0));
    if (!PH) continue;
    if (PH->getNumIncomingValues() != 2) continue;

    const Type *SrcTy = PH->getType();
    int Mantissa = DestTy->getFPMantissaWidth();
    if (Mantissa == -1) continue;
    if ((int)SE.getTypeSizeInBits(SrcTy) > Mantissa)
      continue;

    unsigned Entry, Latch;
    if (PH->getIncomingBlock(0) == L->getLoopPreheader()) {
      Entry = 0;
      Latch = 1;
    } else {
      Entry = 1;
      Latch = 0;
    }

    ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry));
    if (!Init) continue;
    Constant *NewInit = ConstantFP::get(DestTy, Init->getZExtValue());

    BinaryOperator *Incr =
      dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch));
    if (!Incr) continue;
    if (Incr->getOpcode() != Instruction::Add
        && Incr->getOpcode() != Instruction::Sub)
      continue;

    /* Initialize new IV, double d = 0.0 in above example. */
    ConstantInt *C = NULL;
    if (Incr->getOperand(0) == PH)
      C = dyn_cast<ConstantInt>(Incr->getOperand(1));
    else if (Incr->getOperand(1) == PH)
      C = dyn_cast<ConstantInt>(Incr->getOperand(0));
    else
      continue;

    if (!C) continue;

    // Ignore negative constants, as the code below doesn't handle them
    // correctly. TODO: Remove this restriction.
    if (!C->getValue().isStrictlyPositive()) continue;

    /* Add new PHINode. */
    PHINode *NewPH = PHINode::Create(DestTy, "IV.S.", PH);

    /* Create new increment. '++d' in above example. */
    Constant *CFP = ConstantFP::get(DestTy, C->getZExtValue());
    BinaryOperator *NewIncr =
      BinaryOperator::Create(Incr->getOpcode() == Instruction::Add ?
                               Instruction::FAdd : Instruction::FSub,
                             NewPH, CFP, "IV.S.next.", Incr);

    NewPH->addIncoming(NewInit, PH->getIncomingBlock(Entry));
    NewPH->addIncoming(NewIncr, PH->getIncomingBlock(Latch));

    /* Remove cast operation */
    ShadowUse->replaceAllUsesWith(NewPH);
    ShadowUse->eraseFromParent();
    Changed = true;
    break;
  }
}

/// FindIVUserForCond - If Cond has an operand that is an expression of an IV,
/// set the IV user and stride information and return true, otherwise return
/// false.
bool LSRInstance::FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse) {
  for (IVUsers::iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI)
    if (UI->getUser() == Cond) {
      // NOTE: we could handle setcc instructions with multiple uses here, but
      // InstCombine does it as well for simple uses, and it's not clear that
      // it occurs enough in real life to handle.
      CondUse = UI;
      return true;
    }
  return false;
}

/// OptimizeMax - Rewrite the loop's terminating condition if it uses
/// a max computation.
///
/// This is a narrow solution to a specific, but acute, problem. For loops
/// like this:
///
///   i = 0;
///   do {
///     p[i] = 0.0;
///   } while (++i < n);
///
/// the trip count isn't just 'n', because 'n' might not be positive. And
/// unfortunately this can come up even for loops where the user didn't use
/// a C do-while loop. For example, seemingly well-behaved top-test loops
/// will commonly be lowered like this:
///
///   if (n > 0) {
///     i = 0;
///     do {
///       p[i] = 0.0;
///     } while (++i < n);
///   }
///
/// and then it's possible for subsequent optimization to obscure the if
/// test in such a way that indvars can't find it.
///
/// When indvars can't find the if test in loops like this, it creates a
/// max expression, which allows it to give the loop a canonical
/// induction variable:
///
///   i = 0;
///   max = n < 1 ? 1 : n;
///   do {
///     p[i] = 0.0;
///   } while (++i != max);
///
/// Canonical induction variables are necessary because the loop passes
/// are designed around them. The most obvious example of this is the
/// LoopInfo analysis, which doesn't remember trip count values. It
/// expects to be able to rediscover the trip count each time it is
/// needed, and it does this using a simple analysis that only succeeds if
/// the loop has a canonical induction variable.
///
/// However, when it comes time to generate code, the maximum operation
/// can be quite costly, especially if it's inside of an outer loop.
///
/// This function solves this problem by detecting this type of loop and
/// rewriting its condition from ICMP_NE back to ICMP_SLT, and deleting
/// the instructions for the maximum computation.
///
ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) {
  // Check that the loop matches the pattern we're looking for.
  if (Cond->getPredicate() != CmpInst::ICMP_EQ &&
      Cond->getPredicate() != CmpInst::ICMP_NE)
    return Cond;

  SelectInst *Sel = dyn_cast<SelectInst>(Cond->getOperand(1));
  if (!Sel || !Sel->hasOneUse()) return Cond;

  const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return Cond;
  const SCEV *One = SE.getConstant(BackedgeTakenCount->getType(), 1);

  // Add one to the backedge-taken count to get the trip count.
  const SCEV *IterationCount = SE.getAddExpr(BackedgeTakenCount, One);
  if (IterationCount != SE.getSCEV(Sel)) return Cond;

  // Check for a max calculation that matches the pattern. There's no check
  // for ICMP_ULE here because the comparison would be with zero, which
  // isn't interesting.
  CmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
  const SCEVNAryExpr *Max = 0;
  if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(BackedgeTakenCount)) {
    Pred = ICmpInst::ICMP_SLE;
    Max = S;
  } else if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(IterationCount)) {
    Pred = ICmpInst::ICMP_SLT;
    Max = S;
  } else if (const SCEVUMaxExpr *U = dyn_cast<SCEVUMaxExpr>(IterationCount)) {
    Pred = ICmpInst::ICMP_ULT;
    Max = U;
  } else {
    // No match; bail.
    return Cond;
  }

  // To handle a max with more than two operands, this optimization would
  // require additional checking and setup.
  if (Max->getNumOperands() != 2)
    return Cond;

  const SCEV *MaxLHS = Max->getOperand(0);
  const SCEV *MaxRHS = Max->getOperand(1);

  // ScalarEvolution canonicalizes constants to the left. For < and >, look
  // for a comparison with 1. For <= and >=, a comparison with zero.
  if (!MaxLHS ||
      (ICmpInst::isTrueWhenEqual(Pred) ? !MaxLHS->isZero() : (MaxLHS != One)))
    return Cond;

  // Check the relevant induction variable for conformance to
  // the pattern.
  const SCEV *IV = SE.getSCEV(Cond->getOperand(0));
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
  if (!AR || !AR->isAffine() ||
      AR->getStart() != One ||
      AR->getStepRecurrence(SE) != One)
    return Cond;

  assert(AR->getLoop() == L &&
         "Loop condition operand is an addrec in a different loop!");

  // Check the right operand of the select, and remember it, as it will
  // be used in the new comparison instruction.
  Value *NewRHS = 0;
  if (ICmpInst::isTrueWhenEqual(Pred)) {
    // Look for n+1, and grab n.
    if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(1)))
      if (isa<ConstantInt>(BO->getOperand(1)) &&
          cast<ConstantInt>(BO->getOperand(1))->isOne() &&
          SE.getSCEV(BO->getOperand(0)) == MaxRHS)
        NewRHS = BO->getOperand(0);
    if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(2)))
      if (isa<ConstantInt>(BO->getOperand(1)) &&
          cast<ConstantInt>(BO->getOperand(1))->isOne() &&
          SE.getSCEV(BO->getOperand(0)) == MaxRHS)
        NewRHS = BO->getOperand(0);
    if (!NewRHS)
      return Cond;
  } else if (SE.getSCEV(Sel->getOperand(1)) == MaxRHS)
    NewRHS = Sel->getOperand(1);
  else if (SE.getSCEV(Sel->getOperand(2)) == MaxRHS)
    NewRHS = Sel->getOperand(2);
  else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(MaxRHS))
    NewRHS = SU->getValue();
  else
    // Max doesn't match expected pattern.
    return Cond;

  // Determine the new comparison opcode. It may be signed or unsigned,
  // and the original comparison may be either equality or inequality.
  if (Cond->getPredicate() == CmpInst::ICMP_EQ)
    Pred = CmpInst::getInversePredicate(Pred);

  // Ok, everything looks ok to change the condition into an SLT or SGE and
  // delete the max calculation.
  ICmpInst *NewCond =
    new ICmpInst(Cond, Pred, Cond->getOperand(0), NewRHS, "scmp");

  // Delete the max calculation instructions.
1676 Cond->replaceAllUsesWith(NewCond); 1677 CondUse->setUser(NewCond); 1678 Instruction *Cmp = cast<Instruction>(Sel->getOperand(0)); 1679 Cond->eraseFromParent(); 1680 Sel->eraseFromParent(); 1681 if (Cmp->use_empty()) 1682 Cmp->eraseFromParent(); 1683 return NewCond; 1684} 1685 1686/// OptimizeLoopTermCond - Change loop terminating condition to use the 1687/// postinc iv when possible. 1688void 1689LSRInstance::OptimizeLoopTermCond() { 1690 SmallPtrSet<Instruction *, 4> PostIncs; 1691 1692 BasicBlock *LatchBlock = L->getLoopLatch(); 1693 SmallVector<BasicBlock*, 8> ExitingBlocks; 1694 L->getExitingBlocks(ExitingBlocks); 1695 1696 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) { 1697 BasicBlock *ExitingBlock = ExitingBlocks[i]; 1698 1699 // Get the terminating condition for the loop if possible. If we 1700 // can, we want to change it to use a post-incremented version of its 1701 // induction variable, to allow coalescing the live ranges for the IV into 1702 // one register value. 1703 1704 BranchInst *TermBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator()); 1705 if (!TermBr) 1706 continue; 1707 // FIXME: Overly conservative, termination condition could be an 'or' etc.. 1708 if (TermBr->isUnconditional() || !isa<ICmpInst>(TermBr->getCondition())) 1709 continue; 1710 1711 // Search IVUsesByStride to find Cond's IVUse if there is one. 1712 IVStrideUse *CondUse = 0; 1713 ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition()); 1714 if (!FindIVUserForCond(Cond, CondUse)) 1715 continue; 1716 1717 // If the trip count is computed in terms of a max (due to ScalarEvolution 1718 // being unable to find a sufficient guard, for example), change the loop 1719 // comparison to use SLT or ULT instead of NE. 1720 // One consequence of doing this now is that it disrupts the count-down 1721 // optimization. That's not always a bad thing though, because in such 1722 // cases it may still be worthwhile to avoid a max. 1723 Cond = OptimizeMax(Cond, CondUse); 1724 1725 // If this exiting block dominates the latch block, it may also use 1726 // the post-inc value if it won't be shared with other uses. 1727 // Check for dominance. 1728 if (!DT.dominates(ExitingBlock, LatchBlock)) 1729 continue; 1730 1731 // Conservatively avoid trying to use the post-inc value in non-latch 1732 // exits if there may be pre-inc users in intervening blocks. 1733 if (LatchBlock != ExitingBlock) 1734 for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) 1735 // Test if the use is reachable from the exiting block. This dominator 1736 // query is a conservative approximation of reachability. 1737 if (&*UI != CondUse && 1738 !DT.properlyDominates(UI->getUser()->getParent(), ExitingBlock)) { 1739 // Conservatively assume there may be reuse if the quotient of their 1740 // strides could be a legal scale. 1741 const SCEV *A = IU.getStride(*CondUse, L); 1742 const SCEV *B = IU.getStride(*UI, L); 1743 if (!A || !B) continue; 1744 if (SE.getTypeSizeInBits(A->getType()) != 1745 SE.getTypeSizeInBits(B->getType())) { 1746 if (SE.getTypeSizeInBits(A->getType()) > 1747 SE.getTypeSizeInBits(B->getType())) 1748 B = SE.getSignExtendExpr(B, A->getType()); 1749 else 1750 A = SE.getSignExtendExpr(A, B->getType()); 1751 } 1752 if (const SCEVConstant *D = 1753 dyn_cast_or_null<SCEVConstant>(getExactSDiv(B, A, SE))) { 1754 const ConstantInt *C = D->getValue(); 1755 // Stride of one or negative one can have reuse with non-addresses. 
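          // For example (a sketch with hypothetical strides): if the loop
          // condition's IV has stride 4 and this other use has stride 8, the
          // quotient is 2; if the target could fold a scale of 2 (or -2)
          // into this use's addressing mode, the two uses might share a
          // register, so the post-inc transformation is declined below.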
1756          if (C->isOne() || C->isAllOnesValue())
1757            goto decline_post_inc;
1758          // Avoid strides that don't fit in 64 bits or that would overflow
1758          // when negated.
1759          if (C->getValue().getMinSignedBits() >= 64 ||
1760              C->getValue().isMinSignedValue())
1761            goto decline_post_inc;
1762          // Without TLI, assume that any stride might be valid, and so any
1763          // use might be shared.
1764          if (!TLI)
1765            goto decline_post_inc;
1766          // Check for possible scaled-address reuse.
1767          const Type *AccessTy = getAccessType(UI->getUser());
1768          TargetLowering::AddrMode AM;
1769          AM.Scale = C->getSExtValue();
1770          if (TLI->isLegalAddressingMode(AM, AccessTy))
1771            goto decline_post_inc;
1772          AM.Scale = -AM.Scale;
1773          if (TLI->isLegalAddressingMode(AM, AccessTy))
1774            goto decline_post_inc;
1775        }
1776      }
1777
1778    DEBUG(dbgs() << " Change loop exiting icmp to use postinc iv: "
1779                 << *Cond << '\n');
1780
1781    // It's possible for the setcc instruction to be anywhere in the loop, and
1782    // possible for it to have multiple users. If it is not immediately before
1783    // the exiting block branch, move it.
1784    if (&*++BasicBlock::iterator(Cond) != TermBr) {
1785      if (Cond->hasOneUse()) {
1786        Cond->moveBefore(TermBr);
1787      } else {
1788        // Clone the terminating condition and insert it at the loop end.
1789        ICmpInst *OldCond = Cond;
1790        Cond = cast<ICmpInst>(Cond->clone());
1791        Cond->setName(L->getHeader()->getName() + ".termcond");
1792        ExitingBlock->getInstList().insert(TermBr, Cond);
1793
1794        // Clone the IVUse, as the old use still exists!
1795        CondUse = &IU.AddUser(Cond, CondUse->getOperandValToReplace());
1796        TermBr->replaceUsesOfWith(OldCond, Cond);
1797      }
1798    }
1799
1800    // If we get to here, we know that we can transform the setcc instruction to
1801    // use the post-incremented version of the IV, allowing us to coalesce the
1802    // live ranges for the IV correctly.
1803    CondUse->transformToPostInc(L);
1804    Changed = true;
1805
1806    PostIncs.insert(Cond);
1807  decline_post_inc:;
1808  }
1809
1810  // Determine an insertion point for the loop induction variable increment. It
1811  // must dominate all the post-inc comparisons we just set up, and it must
1812  // dominate the loop latch edge.
1813  IVIncInsertPos = L->getLoopLatch()->getTerminator();
1814  for (SmallPtrSet<Instruction *, 4>::const_iterator I = PostIncs.begin(),
1815       E = PostIncs.end(); I != E; ++I) {
1816    BasicBlock *BB =
1817      DT.findNearestCommonDominator(IVIncInsertPos->getParent(),
1818                                    (*I)->getParent());
1819    if (BB == (*I)->getParent())
1820      IVIncInsertPos = *I;
1821    else if (BB != IVIncInsertPos->getParent())
1822      IVIncInsertPos = BB->getTerminator();
1823  }
1824}
1825
1826/// reconcileNewOffset - Determine if the given use can accommodate a fixup
1827/// at the given offset and other details. If so, update the use and
1828/// return true.
1829bool
1830LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
1831                                LSRUse::KindType Kind, const Type *AccessTy) {
1832  int64_t NewMinOffset = LU.MinOffset;
1833  int64_t NewMaxOffset = LU.MaxOffset;
1834  const Type *NewAccessTy = AccessTy;
1835
1836  // Check for a mismatched kind. It's tempting to collapse mismatched kinds to
1837  // something conservative; however, that can pessimize the result, for
1838  // example when one of the uses has all of its uses outside the loop.
1839  if (LU.Kind != Kind)
1840    return false;
1841  // Conservatively assume HasBaseReg is true for now.
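  // A sketch of the reconciliation with hypothetical numbers: if this use
  // already covers fixup offsets [0, 8] and a new fixup arrives at offset
  // -4, the new fixup is accepted only if an immediate as large as
  // 8 - (-4) = 12 is always foldable for this kind of use; if so, MinOffset
  // becomes -4.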
1842  if (NewOffset < LU.MinOffset) {
1843    if (!isAlwaysFoldable(LU.MaxOffset - NewOffset, 0, HasBaseReg,
1844                          Kind, AccessTy, TLI))
1845      return false;
1846    NewMinOffset = NewOffset;
1847  } else if (NewOffset > LU.MaxOffset) {
1848    if (!isAlwaysFoldable(NewOffset - LU.MinOffset, 0, HasBaseReg,
1849                          Kind, AccessTy, TLI))
1850      return false;
1851    NewMaxOffset = NewOffset;
1852  }
1853  // Check for a mismatched access type, and fall back conservatively as needed.
1854  // TODO: Be less conservative when the type is similar and can use the same
1855  // addressing modes.
1856  if (Kind == LSRUse::Address && AccessTy != LU.AccessTy)
1857    NewAccessTy = Type::getVoidTy(AccessTy->getContext());
1858
1859  // Update the use.
1860  LU.MinOffset = NewMinOffset;
1861  LU.MaxOffset = NewMaxOffset;
1862  LU.AccessTy = NewAccessTy;
1863  if (NewOffset != LU.Offsets.back())
1864    LU.Offsets.push_back(NewOffset);
1865  return true;
1866}
1867
1868/// getUse - Return an LSRUse index and an offset value for a fixup which
1869/// needs the given expression, with the given kind and optional access type.
1870/// Either reuse an existing use or create a new one, as needed.
1871std::pair<size_t, int64_t>
1872LSRInstance::getUse(const SCEV *&Expr,
1873                    LSRUse::KindType Kind, const Type *AccessTy) {
1874  const SCEV *Copy = Expr;
1875  int64_t Offset = ExtractImmediate(Expr, SE);
1876
1877  // Some kinds of uses can't accept any offset; for example, Basic uses.
1878  if (!isAlwaysFoldable(Offset, 0, /*HasBaseReg=*/true, Kind, AccessTy, TLI)) {
1879    Expr = Copy;
1880    Offset = 0;
1881  }
1882
1883  std::pair<UseMapTy::iterator, bool> P =
1884    UseMap.insert(std::make_pair(std::make_pair(Expr, Kind), 0));
1885  if (!P.second) {
1886    // A use already existed with this base.
1887    size_t LUIdx = P.first->second;
1888    LSRUse &LU = Uses[LUIdx];
1889    if (reconcileNewOffset(LU, Offset, /*HasBaseReg=*/true, Kind, AccessTy))
1890      // Reuse this use.
1891      return std::make_pair(LUIdx, Offset);
1892  }
1893
1894  // Create a new use.
1895  size_t LUIdx = Uses.size();
1896  P.first->second = LUIdx;
1897  Uses.push_back(LSRUse(Kind, AccessTy));
1898  LSRUse &LU = Uses[LUIdx];
1899
1900  // We don't need to track redundant offsets, but we don't need to go out
1901  // of our way here to avoid them.
1902  if (LU.Offsets.empty() || Offset != LU.Offsets.back())
1903    LU.Offsets.push_back(Offset);
1904
1905  LU.MinOffset = Offset;
1906  LU.MaxOffset = Offset;
1907  return std::make_pair(LUIdx, Offset);
1908}
1909
1910/// DeleteUse - Delete the given use from the Uses list.
1911void LSRInstance::DeleteUse(LSRUse &LU) {
1912  if (&LU != &Uses.back())
1913    std::swap(LU, Uses.back());
1914  Uses.pop_back();
1915}
1916
1917/// FindUseWithSimilarFormula - Look for a use distinct from OrigLU which has
1918/// a formula with the same registers as the given formula.
1919LSRUse *
1920LSRInstance::FindUseWithSimilarFormula(const Formula &OrigF,
1921                                       const LSRUse &OrigLU) {
1922  // Search all uses for the formula. This could be more clever. Ignore
1923  // ICmpZero uses because they may contain formulae generated by
1924  // GenerateICmpZeroScales, in which case adding fixup offsets may
1925  // be invalid.
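  // A sketch of what counts as "similar" (hypothetical registers): a formula
  // reg(%p) + 16 in OrigLU matches a formula reg(%p) in another use, since
  // the base registers, scaled register, base GV, and scale all agree; only
  // the immediate differs, and that can be carried by fixup offsets instead.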
1926  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
1927    LSRUse &LU = Uses[LUIdx];
1928    if (&LU != &OrigLU &&
1929        LU.Kind != LSRUse::ICmpZero &&
1930        LU.Kind == OrigLU.Kind && OrigLU.AccessTy == LU.AccessTy &&
1931        LU.HasFormulaWithSameRegs(OrigF)) {
1932      for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(),
1933           E = LU.Formulae.end(); I != E; ++I) {
1934        const Formula &F = *I;
1935        if (F.BaseRegs == OrigF.BaseRegs &&
1936            F.ScaledReg == OrigF.ScaledReg &&
1937            F.AM.BaseGV == OrigF.AM.BaseGV &&
1938            F.AM.Scale == OrigF.AM.Scale) {
1940          if (F.AM.BaseOffs == 0)
1941            return &LU;
1942          break;
1943        }
1944      }
1945    }
1946  }
1947
1948  return 0;
1949}
1950
1951void LSRInstance::CollectInterestingTypesAndFactors() {
1952  SmallSetVector<const SCEV *, 4> Strides;
1953
1954  // Collect interesting types and strides.
1955  SmallVector<const SCEV *, 4> Worklist;
1956  for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) {
1957    const SCEV *Expr = IU.getExpr(*UI);
1958
1959    // Collect interesting types.
1960    Types.insert(SE.getEffectiveSCEVType(Expr->getType()));
1961
1962    // Add strides for mentioned loops.
1963    Worklist.push_back(Expr);
1964    do {
1965      const SCEV *S = Worklist.pop_back_val();
1966      if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
1967        Strides.insert(AR->getStepRecurrence(SE));
1968        Worklist.push_back(AR->getStart());
1969      } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
1970        Worklist.append(Add->op_begin(), Add->op_end());
1971      }
1972    } while (!Worklist.empty());
1973  }
1974
1975  // Compute interesting factors from the set of interesting strides.
1976  for (SmallSetVector<const SCEV *, 4>::const_iterator
1977       I = Strides.begin(), E = Strides.end(); I != E; ++I)
1978    for (SmallSetVector<const SCEV *, 4>::const_iterator NewStrideIter =
1979         next(I); NewStrideIter != E; ++NewStrideIter) {
1980      const SCEV *OldStride = *I;
1981      const SCEV *NewStride = *NewStrideIter;
1982
1983      if (SE.getTypeSizeInBits(OldStride->getType()) !=
1984          SE.getTypeSizeInBits(NewStride->getType())) {
1985        if (SE.getTypeSizeInBits(OldStride->getType()) >
1986            SE.getTypeSizeInBits(NewStride->getType()))
1987          NewStride = SE.getSignExtendExpr(NewStride, OldStride->getType());
1988        else
1989          OldStride = SE.getSignExtendExpr(OldStride, NewStride->getType());
1990      }
1991      if (const SCEVConstant *Factor =
1992            dyn_cast_or_null<SCEVConstant>(getExactSDiv(NewStride, OldStride,
1993                                                        SE, true))) {
1994        if (Factor->getValue()->getValue().getMinSignedBits() <= 64)
1995          Factors.insert(Factor->getValue()->getValue().getSExtValue());
1996      } else if (const SCEVConstant *Factor =
1997                   dyn_cast_or_null<SCEVConstant>(getExactSDiv(OldStride,
1998                                                               NewStride,
1999                                                               SE, true))) {
2000        if (Factor->getValue()->getValue().getMinSignedBits() <= 64)
2001          Factors.insert(Factor->getValue()->getValue().getSExtValue());
2002      }
2003    }
2004
2005  // If all uses use the same type, don't bother looking for truncation-based
2006  // reuse.
2007  if (Types.size() == 1)
2008    Types.clear();
2009
2010  DEBUG(print_factors_and_types(dbgs()));
2011}
2012
2013void LSRInstance::CollectFixupsAndInitialFormulae() {
2014  for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) {
2015    // Record the uses.
2016 LSRFixup &LF = getNewFixup(); 2017 LF.UserInst = UI->getUser(); 2018 LF.OperandValToReplace = UI->getOperandValToReplace(); 2019 LF.PostIncLoops = UI->getPostIncLoops(); 2020 2021 LSRUse::KindType Kind = LSRUse::Basic; 2022 const Type *AccessTy = 0; 2023 if (isAddressUse(LF.UserInst, LF.OperandValToReplace)) { 2024 Kind = LSRUse::Address; 2025 AccessTy = getAccessType(LF.UserInst); 2026 } 2027 2028 const SCEV *S = IU.getExpr(*UI); 2029 2030 // Equality (== and !=) ICmps are special. We can rewrite (i == N) as 2031 // (N - i == 0), and this allows (N - i) to be the expression that we work 2032 // with rather than just N or i, so we can consider the register 2033 // requirements for both N and i at the same time. Limiting this code to 2034 // equality icmps is not a problem because all interesting loops use 2035 // equality icmps, thanks to IndVarSimplify. 2036 if (ICmpInst *CI = dyn_cast<ICmpInst>(LF.UserInst)) 2037 if (CI->isEquality()) { 2038 // Swap the operands if needed to put the OperandValToReplace on the 2039 // left, for consistency. 2040 Value *NV = CI->getOperand(1); 2041 if (NV == LF.OperandValToReplace) { 2042 CI->setOperand(1, CI->getOperand(0)); 2043 CI->setOperand(0, NV); 2044 NV = CI->getOperand(1); 2045 Changed = true; 2046 } 2047 2048 // x == y --> x - y == 0 2049 const SCEV *N = SE.getSCEV(NV); 2050 if (N->isLoopInvariant(L)) { 2051 Kind = LSRUse::ICmpZero; 2052 S = SE.getMinusSCEV(N, S); 2053 } 2054 2055 // -1 and the negations of all interesting strides (except the negation 2056 // of -1) are now also interesting. 2057 for (size_t i = 0, e = Factors.size(); i != e; ++i) 2058 if (Factors[i] != -1) 2059 Factors.insert(-(uint64_t)Factors[i]); 2060 Factors.insert(-1); 2061 } 2062 2063 // Set up the initial formula for this use. 2064 std::pair<size_t, int64_t> P = getUse(S, Kind, AccessTy); 2065 LF.LUIdx = P.first; 2066 LF.Offset = P.second; 2067 LSRUse &LU = Uses[LF.LUIdx]; 2068 LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L); 2069 2070 // If this is the first use of this LSRUse, give it a formula. 2071 if (LU.Formulae.empty()) { 2072 InsertInitialFormula(S, LU, LF.LUIdx); 2073 CountRegisters(LU.Formulae.back(), LF.LUIdx); 2074 } 2075 } 2076 2077 DEBUG(print_fixups(dbgs())); 2078} 2079 2080/// InsertInitialFormula - Insert a formula for the given expression into 2081/// the given use, separating out loop-variant portions from loop-invariant 2082/// and loop-computable portions. 2083void 2084LSRInstance::InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx) { 2085 Formula F; 2086 F.InitialMatch(S, L, SE, DT); 2087 bool Inserted = InsertFormula(LU, LUIdx, F); 2088 assert(Inserted && "Initial formula already exists!"); (void)Inserted; 2089} 2090 2091/// InsertSupplementalFormula - Insert a simple single-register formula for 2092/// the given expression into the given use. 2093void 2094LSRInstance::InsertSupplementalFormula(const SCEV *S, 2095 LSRUse &LU, size_t LUIdx) { 2096 Formula F; 2097 F.BaseRegs.push_back(S); 2098 F.AM.HasBaseReg = true; 2099 bool Inserted = InsertFormula(LU, LUIdx, F); 2100 assert(Inserted && "Supplemental formula already exists!"); (void)Inserted; 2101} 2102 2103/// CountRegisters - Note which registers are used by the given formula, 2104/// updating RegUses. 
2105void LSRInstance::CountRegisters(const Formula &F, size_t LUIdx) { 2106 if (F.ScaledReg) 2107 RegUses.CountRegister(F.ScaledReg, LUIdx); 2108 for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(), 2109 E = F.BaseRegs.end(); I != E; ++I) 2110 RegUses.CountRegister(*I, LUIdx); 2111} 2112 2113/// InsertFormula - If the given formula has not yet been inserted, add it to 2114/// the list, and return true. Return false otherwise. 2115bool LSRInstance::InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F) { 2116 if (!LU.InsertFormula(F)) 2117 return false; 2118 2119 CountRegisters(F, LUIdx); 2120 return true; 2121} 2122 2123/// CollectLoopInvariantFixupsAndFormulae - Check for other uses of 2124/// loop-invariant values which we're tracking. These other uses will pin these 2125/// values in registers, making them less profitable for elimination. 2126/// TODO: This currently misses non-constant addrec step registers. 2127/// TODO: Should this give more weight to users inside the loop? 2128void 2129LSRInstance::CollectLoopInvariantFixupsAndFormulae() { 2130 SmallVector<const SCEV *, 8> Worklist(RegUses.begin(), RegUses.end()); 2131 SmallPtrSet<const SCEV *, 8> Inserted; 2132 2133 while (!Worklist.empty()) { 2134 const SCEV *S = Worklist.pop_back_val(); 2135 2136 if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) 2137 Worklist.append(N->op_begin(), N->op_end()); 2138 else if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) 2139 Worklist.push_back(C->getOperand()); 2140 else if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) { 2141 Worklist.push_back(D->getLHS()); 2142 Worklist.push_back(D->getRHS()); 2143 } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 2144 if (!Inserted.insert(U)) continue; 2145 const Value *V = U->getValue(); 2146 if (const Instruction *Inst = dyn_cast<Instruction>(V)) { 2147 // Look for instructions defined outside the loop. 2148 if (L->contains(Inst)) continue; 2149 } else if (isa<UndefValue>(V)) 2150 // Undef doesn't have a live range, so it doesn't matter. 2151 continue; 2152 for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end(); 2153 UI != UE; ++UI) { 2154 const Instruction *UserInst = dyn_cast<Instruction>(*UI); 2155 // Ignore non-instructions. 2156 if (!UserInst) 2157 continue; 2158 // Ignore instructions in other functions (as can happen with 2159 // Constants). 2160 if (UserInst->getParent()->getParent() != L->getHeader()->getParent()) 2161 continue; 2162 // Ignore instructions not dominated by the loop. 2163 const BasicBlock *UseBB = !isa<PHINode>(UserInst) ? 2164 UserInst->getParent() : 2165 cast<PHINode>(UserInst)->getIncomingBlock( 2166 PHINode::getIncomingValueNumForOperand(UI.getOperandNo())); 2167 if (!DT.dominates(L->getHeader(), UseBB)) 2168 continue; 2169 // Ignore uses which are part of other SCEV expressions, to avoid 2170 // analyzing them multiple times. 2171 if (SE.isSCEVable(UserInst->getType())) { 2172 const SCEV *UserS = SE.getSCEV(const_cast<Instruction *>(UserInst)); 2173 // If the user is a no-op, look through to its uses. 2174 if (!isa<SCEVUnknown>(UserS)) 2175 continue; 2176 if (UserS == U) { 2177 Worklist.push_back( 2178 SE.getUnknown(const_cast<Instruction *>(UserInst))); 2179 continue; 2180 } 2181 } 2182 // Ignore icmp instructions which are already being analyzed. 
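        // For example (a sketch): given "icmp slt i64 %iv, %n" where %n is
        // the loop-invariant value being traced here, the other operand %iv
        // has a computable evolution in L, so this icmp is already covered
        // by the IVUsers analysis and is skipped rather than recorded twice.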
2183 if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UserInst)) { 2184 unsigned OtherIdx = !UI.getOperandNo(); 2185 Value *OtherOp = const_cast<Value *>(ICI->getOperand(OtherIdx)); 2186 if (SE.getSCEV(OtherOp)->hasComputableLoopEvolution(L)) 2187 continue; 2188 } 2189 2190 LSRFixup &LF = getNewFixup(); 2191 LF.UserInst = const_cast<Instruction *>(UserInst); 2192 LF.OperandValToReplace = UI.getUse(); 2193 std::pair<size_t, int64_t> P = getUse(S, LSRUse::Basic, 0); 2194 LF.LUIdx = P.first; 2195 LF.Offset = P.second; 2196 LSRUse &LU = Uses[LF.LUIdx]; 2197 LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L); 2198 InsertSupplementalFormula(U, LU, LF.LUIdx); 2199 CountRegisters(LU.Formulae.back(), Uses.size() - 1); 2200 break; 2201 } 2202 } 2203 } 2204} 2205 2206/// CollectSubexprs - Split S into subexpressions which can be pulled out into 2207/// separate registers. If C is non-null, multiply each subexpression by C. 2208static void CollectSubexprs(const SCEV *S, const SCEVConstant *C, 2209 SmallVectorImpl<const SCEV *> &Ops, 2210 SmallVectorImpl<const SCEV *> &UninterestingOps, 2211 const Loop *L, 2212 ScalarEvolution &SE) { 2213 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 2214 // Break out add operands. 2215 for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end(); 2216 I != E; ++I) 2217 CollectSubexprs(*I, C, Ops, UninterestingOps, L, SE); 2218 return; 2219 } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) { 2220 // Split a non-zero base out of an addrec. 2221 if (!AR->getStart()->isZero()) { 2222 CollectSubexprs(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0), 2223 AR->getStepRecurrence(SE), 2224 AR->getLoop()), 2225 C, Ops, UninterestingOps, L, SE); 2226 CollectSubexprs(AR->getStart(), C, Ops, UninterestingOps, L, SE); 2227 return; 2228 } 2229 } else if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 2230 // Break (C * (a + b + c)) into C*a + C*b + C*c. 2231 if (Mul->getNumOperands() == 2) 2232 if (const SCEVConstant *Op0 = 2233 dyn_cast<SCEVConstant>(Mul->getOperand(0))) { 2234 CollectSubexprs(Mul->getOperand(1), 2235 C ? cast<SCEVConstant>(SE.getMulExpr(C, Op0)) : Op0, 2236 Ops, UninterestingOps, L, SE); 2237 return; 2238 } 2239 } 2240 2241 // Otherwise use the value itself. Loop-variant "unknown" values are 2242 // uninteresting; we won't be able to do anything meaningful with them. 2243 if (!C && isa<SCEVUnknown>(S) && !S->isLoopInvariant(L)) 2244 UninterestingOps.push_back(S); 2245 else 2246 Ops.push_back(C ? SE.getMulExpr(C, S) : S); 2247} 2248 2249/// GenerateReassociations - Split out subexpressions from adds and the bases of 2250/// addrecs. 2251void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx, 2252 Formula Base, 2253 unsigned Depth) { 2254 // Arbitrarily cap recursion to protect compile time. 2255 if (Depth >= 3) return; 2256 2257 for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) { 2258 const SCEV *BaseReg = Base.BaseRegs[i]; 2259 2260 SmallVector<const SCEV *, 8> AddOps, UninterestingAddOps; 2261 CollectSubexprs(BaseReg, 0, AddOps, UninterestingAddOps, L, SE); 2262 2263 // Add any uninteresting values as one register, as we won't be able to 2264 // form any interesting reassociation opportunities with them. They'll 2265 // just have to be added inside the loop no matter what we do. 
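    // A sketch of what CollectSubexprs produces (hypothetical values): a
    // base register {(%a + 4),+,%s} is split into the subexpressions
    // {0,+,%s}, %a, and 4, and each candidate *J below is then tried as a
    // register of its own.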
2266    if (!UninterestingAddOps.empty())
2267      AddOps.push_back(SE.getAddExpr(UninterestingAddOps));
2268
2269    if (AddOps.size() == 1) continue;
2270
2271    for (SmallVectorImpl<const SCEV *>::const_iterator J = AddOps.begin(),
2272         JE = AddOps.end(); J != JE; ++J) {
2273      // Don't pull a constant into a register if the constant could be folded
2274      // into an immediate field.
2275      if (isAlwaysFoldable(*J, LU.MinOffset, LU.MaxOffset,
2276                           Base.getNumRegs() > 1,
2277                           LU.Kind, LU.AccessTy, TLI, SE))
2278        continue;
2279
2280      // Collect all operands except *J.
2281      SmallVector<const SCEV *, 8> InnerAddOps
2282        ( ((const SmallVector<const SCEV *, 8> &)AddOps).begin(), J);
2283      InnerAddOps.append
2284        (next(J), ((const SmallVector<const SCEV *, 8> &)AddOps).end());
2285
2286      // Don't leave just a constant behind in a register if the constant could
2287      // be folded into an immediate field.
2288      if (InnerAddOps.size() == 1 &&
2289          isAlwaysFoldable(InnerAddOps[0], LU.MinOffset, LU.MaxOffset,
2290                           Base.getNumRegs() > 1,
2291                           LU.Kind, LU.AccessTy, TLI, SE))
2292        continue;
2293
2294      const SCEV *InnerSum = SE.getAddExpr(InnerAddOps);
2295      if (InnerSum->isZero())
2296        continue;
2297      Formula F = Base;
2298      F.BaseRegs[i] = InnerSum;
2299      F.BaseRegs.push_back(*J);
2300      if (InsertFormula(LU, LUIdx, F))
2301        // If that formula hadn't been seen before, recurse to find more like
2302        // it.
2303        GenerateReassociations(LU, LUIdx, LU.Formulae.back(), Depth+1);
2304    }
2305  }
2306}
2307
2308/// GenerateCombinations - Generate a formula consisting of all of the
2309/// loop-dominating registers added into a single register.
2310void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx,
2311                                       Formula Base) {
2312  // This method is only interesting when there is more than one base register.
2313  if (Base.BaseRegs.size() <= 1) return;
2314
2315  Formula F = Base;
2316  F.BaseRegs.clear();
2317  SmallVector<const SCEV *, 4> Ops;
2318  for (SmallVectorImpl<const SCEV *>::const_iterator
2319       I = Base.BaseRegs.begin(), E = Base.BaseRegs.end(); I != E; ++I) {
2320    const SCEV *BaseReg = *I;
2321    if (BaseReg->properlyDominates(L->getHeader(), &DT) &&
2322        !BaseReg->hasComputableLoopEvolution(L))
2323      Ops.push_back(BaseReg);
2324    else
2325      F.BaseRegs.push_back(BaseReg);
2326  }
2327  if (Ops.size() > 1) {
2328    const SCEV *Sum = SE.getAddExpr(Ops);
2329    // TODO: If Sum is zero, it probably means ScalarEvolution missed an
2330    // opportunity to fold something. For now, just ignore such cases
2331    // rather than proceed with zero in a register.
2332    if (!Sum->isZero()) {
2333      F.BaseRegs.push_back(Sum);
2334      (void)InsertFormula(LU, LUIdx, F);
2335    }
2336  }
2337}
2338
2339/// GenerateSymbolicOffsets - Generate reuse formulae using symbolic offsets.
2340void LSRInstance::GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx,
2341                                          Formula Base) {
2342  // We can't add a symbolic offset if the address already contains one.
2343  if (Base.AM.BaseGV) return;
2344
2345  for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
2346    const SCEV *G = Base.BaseRegs[i];
2347    GlobalValue *GV = ExtractSymbol(G, SE);
2348    if (G->isZero() || !GV)
2349      continue;
2350    Formula F = Base;
2351    F.AM.BaseGV = GV;
2352    if (!isLegalUse(F.AM, LU.MinOffset, LU.MaxOffset,
2353                    LU.Kind, LU.AccessTy, TLI))
2354      continue;
2355    F.BaseRegs[i] = G;
2356    (void)InsertFormula(LU, LUIdx, F);
2357  }
2358}
2359
2360/// GenerateConstantOffsets - Generate reuse formulae using constant offsets.
2361void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx,
2362                                          Formula Base) {
2363  // TODO: For now, just add the min and max offset, because it usually isn't
2364  // worthwhile looking at everything in between.
2365  SmallVector<int64_t, 2> Worklist;
2366  Worklist.push_back(LU.MinOffset);
2367  if (LU.MaxOffset != LU.MinOffset)
2368    Worklist.push_back(LU.MaxOffset);
2369
2370  for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
2371    const SCEV *G = Base.BaseRegs[i];
2372
2373    for (SmallVectorImpl<int64_t>::const_iterator I = Worklist.begin(),
2374         E = Worklist.end(); I != E; ++I) {
2375      Formula F = Base;
2376      F.AM.BaseOffs = (uint64_t)Base.AM.BaseOffs - *I;
2377      if (isLegalUse(F.AM, LU.MinOffset - *I, LU.MaxOffset - *I,
2378                     LU.Kind, LU.AccessTy, TLI)) {
2379        // Add the offset to the base register.
2380        const SCEV *NewG = SE.getAddExpr(G, SE.getConstant(G->getType(), *I));
2381        // If it cancelled out, drop the base register, otherwise update it.
2382        if (NewG->isZero()) {
2383          std::swap(F.BaseRegs[i], F.BaseRegs.back());
2384          F.BaseRegs.pop_back();
2385        } else
2386          F.BaseRegs[i] = NewG;
2387
2388        (void)InsertFormula(LU, LUIdx, F);
2389      }
2390    }
2391
2392    int64_t Imm = ExtractImmediate(G, SE);
2393    if (G->isZero() || Imm == 0)
2394      continue;
2395    Formula F = Base;
2396    F.AM.BaseOffs = (uint64_t)F.AM.BaseOffs + Imm;
2397    if (!isLegalUse(F.AM, LU.MinOffset, LU.MaxOffset,
2398                    LU.Kind, LU.AccessTy, TLI))
2399      continue;
2400    F.BaseRegs[i] = G;
2401    (void)InsertFormula(LU, LUIdx, F);
2402  }
2403}
2404
2405/// GenerateICmpZeroScales - For ICmpZero, check to see if we can scale up
2406/// the comparison. For example, x == y -> x*c == y*c.
2407void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx,
2408                                         Formula Base) {
2409  if (LU.Kind != LSRUse::ICmpZero) return;
2410
2411  // Determine the integer type for the base formula.
2412  const Type *IntTy = Base.getType();
2413  if (!IntTy) return;
2414  if (SE.getTypeSizeInBits(IntTy) > 64) return;
2415
2416  // Don't do this if there is more than one offset.
2417  if (LU.MinOffset != LU.MaxOffset) return;
2418
2419  assert(!Base.AM.BaseGV && "ICmpZero use is not legal!");
2420
2421  // Check each interesting stride.
2422  for (SmallSetVector<int64_t, 8>::const_iterator
2423       I = Factors.begin(), E = Factors.end(); I != E; ++I) {
2424    int64_t Factor = *I;
2425
2426    // Check that the multiplication doesn't overflow.
2427    if (Base.AM.BaseOffs == INT64_MIN && Factor == -1)
2428      continue;
2429    int64_t NewBaseOffs = (uint64_t)Base.AM.BaseOffs * Factor;
2430    if (NewBaseOffs / Factor != Base.AM.BaseOffs)
2431      continue;
2432
2433    // Check that multiplying with the use offset doesn't overflow.
2434    int64_t Offset = LU.MinOffset;
2435    if (Offset == INT64_MIN && Factor == -1)
2436      continue;
2437    Offset = (uint64_t)Offset * Factor;
2438    if (Offset / Factor != LU.MinOffset)
2439      continue;
2440
2441    Formula F = Base;
2442    F.AM.BaseOffs = NewBaseOffs;
2443
2444    // Check that this scale is legal.
2445    if (!isLegalUse(F.AM, Offset, Offset, LU.Kind, LU.AccessTy, TLI))
2446      continue;
2447
2448    // Compensate for the use having MinOffset built into it.
2449    F.AM.BaseOffs = (uint64_t)F.AM.BaseOffs + Offset - LU.MinOffset;
2450
2451    const SCEV *FactorS = SE.getConstant(IntTy, Factor);
2452
2453    // Check that multiplying with each base register doesn't overflow.
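    // A sketch of the round-trip check below (hypothetical values): with
    // Factor = 2, a base register {0,+,4} becomes {0,+,8}; dividing {0,+,8}
    // by 2 with getExactSDiv must yield exactly {0,+,4} again, otherwise the
    // multiplication wrapped and the scaled formula would be incorrect.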
2454 for (size_t i = 0, e = F.BaseRegs.size(); i != e; ++i) { 2455 F.BaseRegs[i] = SE.getMulExpr(F.BaseRegs[i], FactorS); 2456 if (getExactSDiv(F.BaseRegs[i], FactorS, SE) != Base.BaseRegs[i]) 2457 goto next; 2458 } 2459 2460 // Check that multiplying with the scaled register doesn't overflow. 2461 if (F.ScaledReg) { 2462 F.ScaledReg = SE.getMulExpr(F.ScaledReg, FactorS); 2463 if (getExactSDiv(F.ScaledReg, FactorS, SE) != Base.ScaledReg) 2464 continue; 2465 } 2466 2467 // If we make it here and it's legal, add it. 2468 (void)InsertFormula(LU, LUIdx, F); 2469 next:; 2470 } 2471} 2472 2473/// GenerateScales - Generate stride factor reuse formulae by making use of 2474/// scaled-offset address modes, for example. 2475void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base) { 2476 // Determine the integer type for the base formula. 2477 const Type *IntTy = Base.getType(); 2478 if (!IntTy) return; 2479 2480 // If this Formula already has a scaled register, we can't add another one. 2481 if (Base.AM.Scale != 0) return; 2482 2483 // Check each interesting stride. 2484 for (SmallSetVector<int64_t, 8>::const_iterator 2485 I = Factors.begin(), E = Factors.end(); I != E; ++I) { 2486 int64_t Factor = *I; 2487 2488 Base.AM.Scale = Factor; 2489 Base.AM.HasBaseReg = Base.BaseRegs.size() > 1; 2490 // Check whether this scale is going to be legal. 2491 if (!isLegalUse(Base.AM, LU.MinOffset, LU.MaxOffset, 2492 LU.Kind, LU.AccessTy, TLI)) { 2493 // As a special-case, handle special out-of-loop Basic users specially. 2494 // TODO: Reconsider this special case. 2495 if (LU.Kind == LSRUse::Basic && 2496 isLegalUse(Base.AM, LU.MinOffset, LU.MaxOffset, 2497 LSRUse::Special, LU.AccessTy, TLI) && 2498 LU.AllFixupsOutsideLoop) 2499 LU.Kind = LSRUse::Special; 2500 else 2501 continue; 2502 } 2503 // For an ICmpZero, negating a solitary base register won't lead to 2504 // new solutions. 2505 if (LU.Kind == LSRUse::ICmpZero && 2506 !Base.AM.HasBaseReg && Base.AM.BaseOffs == 0 && !Base.AM.BaseGV) 2507 continue; 2508 // For each addrec base reg, apply the scale, if possible. 2509 for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) 2510 if (const SCEVAddRecExpr *AR = 2511 dyn_cast<SCEVAddRecExpr>(Base.BaseRegs[i])) { 2512 const SCEV *FactorS = SE.getConstant(IntTy, Factor); 2513 if (FactorS->isZero()) 2514 continue; 2515 // Divide out the factor, ignoring high bits, since we'll be 2516 // scaling the value back up in the end. 2517 if (const SCEV *Quotient = getExactSDiv(AR, FactorS, SE, true)) { 2518 // TODO: This could be optimized to avoid all the copying. 2519 Formula F = Base; 2520 F.ScaledReg = Quotient; 2521 F.DeleteBaseReg(F.BaseRegs[i]); 2522 (void)InsertFormula(LU, LUIdx, F); 2523 } 2524 } 2525 } 2526} 2527 2528/// GenerateTruncates - Generate reuse formulae from different IV types. 2529void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base) { 2530 // This requires TargetLowering to tell us which truncates are free. 2531 if (!TLI) return; 2532 2533 // Don't bother truncating symbolic values. 2534 if (Base.AM.BaseGV) return; 2535 2536 // Determine the integer type for the base formula. 
2537 const Type *DstTy = Base.getType(); 2538 if (!DstTy) return; 2539 DstTy = SE.getEffectiveSCEVType(DstTy); 2540 2541 for (SmallSetVector<const Type *, 4>::const_iterator 2542 I = Types.begin(), E = Types.end(); I != E; ++I) { 2543 const Type *SrcTy = *I; 2544 if (SrcTy != DstTy && TLI->isTruncateFree(SrcTy, DstTy)) { 2545 Formula F = Base; 2546 2547 if (F.ScaledReg) F.ScaledReg = SE.getAnyExtendExpr(F.ScaledReg, *I); 2548 for (SmallVectorImpl<const SCEV *>::iterator J = F.BaseRegs.begin(), 2549 JE = F.BaseRegs.end(); J != JE; ++J) 2550 *J = SE.getAnyExtendExpr(*J, SrcTy); 2551 2552 // TODO: This assumes we've done basic processing on all uses and 2553 // have an idea what the register usage is. 2554 if (!F.hasRegsUsedByUsesOtherThan(LUIdx, RegUses)) 2555 continue; 2556 2557 (void)InsertFormula(LU, LUIdx, F); 2558 } 2559 } 2560} 2561 2562namespace { 2563 2564/// WorkItem - Helper class for GenerateCrossUseConstantOffsets. It's used to 2565/// defer modifications so that the search phase doesn't have to worry about 2566/// the data structures moving underneath it. 2567struct WorkItem { 2568 size_t LUIdx; 2569 int64_t Imm; 2570 const SCEV *OrigReg; 2571 2572 WorkItem(size_t LI, int64_t I, const SCEV *R) 2573 : LUIdx(LI), Imm(I), OrigReg(R) {} 2574 2575 void print(raw_ostream &OS) const; 2576 void dump() const; 2577}; 2578 2579} 2580 2581void WorkItem::print(raw_ostream &OS) const { 2582 OS << "in formulae referencing " << *OrigReg << " in use " << LUIdx 2583 << " , add offset " << Imm; 2584} 2585 2586void WorkItem::dump() const { 2587 print(errs()); errs() << '\n'; 2588} 2589 2590/// GenerateCrossUseConstantOffsets - Look for registers which are a constant 2591/// distance apart and try to form reuse opportunities between them. 2592void LSRInstance::GenerateCrossUseConstantOffsets() { 2593 // Group the registers by their value without any added constant offset. 2594 typedef std::map<int64_t, const SCEV *> ImmMapTy; 2595 typedef DenseMap<const SCEV *, ImmMapTy> RegMapTy; 2596 RegMapTy Map; 2597 DenseMap<const SCEV *, SmallBitVector> UsedByIndicesMap; 2598 SmallVector<const SCEV *, 8> Sequence; 2599 for (RegUseTracker::const_iterator I = RegUses.begin(), E = RegUses.end(); 2600 I != E; ++I) { 2601 const SCEV *Reg = *I; 2602 int64_t Imm = ExtractImmediate(Reg, SE); 2603 std::pair<RegMapTy::iterator, bool> Pair = 2604 Map.insert(std::make_pair(Reg, ImmMapTy())); 2605 if (Pair.second) 2606 Sequence.push_back(Reg); 2607 Pair.first->second.insert(std::make_pair(Imm, *I)); 2608 UsedByIndicesMap[Reg] |= RegUses.getUsedByIndices(*I); 2609 } 2610 2611 // Now examine each set of registers with the same base value. Build up 2612 // a list of work to do and do the work in a separate step so that we're 2613 // not adding formulae and register counts while we're searching. 2614 SmallVector<WorkItem, 32> WorkItems; 2615 SmallSet<std::pair<size_t, int64_t>, 32> UniqueItems; 2616 for (SmallVectorImpl<const SCEV *>::const_iterator I = Sequence.begin(), 2617 E = Sequence.end(); I != E; ++I) { 2618 const SCEV *Reg = *I; 2619 const ImmMapTy &Imms = Map.find(Reg)->second; 2620 2621 // It's not worthwhile looking for reuse if there's only one offset. 2622 if (Imms.size() == 1) 2623 continue; 2624 2625 DEBUG(dbgs() << "Generating cross-use offsets for " << *Reg << ':'; 2626 for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end(); 2627 J != JE; ++J) 2628 dbgs() << ' ' << J->first; 2629 dbgs() << '\n'); 2630 2631 // Examine each offset. 
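    // For example (hypothetical registers): if the tracked registers include
    // {%p,+,4} and {(%p + 8),+,4}, ExtractImmediate strips the +8 so both
    // map to the same base; the difference 8 is then recorded as a WorkItem
    // so one register can serve both, with 8 folded into an immediate.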
2632    for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end();
2633         J != JE; ++J) {
2634      const SCEV *OrigReg = J->second;
2635
2636      int64_t JImm = J->first;
2637      const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(OrigReg);
2638
2639      if (!isa<SCEVConstant>(OrigReg) &&
2640          UsedByIndicesMap[Reg].count() == 1) {
2641        DEBUG(dbgs() << "Skipping cross-use reuse for " << *OrigReg << '\n');
2642        continue;
2643      }
2644
2645      // Conservatively examine offsets between this orig reg and a few
2646      // selected other orig regs.
2647      ImmMapTy::const_iterator OtherImms[] = {
2648        Imms.begin(), prior(Imms.end()),
2649        Imms.upper_bound((Imms.begin()->first + prior(Imms.end())->first) / 2)
2650      };
2651      for (size_t i = 0, e = array_lengthof(OtherImms); i != e; ++i) {
2652        ImmMapTy::const_iterator M = OtherImms[i];
2653        if (M == J || M == JE) continue;
2654
2655        // Compute the difference between the two.
2656        int64_t Imm = (uint64_t)JImm - M->first;
2657        for (int LUIdx = UsedByIndices.find_first(); LUIdx != -1;
2658             LUIdx = UsedByIndices.find_next(LUIdx))
2659          // Make a memo of this use, offset, and register tuple.
2660          if (UniqueItems.insert(std::make_pair(LUIdx, Imm)))
2661            WorkItems.push_back(WorkItem(LUIdx, Imm, OrigReg));
2662      }
2663    }
2664  }
2665
2666  Map.clear();
2667  Sequence.clear();
2668  UsedByIndicesMap.clear();
2669  UniqueItems.clear();
2670
2671  // Now iterate through the worklist and add new formulae.
2672  for (SmallVectorImpl<WorkItem>::const_iterator I = WorkItems.begin(),
2673       E = WorkItems.end(); I != E; ++I) {
2674    const WorkItem &WI = *I;
2675    size_t LUIdx = WI.LUIdx;
2676    LSRUse &LU = Uses[LUIdx];
2677    int64_t Imm = WI.Imm;
2678    const SCEV *OrigReg = WI.OrigReg;
2679
2680    const Type *IntTy = SE.getEffectiveSCEVType(OrigReg->getType());
2681    const SCEV *NegImmS = SE.getSCEV(ConstantInt::get(IntTy, -(uint64_t)Imm));
2682    unsigned BitWidth = SE.getTypeSizeInBits(IntTy);
2683
2684    // TODO: Use a more targeted data structure.
2685    for (size_t L = 0, LE = LU.Formulae.size(); L != LE; ++L) {
2686      const Formula &F = LU.Formulae[L];
2687      // Use the immediate in the scaled register.
2688      if (F.ScaledReg == OrigReg) {
2689        int64_t Offs = (uint64_t)F.AM.BaseOffs +
2690                       Imm * (uint64_t)F.AM.Scale;
2691        // Don't create 50 + reg(-50).
2692        if (F.referencesReg(SE.getSCEV(
2693              ConstantInt::get(IntTy, -(uint64_t)Offs))))
2694          continue;
2695        Formula NewF = F;
2696        NewF.AM.BaseOffs = Offs;
2697        if (!isLegalUse(NewF.AM, LU.MinOffset, LU.MaxOffset,
2698                        LU.Kind, LU.AccessTy, TLI))
2699          continue;
2700        NewF.ScaledReg = SE.getAddExpr(NegImmS, NewF.ScaledReg);
2701
2702        // If the new scale is a constant in a register, and adding the constant
2703        // value to the immediate would produce a value closer to zero than the
2704        // immediate itself, then the formula isn't worthwhile.
2705        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(NewF.ScaledReg))
2706          if (C->getValue()->getValue().isNegative() !=
2707                (NewF.AM.BaseOffs < 0) &&
2708              (C->getValue()->getValue().abs() * APInt(BitWidth, F.AM.Scale))
2709                .ule(abs64(NewF.AM.BaseOffs)))
2710            continue;
2711
2712        // OK, looks good.
2713        (void)InsertFormula(LU, LUIdx, NewF);
2714      } else {
2715        // Use the immediate in a base register.
2716        for (size_t N = 0, NE = F.BaseRegs.size(); N != NE; ++N) {
2717          const SCEV *BaseReg = F.BaseRegs[N];
2718          if (BaseReg != OrigReg)
2719            continue;
2720          Formula NewF = F;
2721          NewF.AM.BaseOffs = (uint64_t)NewF.AM.BaseOffs + Imm;
2722          if (!isLegalUse(NewF.AM, LU.MinOffset, LU.MaxOffset,
2723                          LU.Kind, LU.AccessTy, TLI))
2724            continue;
2725          NewF.BaseRegs[N] = SE.getAddExpr(NegImmS, BaseReg);
2726
2727          // If the new formula has a constant in a register, and adding the
2728          // constant value to the immediate would produce a value closer to
2729          // zero than the immediate itself, then the formula isn't worthwhile.
2730          for (SmallVectorImpl<const SCEV *>::const_iterator
2731               J = NewF.BaseRegs.begin(), JE = NewF.BaseRegs.end();
2732               J != JE; ++J)
2733            if (const SCEVConstant *C = dyn_cast<SCEVConstant>(*J))
2734              if ((C->getValue()->getValue() + NewF.AM.BaseOffs).abs().slt(
2735                    abs64(NewF.AM.BaseOffs)) &&
2736                  (C->getValue()->getValue() +
2737                   NewF.AM.BaseOffs).countTrailingZeros() >=
2738                    CountTrailingZeros_64(NewF.AM.BaseOffs))
2739                goto skip_formula;
2740
2741          // Ok, looks good.
2742          (void)InsertFormula(LU, LUIdx, NewF);
2743          break;
2744        skip_formula:;
2745        }
2746      }
2747    }
2748  }
2749}
2750
2751/// GenerateAllReuseFormulae - Generate formulae for each use.
2752void
2753LSRInstance::GenerateAllReuseFormulae() {
2754  // This is split into multiple loops so that hasRegsUsedByUsesOtherThan
2755  // queries are more precise.
2756  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
2757    LSRUse &LU = Uses[LUIdx];
2758    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
2759      GenerateReassociations(LU, LUIdx, LU.Formulae[i]);
2760    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
2761      GenerateCombinations(LU, LUIdx, LU.Formulae[i]);
2762  }
2763  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
2764    LSRUse &LU = Uses[LUIdx];
2765    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
2766      GenerateSymbolicOffsets(LU, LUIdx, LU.Formulae[i]);
2767    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
2768      GenerateConstantOffsets(LU, LUIdx, LU.Formulae[i]);
2769    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
2770      GenerateICmpZeroScales(LU, LUIdx, LU.Formulae[i]);
2771    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
2772      GenerateScales(LU, LUIdx, LU.Formulae[i]);
2773  }
2774  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
2775    LSRUse &LU = Uses[LUIdx];
2776    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
2777      GenerateTruncates(LU, LUIdx, LU.Formulae[i]);
2778  }
2779
2780  GenerateCrossUseConstantOffsets();
2781}
2782
2783/// If there are multiple formulae with the same set of registers used
2784/// by other uses, pick the best one and delete the others.
2785void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
2786#ifndef NDEBUG
2787  bool ChangedFormulae = false;
2788#endif
2789
2790  // Collect the best formula for each unique set of shared registers. This
2791  // is reset for each use.
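  // A sketch of the key (hypothetical registers): if this use has formulae
  // reg({0,+,4}) + reg(%p) and reg({0,+,2}) + reg(%p), and only %p is also
  // used by other uses, both formulae get the key {%p}, and only the
  // better-rated of the two survives the filtering below.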
2792  typedef DenseMap<SmallVector<const SCEV *, 2>, size_t, UniquifierDenseMapInfo>
2793    BestFormulaeTy;
2794  BestFormulaeTy BestFormulae;
2795
2796  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
2797    LSRUse &LU = Uses[LUIdx];
2798    FormulaSorter Sorter(L, LU, SE, DT);
2799    DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs()); dbgs() << '\n');
2800
2801    bool Any = false;
2802    for (size_t FIdx = 0, NumForms = LU.Formulae.size();
2803         FIdx != NumForms; ++FIdx) {
2804      Formula &F = LU.Formulae[FIdx];
2805
2806      SmallVector<const SCEV *, 2> Key;
2807      for (SmallVectorImpl<const SCEV *>::const_iterator J = F.BaseRegs.begin(),
2808           JE = F.BaseRegs.end(); J != JE; ++J) {
2809        const SCEV *Reg = *J;
2810        if (RegUses.isRegUsedByUsesOtherThan(Reg, LUIdx))
2811          Key.push_back(Reg);
2812      }
2813      if (F.ScaledReg &&
2814          RegUses.isRegUsedByUsesOtherThan(F.ScaledReg, LUIdx))
2815        Key.push_back(F.ScaledReg);
2816      // An unstable sort by host pointer order is OK here, because it is only
2817      // used for uniquifying.
2818      std::sort(Key.begin(), Key.end());
2819
2820      std::pair<BestFormulaeTy::const_iterator, bool> P =
2821        BestFormulae.insert(std::make_pair(Key, FIdx));
2822      if (!P.second) {
2823        Formula &Best = LU.Formulae[P.first->second];
2824        if (Sorter.operator()(F, Best))
2825          std::swap(F, Best);
2826        DEBUG(dbgs() << " Filtering out formula "; F.print(dbgs());
2827              dbgs() << "\n"
2828                        " in favor of formula "; Best.print(dbgs());
2829              dbgs() << '\n');
2830#ifndef NDEBUG
2831        ChangedFormulae = true;
2832#endif
2833        LU.DeleteFormula(F);
2834        --FIdx;
2835        --NumForms;
2836        Any = true;
2837        continue;
2838      }
2839    }
2840
2841    // Now that we've filtered out some formulae, recompute the Regs set.
2842    if (Any)
2843      LU.RecomputeRegs(LUIdx, RegUses);
2844
2845    // Reset this to prepare for the next use.
2846    BestFormulae.clear();
2847  }
2848
2849  DEBUG(if (ChangedFormulae) {
2850          dbgs() << "\n"
2851                    "After filtering out undesirable candidates:\n";
2852          print_uses(dbgs());
2853        });
2854}
2855
2856// This is a rough guess that seems to work fairly well.
2857static const size_t ComplexityLimit = UINT16_MAX;
2858
2859/// EstimateSearchSpaceComplexity - Estimate the worst-case number of
2860/// solutions the solver might have to consider. It almost never considers
2861/// this many solutions because it prunes the search space, but the pruning
2862/// isn't always sufficient.
2863size_t LSRInstance::EstimateSearchSpaceComplexity() const {
2864  uint32_t Power = 1;
2865  for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
2866       E = Uses.end(); I != E; ++I) {
2867    size_t FSize = I->Formulae.size();
2868    if (FSize >= ComplexityLimit) {
2869      Power = ComplexityLimit;
2870      break;
2871    }
2872    Power *= FSize;
2873    if (Power >= ComplexityLimit)
2874      break;
2875  }
2876  return Power;
2877}
2878
2879/// NarrowSearchSpaceUsingHeuristics - If there are an extraordinary number of
2880/// formulae to choose from, use some rough heuristics to prune down the number
2881/// of formulae. This keeps the main solver from taking an extraordinary amount
2882/// of time in some worst-case scenarios.
2883void LSRInstance::NarrowSearchSpaceUsingHeuristics() { 2884 if (EstimateSearchSpaceComplexity() >= ComplexityLimit) { 2885 DEBUG(dbgs() << "The search space is too complex.\n"); 2886 2887 DEBUG(dbgs() << "Narrowing the search space by eliminating formulae " 2888 "which use a superset of registers used by other " 2889 "formulae.\n"); 2890 2891 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { 2892 LSRUse &LU = Uses[LUIdx]; 2893 bool Any = false; 2894 for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) { 2895 Formula &F = LU.Formulae[i]; 2896 // Look for a formula with a constant or GV in a register. If the use 2897 // also has a formula with that same value in an immediate field, 2898 // delete the one that uses a register. 2899 for (SmallVectorImpl<const SCEV *>::const_iterator 2900 I = F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I) { 2901 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(*I)) { 2902 Formula NewF = F; 2903 NewF.AM.BaseOffs += C->getValue()->getSExtValue(); 2904 NewF.BaseRegs.erase(NewF.BaseRegs.begin() + 2905 (I - F.BaseRegs.begin())); 2906 if (LU.HasFormulaWithSameRegs(NewF)) { 2907 DEBUG(dbgs() << " Deleting "; F.print(dbgs()); dbgs() << '\n'); 2908 LU.DeleteFormula(F); 2909 --i; 2910 --e; 2911 Any = true; 2912 break; 2913 } 2914 } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(*I)) { 2915 if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue())) 2916 if (!F.AM.BaseGV) { 2917 Formula NewF = F; 2918 NewF.AM.BaseGV = GV; 2919 NewF.BaseRegs.erase(NewF.BaseRegs.begin() + 2920 (I - F.BaseRegs.begin())); 2921 if (LU.HasFormulaWithSameRegs(NewF)) { 2922 DEBUG(dbgs() << " Deleting "; F.print(dbgs()); 2923 dbgs() << '\n'); 2924 LU.DeleteFormula(F); 2925 --i; 2926 --e; 2927 Any = true; 2928 break; 2929 } 2930 } 2931 } 2932 } 2933 } 2934 if (Any) 2935 LU.RecomputeRegs(LUIdx, RegUses); 2936 } 2937 2938 DEBUG(dbgs() << "After pre-selection:\n"; 2939 print_uses(dbgs())); 2940 } 2941 2942 if (EstimateSearchSpaceComplexity() >= ComplexityLimit) { 2943 DEBUG(dbgs() << "The search space is too complex.\n"); 2944 2945 DEBUG(dbgs() << "Narrowing the search space by assuming that uses " 2946 "separated by a constant offset will use the same " 2947 "registers.\n"); 2948 2949 // This is especially useful for unrolled loops. 2950 2951 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { 2952 LSRUse &LU = Uses[LUIdx]; 2953 for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(), 2954 E = LU.Formulae.end(); I != E; ++I) { 2955 const Formula &F = *I; 2956 if (F.AM.BaseOffs != 0 && F.AM.Scale == 0) { 2957 if (LSRUse *LUThatHas = FindUseWithSimilarFormula(F, LU)) { 2958 if (reconcileNewOffset(*LUThatHas, F.AM.BaseOffs, 2959 /*HasBaseReg=*/false, 2960 LU.Kind, LU.AccessTy)) { 2961 DEBUG(dbgs() << " Deleting use "; LU.print(dbgs()); 2962 dbgs() << '\n'); 2963 2964 LUThatHas->AllFixupsOutsideLoop &= LU.AllFixupsOutsideLoop; 2965 2966 // Delete formulae from the new use which are no longer legal. 
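            // A sketch with hypothetical offsets: when a use at base+64 is
            // folded into a use at base, the surviving use's fixups span a
            // wider offset range, so any formula whose immediate no longer
            // gives a legal addressing mode over the widened
            // [MinOffset, MaxOffset] is deleted here.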
2967 bool Any = false; 2968 for (size_t i = 0, e = LUThatHas->Formulae.size(); i != e; ++i) { 2969 Formula &F = LUThatHas->Formulae[i]; 2970 if (!isLegalUse(F.AM, 2971 LUThatHas->MinOffset, LUThatHas->MaxOffset, 2972 LUThatHas->Kind, LUThatHas->AccessTy, TLI)) { 2973 DEBUG(dbgs() << " Deleting "; F.print(dbgs()); 2974 dbgs() << '\n'); 2975 LUThatHas->DeleteFormula(F); 2976 --i; 2977 --e; 2978 Any = true; 2979 } 2980 } 2981 if (Any) 2982 LUThatHas->RecomputeRegs(LUThatHas - &Uses.front(), RegUses); 2983 2984 // Update the relocs to reference the new use. 2985 for (SmallVectorImpl<LSRFixup>::iterator I = Fixups.begin(), 2986 E = Fixups.end(); I != E; ++I) { 2987 LSRFixup &Fixup = *I; 2988 if (Fixup.LUIdx == LUIdx) { 2989 Fixup.LUIdx = LUThatHas - &Uses.front(); 2990 Fixup.Offset += F.AM.BaseOffs; 2991 DEBUG(errs() << "New fixup has offset " 2992 << Fixup.Offset << '\n'); 2993 } 2994 if (Fixup.LUIdx == NumUses-1) 2995 Fixup.LUIdx = LUIdx; 2996 } 2997 2998 // Delete the old use. 2999 DeleteUse(LU); 3000 --LUIdx; 3001 --NumUses; 3002 break; 3003 } 3004 } 3005 } 3006 } 3007 } 3008 3009 DEBUG(dbgs() << "After pre-selection:\n"; 3010 print_uses(dbgs())); 3011 } 3012 3013 // With all other options exhausted, loop until the system is simple 3014 // enough to handle. 3015 SmallPtrSet<const SCEV *, 4> Taken; 3016 while (EstimateSearchSpaceComplexity() >= ComplexityLimit) { 3017 // Ok, we have too many of formulae on our hands to conveniently handle. 3018 // Use a rough heuristic to thin out the list. 3019 DEBUG(dbgs() << "The search space is too complex.\n"); 3020 3021 // Pick the register which is used by the most LSRUses, which is likely 3022 // to be a good reuse register candidate. 3023 const SCEV *Best = 0; 3024 unsigned BestNum = 0; 3025 for (RegUseTracker::const_iterator I = RegUses.begin(), E = RegUses.end(); 3026 I != E; ++I) { 3027 const SCEV *Reg = *I; 3028 if (Taken.count(Reg)) 3029 continue; 3030 if (!Best) 3031 Best = Reg; 3032 else { 3033 unsigned Count = RegUses.getUsedByIndices(Reg).count(); 3034 if (Count > BestNum) { 3035 Best = Reg; 3036 BestNum = Count; 3037 } 3038 } 3039 } 3040 3041 DEBUG(dbgs() << "Narrowing the search space by assuming " << *Best 3042 << " will yield profitable reuse.\n"); 3043 Taken.insert(Best); 3044 3045 // In any use with formulae which references this register, delete formulae 3046 // which don't reference it. 3047 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { 3048 LSRUse &LU = Uses[LUIdx]; 3049 if (!LU.Regs.count(Best)) continue; 3050 3051 bool Any = false; 3052 for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) { 3053 Formula &F = LU.Formulae[i]; 3054 if (!F.referencesReg(Best)) { 3055 DEBUG(dbgs() << " Deleting "; F.print(dbgs()); dbgs() << '\n'); 3056 LU.DeleteFormula(F); 3057 --e; 3058 --i; 3059 Any = true; 3060 assert(e != 0 && "Use has no formulae left! Is Regs inconsistent?"); 3061 continue; 3062 } 3063 } 3064 3065 if (Any) 3066 LU.RecomputeRegs(LUIdx, RegUses); 3067 } 3068 3069 DEBUG(dbgs() << "After pre-selection:\n"; 3070 print_uses(dbgs())); 3071 } 3072} 3073 3074/// SolveRecurse - This is the recursive solver. 
3075void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution, 3076 Cost &SolutionCost, 3077 SmallVectorImpl<const Formula *> &Workspace, 3078 const Cost &CurCost, 3079 const SmallPtrSet<const SCEV *, 16> &CurRegs, 3080 DenseSet<const SCEV *> &VisitedRegs) const { 3081 // Some ideas: 3082 // - prune more: 3083 // - use more aggressive filtering 3084 // - sort the formula so that the most profitable solutions are found first 3085 // - sort the uses too 3086 // - search faster: 3087 // - don't compute a cost, and then compare. compare while computing a cost 3088 // and bail early. 3089 // - track register sets with SmallBitVector 3090 3091 const LSRUse &LU = Uses[Workspace.size()]; 3092 3093 // If this use references any register that's already a part of the 3094 // in-progress solution, consider it a requirement that a formula must 3095 // reference that register in order to be considered. This prunes out 3096 // unprofitable searching. 3097 SmallSetVector<const SCEV *, 4> ReqRegs; 3098 for (SmallPtrSet<const SCEV *, 16>::const_iterator I = CurRegs.begin(), 3099 E = CurRegs.end(); I != E; ++I) 3100 if (LU.Regs.count(*I)) 3101 ReqRegs.insert(*I); 3102 3103 bool AnySatisfiedReqRegs = false; 3104 SmallPtrSet<const SCEV *, 16> NewRegs; 3105 Cost NewCost; 3106retry: 3107 for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(), 3108 E = LU.Formulae.end(); I != E; ++I) { 3109 const Formula &F = *I; 3110 3111 // Ignore formulae which do not use any of the required registers. 3112 for (SmallSetVector<const SCEV *, 4>::const_iterator J = ReqRegs.begin(), 3113 JE = ReqRegs.end(); J != JE; ++J) { 3114 const SCEV *Reg = *J; 3115 if ((!F.ScaledReg || F.ScaledReg != Reg) && 3116 std::find(F.BaseRegs.begin(), F.BaseRegs.end(), Reg) == 3117 F.BaseRegs.end()) 3118 goto skip; 3119 } 3120 AnySatisfiedReqRegs = true; 3121 3122 // Evaluate the cost of the current formula. If it's already worse than 3123 // the current best, prune the search at that point. 3124 NewCost = CurCost; 3125 NewRegs = CurRegs; 3126 NewCost.RateFormula(F, NewRegs, VisitedRegs, L, LU.Offsets, SE, DT); 3127 if (NewCost < SolutionCost) { 3128 Workspace.push_back(&F); 3129 if (Workspace.size() != Uses.size()) { 3130 SolveRecurse(Solution, SolutionCost, Workspace, NewCost, 3131 NewRegs, VisitedRegs); 3132 if (F.getNumRegs() == 1 && Workspace.size() == 1) 3133 VisitedRegs.insert(F.ScaledReg ? F.ScaledReg : F.BaseRegs[0]); 3134 } else { 3135 DEBUG(dbgs() << "New best at "; NewCost.print(dbgs()); 3136 dbgs() << ". Regs:"; 3137 for (SmallPtrSet<const SCEV *, 16>::const_iterator 3138 I = NewRegs.begin(), E = NewRegs.end(); I != E; ++I) 3139 dbgs() << ' ' << **I; 3140 dbgs() << '\n'); 3141 3142 SolutionCost = NewCost; 3143 Solution = Workspace; 3144 } 3145 Workspace.pop_back(); 3146 } 3147 skip:; 3148 } 3149 3150 // If none of the formulae had all of the required registers, relax the 3151 // constraint so that we don't exclude all formulae. 3152 if (!AnySatisfiedReqRegs) { 3153 assert(!ReqRegs.empty() && "Solver failed even without required registers"); 3154 ReqRegs.clear(); 3155 goto retry; 3156 } 3157} 3158 3159/// Solve - Choose one formula from each use. Return the results in the given 3160/// Solution vector. 
3161void LSRInstance::Solve(SmallVectorImpl<const Formula *> &Solution) const {
3162  SmallVector<const Formula *, 8> Workspace;
3163  Cost SolutionCost;
3164  SolutionCost.Loose();
3165  Cost CurCost;
3166  SmallPtrSet<const SCEV *, 16> CurRegs;
3167  DenseSet<const SCEV *> VisitedRegs;
3168  Workspace.reserve(Uses.size());
3169
3170  // SolveRecurse does all the work.
3171  SolveRecurse(Solution, SolutionCost, Workspace, CurCost,
3172               CurRegs, VisitedRegs);
3173
3174  // Ok, we've now made all our decisions.
3175  DEBUG(dbgs() << "\n"
3176                  "The chosen solution requires "; SolutionCost.print(dbgs());
3177        dbgs() << ":\n";
3178        for (size_t i = 0, e = Uses.size(); i != e; ++i) {
3179          dbgs() << " ";
3180          Uses[i].print(dbgs());
3181          dbgs() << "\n"
3182                    " ";
3183          Solution[i]->print(dbgs());
3184          dbgs() << '\n';
3185        });
3186
3187  assert(Solution.size() == Uses.size() && "Malformed solution!");
3188}
3189
3190/// HoistInsertPosition - Helper for AdjustInsertPositionForExpand. Climb up
3191/// the dominator tree as far as we can go while still being dominated by the
3192/// input positions. This helps canonicalize the insert position, which
3193/// encourages sharing.
3194BasicBlock::iterator
3195LSRInstance::HoistInsertPosition(BasicBlock::iterator IP,
3196                                 const SmallVectorImpl<Instruction *> &Inputs)
3197                                 const {
3198  for (;;) {
3199    const Loop *IPLoop = LI.getLoopFor(IP->getParent());
3200    unsigned IPLoopDepth = IPLoop ? IPLoop->getLoopDepth() : 0;
3201
3202    BasicBlock *IDom;
3203    for (DomTreeNode *Rung = DT.getNode(IP->getParent()); ; ) {
3204      if (!Rung) return IP;
3205      Rung = Rung->getIDom();
3206      if (!Rung) return IP;
3207      IDom = Rung->getBlock();
3208
3209      // Don't climb into a loop though.
3210      const Loop *IDomLoop = LI.getLoopFor(IDom);
3211      unsigned IDomDepth = IDomLoop ? IDomLoop->getLoopDepth() : 0;
3212      if (IDomDepth <= IPLoopDepth &&
3213          (IDomDepth != IPLoopDepth || IDomLoop == IPLoop))
3214        break;
3215    }
3216
3217    bool AllDominate = true;
3218    Instruction *BetterPos = 0;
3219    Instruction *Tentative = IDom->getTerminator();
3220    for (SmallVectorImpl<Instruction *>::const_iterator I = Inputs.begin(),
3221         E = Inputs.end(); I != E; ++I) {
3222      Instruction *Inst = *I;
3223      if (Inst == Tentative || !DT.dominates(Inst, Tentative)) {
3224        AllDominate = false;
3225        break;
3226      }
3227      // Attempt to find an insert position in the middle of the block,
3228      // instead of at the end, so that it can be used for other expansions.
3229      if (IDom == Inst->getParent() &&
3230          (!BetterPos || DT.dominates(BetterPos, Inst)))
3231        BetterPos = llvm::next(BasicBlock::iterator(Inst));
3232    }
3233    if (!AllDominate)
3234      break;
3235    if (BetterPos)
3236      IP = BetterPos;
3237    else
3238      IP = Tentative;
3239  }
3240
3241  return IP;
3242}
3243
3244/// AdjustInsertPositionForExpand - Determine an insert position which will be
3245/// dominated by the operands and which will dominate the result.
3246BasicBlock::iterator
3247LSRInstance::AdjustInsertPositionForExpand(BasicBlock::iterator IP,
3248                                           const LSRFixup &LF,
3249                                           const LSRUse &LU) const {
3250  // Collect some instructions which must be dominated by the
3251  // expanding replacement. These must be dominated by any operands that
3252  // will be required in the expansion.
  SmallVector<Instruction *, 4> Inputs;
  if (Instruction *I = dyn_cast<Instruction>(LF.OperandValToReplace))
    Inputs.push_back(I);
  if (LU.Kind == LSRUse::ICmpZero)
    if (Instruction *I =
          dyn_cast<Instruction>(cast<ICmpInst>(LF.UserInst)->getOperand(1)))
      Inputs.push_back(I);
  if (LF.PostIncLoops.count(L)) {
    if (LF.isUseFullyOutsideLoop(L))
      Inputs.push_back(L->getLoopLatch()->getTerminator());
    else
      Inputs.push_back(IVIncInsertPos);
  }
  // The expansion must also be dominated by the increment positions of any
  // loops for which it is using post-inc mode.
  for (PostIncLoopSet::const_iterator I = LF.PostIncLoops.begin(),
       E = LF.PostIncLoops.end(); I != E; ++I) {
    const Loop *PIL = *I;
    if (PIL == L) continue;

    // Be dominated by the loop exit.
    SmallVector<BasicBlock *, 4> ExitingBlocks;
    PIL->getExitingBlocks(ExitingBlocks);
    if (!ExitingBlocks.empty()) {
      BasicBlock *BB = ExitingBlocks[0];
      for (unsigned i = 1, e = ExitingBlocks.size(); i != e; ++i)
        BB = DT.findNearestCommonDominator(BB, ExitingBlocks[i]);
      Inputs.push_back(BB->getTerminator());
    }
  }

  // Then, climb up the immediate dominator tree as far as we can go while
  // still being dominated by the input positions.
  IP = HoistInsertPosition(IP, Inputs);

  // Don't insert instructions before PHI nodes.
  while (isa<PHINode>(IP)) ++IP;

  // Ignore debug intrinsics.
  while (isa<DbgInfoIntrinsic>(IP)) ++IP;

  return IP;
}

/// Expand - Emit instructions for the leading candidate expression for this
/// LSRUse (this is called "expanding").
Value *LSRInstance::Expand(const LSRFixup &LF,
                           const Formula &F,
                           BasicBlock::iterator IP,
                           SCEVExpander &Rewriter,
                           SmallVectorImpl<WeakVH> &DeadInsts) const {
  const LSRUse &LU = Uses[LF.LUIdx];

  // Determine an insert position which will be dominated by the operands and
  // which will dominate the result.
  IP = AdjustInsertPositionForExpand(IP, LF, LU);

  // Inform the Rewriter if we have a post-increment use, so that it can
  // perform an advantageous expansion.
  Rewriter.setPostInc(LF.PostIncLoops);

  // This is the type that the user actually needs.
  const Type *OpTy = LF.OperandValToReplace->getType();
  // This will be the type that we'll initially expand to.
  const Type *Ty = F.getType();
  if (!Ty)
    // No type known; just expand directly to the ultimate type.
    Ty = OpTy;
  else if (SE.getEffectiveSCEVType(Ty) == SE.getEffectiveSCEVType(OpTy))
    // Expand directly to the ultimate type if it's the right size.
    Ty = OpTy;
  // This is the type to do integer arithmetic in.
  const Type *IntTy = SE.getEffectiveSCEVType(Ty);

  // Build up a list of operands to add together to form the full base.
  SmallVector<const SCEV *, 8> Ops;

  // Expand the BaseRegs portion.
  for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
       E = F.BaseRegs.end(); I != E; ++I) {
    const SCEV *Reg = *I;
    assert(!Reg->isZero() && "Zero allocated in a base register!");

    // If we're expanding for a post-inc user, make the post-inc adjustment.
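    // (Illustrative example: if this fixup is a post-inc user of %L and the
    // register's normalized SCEV is {0,+,1}<%L>, denormalization rewrites it
    // to {1,+,1}<%L> so that the expansion refers to the incremented value.)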
    PostIncLoopSet &Loops = const_cast<PostIncLoopSet &>(LF.PostIncLoops);
    Reg = TransformForPostIncUse(Denormalize, Reg,
                                 LF.UserInst, LF.OperandValToReplace,
                                 Loops, SE, DT);

    Ops.push_back(SE.getUnknown(Rewriter.expandCodeFor(Reg, 0, IP)));
  }

  // Flush the operand list to suppress SCEVExpander hoisting.
  if (!Ops.empty()) {
    Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
    Ops.clear();
    Ops.push_back(SE.getUnknown(FullV));
  }

  // Expand the ScaledReg portion.
  Value *ICmpScaledV = 0;
  if (F.AM.Scale != 0) {
    const SCEV *ScaledS = F.ScaledReg;

    // If we're expanding for a post-inc user, make the post-inc adjustment.
    PostIncLoopSet &Loops = const_cast<PostIncLoopSet &>(LF.PostIncLoops);
    ScaledS = TransformForPostIncUse(Denormalize, ScaledS,
                                     LF.UserInst, LF.OperandValToReplace,
                                     Loops, SE, DT);

    if (LU.Kind == LSRUse::ICmpZero) {
      // An interesting way of "folding" with an icmp is to use a negated
      // scale, which we'll implement by inserting it into the other operand
      // of the icmp.
      assert(F.AM.Scale == -1 &&
             "The only scale supported by ICmpZero uses is -1!");
      ICmpScaledV = Rewriter.expandCodeFor(ScaledS, 0, IP);
    } else {
      // Otherwise just expand the scaled register and an explicit scale,
      // which is expected to be matched as part of the address.
      ScaledS = SE.getUnknown(Rewriter.expandCodeFor(ScaledS, 0, IP));
      ScaledS = SE.getMulExpr(ScaledS,
                              SE.getConstant(ScaledS->getType(), F.AM.Scale));
      Ops.push_back(ScaledS);

      // Flush the operand list to suppress SCEVExpander hoisting.
      Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
      Ops.clear();
      Ops.push_back(SE.getUnknown(FullV));
    }
  }

  // Expand the GV portion.
  if (F.AM.BaseGV) {
    Ops.push_back(SE.getUnknown(F.AM.BaseGV));

    // Flush the operand list to suppress SCEVExpander hoisting.
    Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
    Ops.clear();
    Ops.push_back(SE.getUnknown(FullV));
  }

  // Expand the immediate portion.
  int64_t Offset = (uint64_t)F.AM.BaseOffs + LF.Offset;
  if (Offset != 0) {
    if (LU.Kind == LSRUse::ICmpZero) {
      // The other interesting way of "folding" with an ICmpZero is to use a
      // negated immediate.
      if (!ICmpScaledV)
        ICmpScaledV = ConstantInt::get(IntTy, -Offset);
      else {
        Ops.push_back(SE.getUnknown(ICmpScaledV));
        ICmpScaledV = ConstantInt::get(IntTy, Offset);
      }
    } else {
      // Just add the immediate values. These again are expected to be matched
      // as part of the address.
      Ops.push_back(SE.getUnknown(ConstantInt::getSigned(IntTy, Offset)));
    }
  }

  // Emit instructions summing all the operands.
  const SCEV *FullS = Ops.empty() ?
                      SE.getConstant(IntTy, 0) :
                      SE.getAddExpr(Ops);
  Value *FullV = Rewriter.expandCodeFor(FullS, Ty, IP);

  // We're done expanding now, so reset the rewriter.
  Rewriter.clearPostInc();

  // An ICmpZero Formula represents an ICmp which we're handling as a
  // comparison against zero. Now that we've expanded an expression for that
  // form, update the ICmp's other operand.
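  // (Sketch of the Scale == -1 case handled below: if the use is
  // "icmp eq i64 %x, 0" and the chosen formula is reg(%a) + -1*reg(%b),
  // then %b is expanded into the icmp's second operand and %a becomes the
  // first, yielding "icmp eq i64 %a, %b" with no explicit negation.)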
  if (LU.Kind == LSRUse::ICmpZero) {
    ICmpInst *CI = cast<ICmpInst>(LF.UserInst);
    DeadInsts.push_back(CI->getOperand(1));
    assert(!F.AM.BaseGV && "ICmp does not support folding a global value and "
                           "a scale at the same time!");
    if (F.AM.Scale == -1) {
      if (ICmpScaledV->getType() != OpTy) {
        Instruction *Cast =
          CastInst::Create(CastInst::getCastOpcode(ICmpScaledV, false,
                                                   OpTy, false),
                           ICmpScaledV, OpTy, "tmp", CI);
        ICmpScaledV = Cast;
      }
      CI->setOperand(1, ICmpScaledV);
    } else {
      assert(F.AM.Scale == 0 &&
             "ICmp does not support folding a global value and "
             "a scale at the same time!");
      Constant *C = ConstantInt::getSigned(SE.getEffectiveSCEVType(OpTy),
                                           -(uint64_t)Offset);
      if (C->getType() != OpTy)
        C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
                                                          OpTy, false),
                                  C, OpTy);

      CI->setOperand(1, C);
    }
  }

  return FullV;
}

/// RewriteForPHI - Helper for Rewrite. PHI nodes are special because the use
/// of their operands effectively happens in their predecessor blocks, so the
/// expression may need to be expanded in multiple places.
void LSRInstance::RewriteForPHI(PHINode *PN,
                                const LSRFixup &LF,
                                const Formula &F,
                                SCEVExpander &Rewriter,
                                SmallVectorImpl<WeakVH> &DeadInsts,
                                Pass *P) const {
  DenseMap<BasicBlock *, Value *> Inserted;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
    if (PN->getIncomingValue(i) == LF.OperandValToReplace) {
      BasicBlock *BB = PN->getIncomingBlock(i);

      // If this is a critical edge, split the edge so that we do not insert
      // the code on all predecessor/successor paths. We do this unless this
      // is the canonical backedge for this loop, which complicates post-inc
      // users.
      if (e != 1 && BB->getTerminator()->getNumSuccessors() > 1 &&
          !isa<IndirectBrInst>(BB->getTerminator()) &&
          (PN->getParent() != L->getHeader() || !L->contains(BB))) {
        // Split the critical edge.
        BasicBlock *NewBB = SplitCriticalEdge(BB, PN->getParent(), P);

        // If PN is outside of the loop and BB is in the loop, we want to
        // move the block to be immediately before the PHI block, not
        // immediately after BB.
        if (L->contains(BB) && !L->contains(PN))
          NewBB->moveBefore(PN->getParent());

        // Splitting the edge can reduce the number of PHI entries we have.
        e = PN->getNumIncomingValues();
        BB = NewBB;
        i = PN->getBasicBlockIndex(BB);
      }

      std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> Pair =
        Inserted.insert(std::make_pair(BB, static_cast<Value *>(0)));
      if (!Pair.second)
        PN->setIncomingValue(i, Pair.first->second);
      else {
        Value *FullV = Expand(LF, F, BB->getTerminator(), Rewriter, DeadInsts);

        // If this is reuse-by-noop-cast, insert the noop cast.
        const Type *OpTy = LF.OperandValToReplace->getType();
        if (FullV->getType() != OpTy)
          FullV =
            CastInst::Create(CastInst::getCastOpcode(FullV, false,
                                                     OpTy, false),
                             FullV, LF.OperandValToReplace->getType(),
                             "tmp", BB->getTerminator());

        PN->setIncomingValue(i, FullV);
        Pair.first->second = FullV;
      }
    }
}

/// Rewrite - Emit instructions for the leading candidate expression for this
/// LSRUse (this is called "expanding"), and update the UserInst to reference
/// the newly expanded value.
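///
/// (Example of the reuse-by-noop-cast case below: if the expansion produces
/// an i64 value but the user expects an i8*, a cast such as inttoptr is
/// inserted so that the user keeps its original operand type.)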
void LSRInstance::Rewrite(const LSRFixup &LF,
                          const Formula &F,
                          SCEVExpander &Rewriter,
                          SmallVectorImpl<WeakVH> &DeadInsts,
                          Pass *P) const {
  // First, find an insertion point that dominates UserInst. For PHI nodes,
  // find the nearest block which dominates all the relevant uses.
  if (PHINode *PN = dyn_cast<PHINode>(LF.UserInst)) {
    RewriteForPHI(PN, LF, F, Rewriter, DeadInsts, P);
  } else {
    Value *FullV = Expand(LF, F, LF.UserInst, Rewriter, DeadInsts);

    // If this is reuse-by-noop-cast, insert the noop cast.
    const Type *OpTy = LF.OperandValToReplace->getType();
    if (FullV->getType() != OpTy) {
      Instruction *Cast =
        CastInst::Create(CastInst::getCastOpcode(FullV, false, OpTy, false),
                         FullV, OpTy, "tmp", LF.UserInst);
      FullV = Cast;
    }

    // Update the user. ICmpZero is handled specially here (for now) because
    // Expand may have updated one of the operands of the icmp already, and
    // its new value may happen to be equal to LF.OperandValToReplace, in
    // which case doing replaceUsesOfWith leads to replacing both operands
    // with the same value. TODO: Reorganize this.
    if (Uses[LF.LUIdx].Kind == LSRUse::ICmpZero)
      LF.UserInst->setOperand(0, FullV);
    else
      LF.UserInst->replaceUsesOfWith(LF.OperandValToReplace, FullV);
  }

  DeadInsts.push_back(LF.OperandValToReplace);
}

/// ImplementSolution - Rewrite all the fixup locations with new values,
/// following the chosen solution.
void
LSRInstance::ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
                               Pass *P) {
  // Keep track of instructions we may have made dead, so that
  // we can remove them after we are done working.
  SmallVector<WeakVH, 16> DeadInsts;

  SCEVExpander Rewriter(SE);
  Rewriter.disableCanonicalMode();
  Rewriter.setIVIncInsertPos(L, IVIncInsertPos);

  // Expand the new value definitions and update the users.
  for (SmallVectorImpl<LSRFixup>::const_iterator I = Fixups.begin(),
       E = Fixups.end(); I != E; ++I) {
    const LSRFixup &Fixup = *I;

    Rewrite(Fixup, *Solution[Fixup.LUIdx], Rewriter, DeadInsts, P);

    Changed = true;
  }

  // Clean up after ourselves. This must be done before deleting any
  // instructions.
  Rewriter.clear();

  Changed |= DeleteTriviallyDeadInstructions(DeadInsts);
}

LSRInstance::LSRInstance(const TargetLowering *tli, Loop *l, Pass *P)
  : IU(P->getAnalysis<IVUsers>()),
    SE(P->getAnalysis<ScalarEvolution>()),
    DT(P->getAnalysis<DominatorTree>()),
    LI(P->getAnalysis<LoopInfo>()),
    TLI(tli), L(l), Changed(false), IVIncInsertPos(0) {

  // If LoopSimplify form is not available, stay out of trouble.
  if (!L->isLoopSimplifyForm()) return;

  // If there's no interesting work to be done, bail early.
  if (IU.empty()) return;

  DEBUG(dbgs() << "\nLSR on loop ";
        WriteAsOperand(dbgs(), L->getHeader(), /*PrintType=*/false);
        dbgs() << ":\n");

  // First, perform some low-level loop optimizations.
  OptimizeShadowIV();
  OptimizeLoopTermCond();

  // Start collecting data and preparing for the solver.
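  // (What follows is the main LSR pipeline: collect fixups and initial
  // formulae, generate alternative formulae via reuse, filter and narrow
  // the search space, solve for one formula per use, and rewrite the users
  // with the chosen solution.)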
  CollectInterestingTypesAndFactors();
  CollectFixupsAndInitialFormulae();
  CollectLoopInvariantFixupsAndFormulae();

  DEBUG(dbgs() << "LSR found " << Uses.size() << " uses:\n";
        print_uses(dbgs()));

  // Now use the reuse data to generate a bunch of interesting ways
  // to formulate the values needed for the uses.
  GenerateAllReuseFormulae();

  DEBUG(dbgs() << "\n"
                  "After generating reuse formulae:\n";
        print_uses(dbgs()));

  FilterOutUndesirableDedicatedRegisters();
  NarrowSearchSpaceUsingHeuristics();

  SmallVector<const Formula *, 8> Solution;
  Solve(Solution);

  // Release memory that is no longer needed.
  Factors.clear();
  Types.clear();
  RegUses.clear();

#ifndef NDEBUG
  // Formulae should be legal.
  for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
       E = Uses.end(); I != E; ++I) {
    const LSRUse &LU = *I;
    for (SmallVectorImpl<Formula>::const_iterator J = LU.Formulae.begin(),
         JE = LU.Formulae.end(); J != JE; ++J)
      assert(isLegalUse(J->AM, LU.MinOffset, LU.MaxOffset,
                        LU.Kind, LU.AccessTy, TLI) &&
             "Illegal formula generated!");
  }
#endif

  // Now that we've decided what we want, make it so.
  ImplementSolution(Solution, P);
}

void LSRInstance::print_factors_and_types(raw_ostream &OS) const {
  if (Factors.empty() && Types.empty()) return;

  OS << "LSR has identified the following interesting factors and types: ";
  bool First = true;

  for (SmallSetVector<int64_t, 8>::const_iterator
       I = Factors.begin(), E = Factors.end(); I != E; ++I) {
    if (!First) OS << ", ";
    First = false;
    OS << '*' << *I;
  }

  for (SmallSetVector<const Type *, 4>::const_iterator
       I = Types.begin(), E = Types.end(); I != E; ++I) {
    if (!First) OS << ", ";
    First = false;
    OS << '(' << **I << ')';
  }
  OS << '\n';
}

void LSRInstance::print_fixups(raw_ostream &OS) const {
  OS << "LSR is examining the following fixup sites:\n";
  for (SmallVectorImpl<LSRFixup>::const_iterator I = Fixups.begin(),
       E = Fixups.end(); I != E; ++I) {
    OS << "  ";
    I->print(OS);
    OS << '\n';
  }
}

void LSRInstance::print_uses(raw_ostream &OS) const {
  OS << "LSR is examining the following uses:\n";
  for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
       E = Uses.end(); I != E; ++I) {
    const LSRUse &LU = *I;
    OS << "  ";
    LU.print(OS);
    OS << '\n';
    for (SmallVectorImpl<Formula>::const_iterator J = LU.Formulae.begin(),
         JE = LU.Formulae.end(); J != JE; ++J) {
      OS << "    ";
      J->print(OS);
      OS << '\n';
    }
  }
}

void LSRInstance::print(raw_ostream &OS) const {
  print_factors_and_types(OS);
  print_fixups(OS);
  print_uses(OS);
}

void LSRInstance::dump() const {
  print(errs()); errs() << '\n';
}

namespace {

class LoopStrengthReduce : public LoopPass {
  /// TLI - Keep a pointer to a TargetLowering to consult for determining
  /// transformation profitability.
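  /// (When this is null, the legality checks fall back to conservative
  /// target-independent assumptions instead of consulting the target.)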
  const TargetLowering *const TLI;

public:
  static char ID; // Pass ID, replacement for typeid
  explicit LoopStrengthReduce(const TargetLowering *tli = 0);

private:
  bool runOnLoop(Loop *L, LPPassManager &LPM);
  void getAnalysisUsage(AnalysisUsage &AU) const;
};

}

char LoopStrengthReduce::ID = 0;
static RegisterPass<LoopStrengthReduce>
X("loop-reduce", "Loop Strength Reduction");

Pass *llvm::createLoopStrengthReducePass(const TargetLowering *TLI) {
  return new LoopStrengthReduce(TLI);
}

LoopStrengthReduce::LoopStrengthReduce(const TargetLowering *tli)
  : LoopPass(&ID), TLI(tli) {}

void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const {
  // We split critical edges, so we change the CFG. However, we do update
  // many analyses if they are around.
  AU.addPreservedID(LoopSimplifyID);
  AU.addPreserved("domfrontier");

  AU.addRequired<LoopInfo>();
  AU.addPreserved<LoopInfo>();
  AU.addRequiredID(LoopSimplifyID);
  AU.addRequired<DominatorTree>();
  AU.addPreserved<DominatorTree>();
  AU.addRequired<ScalarEvolution>();
  AU.addPreserved<ScalarEvolution>();
  AU.addRequired<IVUsers>();
  AU.addPreserved<IVUsers>();
}

bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) {
  bool Changed = false;

  // Run the main LSR transformation.
  Changed |= LSRInstance(TLI, L, this).getChanged();

  // At this point, it is worth checking to see if any recurrence PHIs are also
  // dead, so that we can remove them as well.
  Changed |= DeleteDeadPHIs(L->getHeader());

  return Changed;
}
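// Illustrative usage (not part of this file): a target pass pipeline would
// typically schedule this pass with something like
//   PM.add(createLoopStrengthReducePass(TLI));
// where TLI is the target's TargetLowering (hypothetical variable name), and
// the pass can also be run from 'opt' via the name registered above:
//   opt -loop-reduce input.bc -o output.bc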