LoopStrengthReduce.cpp revision 194710
//===- LoopStrengthReduce.cpp - Strength Reduce IVs in Loops --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation analyzes and transforms the induction variables (and
// computations derived from them) into forms suitable for efficient execution
// on the target.
//
// This pass performs a strength reduction on array references inside loops
// that have as one or more of their components the loop induction variable.
// It rewrites expressions to take advantage of scaled-index addressing modes
// available on the target, and it performs a variety of other optimizations
// related to loop induction variables.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "loop-reduce"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Type.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/IVUsers.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/AddrModeMatcher.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Target/TargetLowering.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumReduced,    "Number of IV uses strength reduced");
STATISTIC(NumInserted,   "Number of PHIs inserted");
STATISTIC(NumVariable,   "Number of PHIs with variable strides");
STATISTIC(NumEliminated, "Number of strides eliminated");
STATISTIC(NumShadow,     "Number of Shadow IVs optimized");
STATISTIC(NumImmSunk,    "Number of common expr immediates sunk into uses");
STATISTIC(NumLoopCond,   "Number of loop terminating conds optimized");

static cl::opt<bool> EnableFullLSRMode("enable-full-lsr",
                                       cl::init(false),
                                       cl::Hidden);

namespace {

  struct BasedUser;

  /// IVExpr - This structure keeps track of one IV expression inserted during
  /// StrengthReduceStridedIVUsers. It contains the stride, the common base, as
  /// well as the PHI node and increment value created for rewrite.
  struct VISIBILITY_HIDDEN IVExpr {
    const SCEV* Stride;
    const SCEV* Base;
    PHINode    *PHI;

    IVExpr(const SCEV* const stride, const SCEV* const base, PHINode *phi)
      : Stride(stride), Base(base), PHI(phi) {}
  };

  /// IVsOfOneStride - This structure keeps track of all IV expressions inserted
  /// during StrengthReduceStridedIVUsers for a particular stride of the IV.
  struct VISIBILITY_HIDDEN IVsOfOneStride {
    std::vector<IVExpr> IVs;

    void addIV(const SCEV* const Stride, const SCEV* const Base, PHINode *PHI) {
      IVs.push_back(IVExpr(Stride, Base, PHI));
    }
  };

  class VISIBILITY_HIDDEN LoopStrengthReduce : public LoopPass {
    IVUsers *IU;
    LoopInfo *LI;
    DominatorTree *DT;
    ScalarEvolution *SE;
    bool Changed;

    /// IVsByStride - Keep track of all IVs that have been inserted for a
    /// particular stride.
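    /// Each entry records the IVExpr PHIs created for one stride, so that a
    /// later, larger stride can be rewritten in terms of an already-inserted
    /// IV (see CheckForIVReuse below).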
    std::map<const SCEV*, IVsOfOneStride> IVsByStride;

    /// StrideNoReuse - Keep track of all the strides whose ivs cannot be
    /// reused (nor should they be rewritten to reuse other strides).
    SmallSet<const SCEV*, 4> StrideNoReuse;

    /// DeadInsts - Keep track of instructions we may have made dead, so that
    /// we can remove them after we are done working.
    SmallVector<WeakVH, 16> DeadInsts;

    /// TLI - Keep a pointer to a TargetLowering to consult for determining
    /// transformation profitability.
    const TargetLowering *TLI;

  public:
    static char ID; // Pass ID, replacement for typeid
    explicit LoopStrengthReduce(const TargetLowering *tli = NULL) :
      LoopPass(&ID), TLI(tli) {
    }

    bool runOnLoop(Loop *L, LPPassManager &LPM);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      // We split critical edges, so we change the CFG. However, we do update
      // many analyses if they are around.
      AU.addPreservedID(LoopSimplifyID);
      AU.addPreserved<LoopInfo>();
      AU.addPreserved<DominanceFrontier>();
      AU.addPreserved<DominatorTree>();

      AU.addRequiredID(LoopSimplifyID);
      AU.addRequired<LoopInfo>();
      AU.addRequired<DominatorTree>();
      AU.addRequired<ScalarEvolution>();
      AU.addPreserved<ScalarEvolution>();
      AU.addRequired<IVUsers>();
      AU.addPreserved<IVUsers>();
    }

  private:
    ICmpInst *ChangeCompareStride(Loop *L, ICmpInst *Cond,
                                  IVStrideUse* &CondUse,
                                  const SCEV* const * &CondStride);

    void OptimizeIndvars(Loop *L);
    void OptimizeLoopCountIV(Loop *L);
    void OptimizeLoopTermCond(Loop *L);

    /// OptimizeShadowIV - If IV is used in an int-to-float cast
    /// inside the loop then try to eliminate the cast operation.
    void OptimizeShadowIV(Loop *L);

    /// OptimizeMax - Rewrite the loop's terminating condition
    /// if it uses a max computation.
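    /// A typical case is a backedge-taken count of the form smax(n, 1)
    /// introduced by induction-variable canonicalization; see the
    /// implementation for the exact patterns handled.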
    ICmpInst *OptimizeMax(Loop *L, ICmpInst *Cond,
                          IVStrideUse* &CondUse);

    bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse,
                           const SCEV* const * &CondStride);
    bool RequiresTypeConversion(const Type *Ty, const Type *NewTy);
    const SCEV* CheckForIVReuse(bool, bool, bool, const SCEV* const&,
                                IVExpr&, const Type*,
                                const std::vector<BasedUser>& UsersToProcess);
    bool ValidScale(bool, int64_t,
                    const std::vector<BasedUser>& UsersToProcess);
    bool ValidOffset(bool, int64_t, int64_t,
                     const std::vector<BasedUser>& UsersToProcess);
    const SCEV* CollectIVUsers(const SCEV* const &Stride,
                               IVUsersOfOneStride &Uses,
                               Loop *L,
                               bool &AllUsesAreAddresses,
                               bool &AllUsesAreOutsideLoop,
                               std::vector<BasedUser> &UsersToProcess);
    bool ShouldUseFullStrengthReductionMode(
                                const std::vector<BasedUser> &UsersToProcess,
                                const Loop *L,
                                bool AllUsesAreAddresses,
                                const SCEV* Stride);
    void PrepareToStrengthReduceFully(
                                std::vector<BasedUser> &UsersToProcess,
                                const SCEV* Stride,
                                const SCEV* CommonExprs,
                                const Loop *L,
                                SCEVExpander &PreheaderRewriter);
    void PrepareToStrengthReduceFromSmallerStride(
                                std::vector<BasedUser> &UsersToProcess,
                                Value *CommonBaseV,
                                const IVExpr &ReuseIV,
                                Instruction *PreInsertPt);
    void PrepareToStrengthReduceWithNewPhi(
                                std::vector<BasedUser> &UsersToProcess,
                                const SCEV* Stride,
                                const SCEV* CommonExprs,
                                Value *CommonBaseV,
                                Instruction *IVIncInsertPt,
                                const Loop *L,
                                SCEVExpander &PreheaderRewriter);
    void StrengthReduceStridedIVUsers(const SCEV* const &Stride,
                                      IVUsersOfOneStride &Uses,
                                      Loop *L);
    void DeleteTriviallyDeadInstructions();
  };
}

char LoopStrengthReduce::ID = 0;
static RegisterPass<LoopStrengthReduce>
X("loop-reduce", "Loop Strength Reduction");

Pass *llvm::createLoopStrengthReducePass(const TargetLowering *TLI) {
  return new LoopStrengthReduce(TLI);
}

/// DeleteTriviallyDeadInstructions - If any of the instructions in the
/// specified set are trivially dead, delete them and see if this makes any of
/// their operands subsequently dead.
void LoopStrengthReduce::DeleteTriviallyDeadInstructions() {
  if (DeadInsts.empty()) return;

  while (!DeadInsts.empty()) {
    Instruction *I = dyn_cast_or_null<Instruction>(DeadInsts.back());
    DeadInsts.pop_back();

    if (I == 0 || !isInstructionTriviallyDead(I))
      continue;

    for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI) {
      if (Instruction *U = dyn_cast<Instruction>(*OI)) {
        *OI = 0;
        if (U->use_empty())
          DeadInsts.push_back(U);
      }
    }

    I->eraseFromParent();
    Changed = true;
  }
}

/// containsAddRecFromDifferentLoop - Determine whether expression S involves a
/// subexpression that is an AddRec from a loop other than L. An outer loop
/// of L is OK, but not an inner loop nor a disjoint loop.
static bool containsAddRecFromDifferentLoop(const SCEV* S, Loop *L) {
  // This is very common, put it first.
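  // A plain constant cannot contain an AddRec at all.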
  if (isa<SCEVConstant>(S))
    return false;
  if (const SCEVCommutativeExpr *AE = dyn_cast<SCEVCommutativeExpr>(S)) {
    for (unsigned int i = 0; i < AE->getNumOperands(); i++)
      if (containsAddRecFromDifferentLoop(AE->getOperand(i), L))
        return true;
    return false;
  }
  if (const SCEVAddRecExpr *AE = dyn_cast<SCEVAddRecExpr>(S)) {
    if (const Loop *newLoop = AE->getLoop()) {
      if (newLoop == L)
        return false;
      // If newLoop is an outer loop of L, this is OK.
      if (!LoopInfoBase<BasicBlock>::isNotAlreadyContainedIn(L, newLoop))
        return false;
    }
    return true;
  }
  if (const SCEVUDivExpr *DE = dyn_cast<SCEVUDivExpr>(S))
    return containsAddRecFromDifferentLoop(DE->getLHS(), L) ||
           containsAddRecFromDifferentLoop(DE->getRHS(), L);
#if 0
  // SCEVSDivExpr has been backed out temporarily, but will be back; we'll
  // need this when it is.
  if (const SCEVSDivExpr *DE = dyn_cast<SCEVSDivExpr>(S))
    return containsAddRecFromDifferentLoop(DE->getLHS(), L) ||
           containsAddRecFromDifferentLoop(DE->getRHS(), L);
#endif
  if (const SCEVCastExpr *CE = dyn_cast<SCEVCastExpr>(S))
    return containsAddRecFromDifferentLoop(CE->getOperand(), L);
  return false;
}

/// isAddressUse - Returns true if the specified instruction is using the
/// specified value as an address.
static bool isAddressUse(Instruction *Inst, Value *OperandVal) {
  bool isAddress = isa<LoadInst>(Inst);
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->getOperand(1) == OperandVal)
      isAddress = true;
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::prefetch:
    case Intrinsic::x86_sse2_loadu_dq:
    case Intrinsic::x86_sse2_loadu_pd:
    case Intrinsic::x86_sse_loadu_ps:
    case Intrinsic::x86_sse_storeu_ps:
    case Intrinsic::x86_sse2_storeu_pd:
    case Intrinsic::x86_sse2_storeu_dq:
    case Intrinsic::x86_sse2_storel_dq:
      if (II->getOperand(1) == OperandVal)
        isAddress = true;
      break;
    }
  }
  return isAddress;
}

/// getAccessType - Return the type of the memory being accessed.
static const Type *getAccessType(const Instruction *Inst) {
  const Type *AccessTy = Inst->getType();
  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst))
    AccessTy = SI->getOperand(0)->getType();
  else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::x86_sse_storeu_ps:
    case Intrinsic::x86_sse2_storeu_pd:
    case Intrinsic::x86_sse2_storeu_dq:
    case Intrinsic::x86_sse2_storel_dq:
      AccessTy = II->getOperand(1)->getType();
      break;
    }
  }
  return AccessTy;
}

namespace {
  /// BasedUser - For a particular base value, keep information about how we've
  /// partitioned the expression so far.
  struct BasedUser {
    /// SE - The current ScalarEvolution object.
    ScalarEvolution *SE;

    /// Base - The Base value for the PHI node that needs to be inserted for
    /// this use. As the use is processed, information gets moved from this
    /// field to the Imm field (below). BasedUser values are sorted by this
    /// field.
    const SCEV* Base;

    /// Inst - The instruction using the induction variable.
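    /// This is the load, store, compare, or other user whose operand
    /// (OperandValToReplace below) will be rewritten.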
    Instruction *Inst;

    /// OperandValToReplace - The operand value of Inst to replace with the
    /// EmittedBase.
    Value *OperandValToReplace;

    /// Imm - The immediate value that should be added to the base immediately
    /// before Inst, because it will be folded into the imm field of the
    /// instruction. This is also sometimes used for loop-variant values that
    /// must be added inside the loop.
    const SCEV* Imm;

    /// Phi - The induction variable that performs the striding that
    /// should be used for this user.
    PHINode *Phi;

    // isUseOfPostIncrementedValue - True if this should use the
    // post-incremented version of this IV, not the preincremented version.
    // This can only be set in special cases, such as the terminating setcc
    // instruction for a loop and uses outside the loop that are dominated by
    // the loop.
    bool isUseOfPostIncrementedValue;

    BasedUser(IVStrideUse &IVSU, ScalarEvolution *se)
      : SE(se), Base(IVSU.getOffset()), Inst(IVSU.getUser()),
        OperandValToReplace(IVSU.getOperandValToReplace()),
        Imm(SE->getIntegerSCEV(0, Base->getType())),
        isUseOfPostIncrementedValue(IVSU.isUseOfPostIncrementedValue()) {}

    // Once we rewrite the code to insert the new IVs we want, update the
    // operands of Inst to use the new expression 'NewBase', with 'Imm' added
    // to it.
    void RewriteInstructionToUseNewBase(const SCEV* const &NewBase,
                                        Instruction *InsertPt,
                                        SCEVExpander &Rewriter, Loop *L, Pass *P,
                                        LoopInfo &LI,
                                        SmallVectorImpl<WeakVH> &DeadInsts);

    Value *InsertCodeForBaseAtPosition(const SCEV* const &NewBase,
                                       const Type *Ty,
                                       SCEVExpander &Rewriter,
                                       Instruction *IP, Loop *L,
                                       LoopInfo &LI);
    void dump() const;
  };
}

void BasedUser::dump() const {
  cerr << " Base=" << *Base;
  cerr << " Imm=" << *Imm;
  cerr << " Inst: " << *Inst;
}

Value *BasedUser::InsertCodeForBaseAtPosition(const SCEV* const &NewBase,
                                              const Type *Ty,
                                              SCEVExpander &Rewriter,
                                              Instruction *IP, Loop *L,
                                              LoopInfo &LI) {
  // Figure out where we *really* want to insert this code. In particular, if
  // the user is inside of a loop that is nested inside of L, we really don't
  // want to insert this expression before the user, we'd rather pull it out as
  // many loops as possible.
  Instruction *BaseInsertPt = IP;

  // Figure out the most-nested loop that IP is in.
  Loop *InsertLoop = LI.getLoopFor(IP->getParent());

  // If InsertLoop is not L, and InsertLoop is nested inside of L, figure out
  // the preheader of the outer-most loop where NewBase is not loop invariant.
  if (L->contains(IP->getParent()))
    while (InsertLoop && NewBase->isLoopInvariant(InsertLoop)) {
      BaseInsertPt = InsertLoop->getLoopPreheader()->getTerminator();
      InsertLoop = InsertLoop->getParentLoop();
    }

  Value *Base = Rewriter.expandCodeFor(NewBase, 0, BaseInsertPt);

  const SCEV* NewValSCEV = SE->getUnknown(Base);

  // If there is no immediate value, skip the next part.
  if (!Imm->isZero()) {
    // If we are inserting the base and imm values in the same block, make sure
    // to adjust the IP position if insertion reused a result.
    if (IP == BaseInsertPt)
      IP = Rewriter.getInsertionPoint();

    // Always emit the immediate (if non-zero) into the same block as the user.
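    // This keeps the immediate adjacent to its use, giving it the best
    // chance of folding into the user's immediate field (see the Imm
    // field's description above).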
    NewValSCEV = SE->getAddExpr(NewValSCEV, Imm);
  }

  return Rewriter.expandCodeFor(NewValSCEV, Ty, IP);
}


// Once we rewrite the code to insert the new IVs we want, update the
// operands of Inst to use the new expression 'NewBase', with 'Imm' added
// to it. NewBasePt is the last instruction which contributes to the
// value of NewBase in the case that it's a different instruction from
// the PHI that NewBase is computed from, or null otherwise.
//
void BasedUser::RewriteInstructionToUseNewBase(const SCEV* const &NewBase,
                                               Instruction *NewBasePt,
                                               SCEVExpander &Rewriter, Loop *L, Pass *P,
                                               LoopInfo &LI,
                                               SmallVectorImpl<WeakVH> &DeadInsts) {
  if (!isa<PHINode>(Inst)) {
    // By default, insert code at the user instruction.
    BasicBlock::iterator InsertPt = Inst;

    // However, if the Operand is itself an instruction, the (potentially
    // complex) inserted code may be shared by many users. Because of this, we
    // want to emit code for the computation of the operand right before its old
    // computation. This is usually safe, because we obviously used to use the
    // computation when it was computed in its current block. However, in some
    // cases (e.g. use of a post-incremented induction variable) the NewBase
    // value will be pinned to live somewhere after the original computation.
    // In this case, we have to back off.
    //
    // If this is a use outside the loop (which means after, since it is based
    // on a loop indvar) we use the post-incremented value, so that we don't
    // artificially make the preinc value live out the bottom of the loop.
    if (!isUseOfPostIncrementedValue && L->contains(Inst->getParent())) {
      if (NewBasePt && isa<PHINode>(OperandValToReplace)) {
        InsertPt = NewBasePt;
        ++InsertPt;
      } else if (Instruction *OpInst
                 = dyn_cast<Instruction>(OperandValToReplace)) {
        InsertPt = OpInst;
        while (isa<PHINode>(InsertPt)) ++InsertPt;
      }
    }
    Value *NewVal = InsertCodeForBaseAtPosition(NewBase,
                                                OperandValToReplace->getType(),
                                                Rewriter, InsertPt, L, LI);
    // Replace the use of the operand Value with the new Phi we just created.
    Inst->replaceUsesOfWith(OperandValToReplace, NewVal);

    DOUT << " Replacing with ";
    DEBUG(WriteAsOperand(*DOUT, NewVal, /*PrintType=*/false));
    DOUT << ", which has value " << *NewBase << " plus IMM " << *Imm << "\n";
    return;
  }

  // PHI nodes are more complex. We have to insert one copy of the NewBase+Imm
  // expression into each operand block that uses it. Note that PHI nodes can
  // have multiple entries for the same predecessor. We use a map to make sure
  // that a PHI node only has a single Value* for each predecessor (which also
  // prevents us from inserting duplicate code in some blocks).
  DenseMap<BasicBlock*, Value*> InsertedCode;
  PHINode *PN = cast<PHINode>(Inst);
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    if (PN->getIncomingValue(i) == OperandValToReplace) {
      // If the original expression is outside the loop, put the replacement
      // code in the same place as the original expression,
      // which need not be an immediate predecessor of this PHI. This way we
      // need only one copy of it even if it is referenced multiple times in
      // the PHI. We don't do this when the original expression is inside the
      // loop because multiple copies sometimes do useful sinking of code in
      // that case(?).
      Instruction *OldLoc = dyn_cast<Instruction>(OperandValToReplace);
      if (L->contains(OldLoc->getParent())) {
        // If this is a critical edge, split the edge so that we do not insert
        // the code on all predecessor/successor paths. We do this unless this
        // is the canonical backedge for this loop, as this can make some
        // inserted code be in an illegal position.
        BasicBlock *PHIPred = PN->getIncomingBlock(i);
        if (e != 1 && PHIPred->getTerminator()->getNumSuccessors() > 1 &&
            (PN->getParent() != L->getHeader() || !L->contains(PHIPred))) {

          // First step, split the critical edge.
          SplitCriticalEdge(PHIPred, PN->getParent(), P, false);

          // Next step: move the basic block. In particular, if the PHI node
          // is outside of the loop, and PredTI is in the loop, we want to
          // move the block to be immediately before the PHI block, not
          // immediately after PredTI.
          if (L->contains(PHIPred) && !L->contains(PN->getParent())) {
            BasicBlock *NewBB = PN->getIncomingBlock(i);
            NewBB->moveBefore(PN->getParent());
          }

          // Splitting the edge can reduce the number of PHI entries we have.
          e = PN->getNumIncomingValues();
        }
      }
      Value *&Code = InsertedCode[PN->getIncomingBlock(i)];
      if (!Code) {
        // Insert the code into the end of the predecessor block.
        Instruction *InsertPt = (L->contains(OldLoc->getParent())) ?
                                PN->getIncomingBlock(i)->getTerminator() :
                                OldLoc->getParent()->getTerminator();
        Code = InsertCodeForBaseAtPosition(NewBase, PN->getType(),
                                           Rewriter, InsertPt, L, LI);

        DOUT << " Changing PHI use to ";
        DEBUG(WriteAsOperand(*DOUT, Code, /*PrintType=*/false));
        DOUT << ", which has value " << *NewBase << " plus IMM " << *Imm << "\n";
      }

      // Replace the use of the operand Value with the new Phi we just created.
      PN->setIncomingValue(i, Code);
      Rewriter.clear();
    }
  }

  // PHI node might have become a constant value after SplitCriticalEdge.
  DeadInsts.push_back(Inst);
}


/// fitsInAddressMode - Return true if V can be subsumed within an addressing
/// mode, and does not need to be put in a register first.
static bool fitsInAddressMode(const SCEV* const &V, const Type *AccessTy,
                              const TargetLowering *TLI, bool HasBaseReg) {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(V)) {
    int64_t VC = SC->getValue()->getSExtValue();
    if (TLI) {
      TargetLowering::AddrMode AM;
      AM.BaseOffs = VC;
      AM.HasBaseReg = HasBaseReg;
      return TLI->isLegalAddressingMode(AM, AccessTy);
    } else {
      // Defaults to PPC. PPC allows a sign-extended 16-bit immediate field.
      return (VC > -(1 << 16) && VC < (1 << 16)-1);
    }
  }

  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V))
    if (GlobalValue *GV = dyn_cast<GlobalValue>(SU->getValue())) {
      if (TLI) {
        TargetLowering::AddrMode AM;
        AM.BaseGV = GV;
        AM.HasBaseReg = HasBaseReg;
        return TLI->isLegalAddressingMode(AM, AccessTy);
      } else {
        // Default: assume global addresses are not legal.
      }
    }

  return false;
}

/// MoveLoopVariantsToImmediateField - Move any subexpressions from Val that are
/// loop varying to the Imm operand.
static void MoveLoopVariantsToImmediateField(const SCEV* &Val, const SCEV* &Imm,
                                             Loop *L, ScalarEvolution *SE) {
  if (Val->isLoopInvariant(L)) return;  // Nothing to do.
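
  // Some subexpression of Val varies in L. For example, given
  // Val = (A + B) with A loop-variant and B loop-invariant, A is moved
  // into Imm and Val is rebuilt as just B.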
  if (const SCEVAddExpr *SAE = dyn_cast<SCEVAddExpr>(Val)) {
    SmallVector<const SCEV*, 4> NewOps;
    NewOps.reserve(SAE->getNumOperands());

    for (unsigned i = 0; i != SAE->getNumOperands(); ++i)
      if (!SAE->getOperand(i)->isLoopInvariant(L)) {
        // If this is a loop-variant expression, it must stay in the immediate
        // field of the expression.
        Imm = SE->getAddExpr(Imm, SAE->getOperand(i));
      } else {
        NewOps.push_back(SAE->getOperand(i));
      }

    if (NewOps.empty())
      Val = SE->getIntegerSCEV(0, Val->getType());
    else
      Val = SE->getAddExpr(NewOps);
  } else if (const SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Val)) {
    // Try to pull immediates out of the start value of nested addrec's.
    const SCEV* Start = SARE->getStart();
    MoveLoopVariantsToImmediateField(Start, Imm, L, SE);

    SmallVector<const SCEV*, 4> Ops(SARE->op_begin(), SARE->op_end());
    Ops[0] = Start;
    Val = SE->getAddRecExpr(Ops, SARE->getLoop());
  } else {
    // Otherwise, all of Val is variant, move the whole thing over.
    Imm = SE->getAddExpr(Imm, Val);
    Val = SE->getIntegerSCEV(0, Val->getType());
  }
}


/// MoveImmediateValues - Look at Val, and pull out any additions of constants
/// that can fit into the immediate field of instructions in the target.
/// Accumulate these immediate values into the Imm value.
static void MoveImmediateValues(const TargetLowering *TLI,
                                const Type *AccessTy,
                                const SCEV* &Val, const SCEV* &Imm,
                                bool isAddress, Loop *L,
                                ScalarEvolution *SE) {
  if (const SCEVAddExpr *SAE = dyn_cast<SCEVAddExpr>(Val)) {
    SmallVector<const SCEV*, 4> NewOps;
    NewOps.reserve(SAE->getNumOperands());

    for (unsigned i = 0; i != SAE->getNumOperands(); ++i) {
      const SCEV* NewOp = SAE->getOperand(i);
      MoveImmediateValues(TLI, AccessTy, NewOp, Imm, isAddress, L, SE);

      if (!NewOp->isLoopInvariant(L)) {
        // If this is a loop-variant expression, it must stay in the immediate
        // field of the expression.
        Imm = SE->getAddExpr(Imm, NewOp);
      } else {
        NewOps.push_back(NewOp);
      }
    }

    if (NewOps.empty())
      Val = SE->getIntegerSCEV(0, Val->getType());
    else
      Val = SE->getAddExpr(NewOps);
    return;
  } else if (const SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Val)) {
    // Try to pull immediates out of the start value of nested addrec's.
    const SCEV* Start = SARE->getStart();
    MoveImmediateValues(TLI, AccessTy, Start, Imm, isAddress, L, SE);

    if (Start != SARE->getStart()) {
      SmallVector<const SCEV*, 4> Ops(SARE->op_begin(), SARE->op_end());
      Ops[0] = Start;
      Val = SE->getAddRecExpr(Ops, SARE->getLoop());
    }
    return;
  } else if (const SCEVMulExpr *SME = dyn_cast<SCEVMulExpr>(Val)) {
    // Transform "8 * (4 + v)" -> "32 + 8*V" if "32" fits in the immed field.
    if (isAddress &&
        fitsInAddressMode(SME->getOperand(0), AccessTy, TLI, false) &&
        SME->getNumOperands() == 2 && SME->isLoopInvariant(L)) {

      const SCEV* SubImm = SE->getIntegerSCEV(0, Val->getType());
      const SCEV* NewOp = SME->getOperand(1);
      MoveImmediateValues(TLI, AccessTy, NewOp, SubImm, isAddress, L, SE);

      // If we extracted something out of the subexpressions, see if we can
      // simplify this!
      if (NewOp != SME->getOperand(1)) {
        // Scale SubImm up by "8". If the result is a target constant, we are
        // good.
        SubImm = SE->getMulExpr(SubImm, SME->getOperand(0));
        if (fitsInAddressMode(SubImm, AccessTy, TLI, false)) {
          // Accumulate the immediate.
          Imm = SE->getAddExpr(Imm, SubImm);

          // Update what is left of 'Val'.
          Val = SE->getMulExpr(SME->getOperand(0), NewOp);
          return;
        }
      }
    }
  }

  // Loop-variant expressions must stay in the immediate field of the
  // expression.
  if ((isAddress && fitsInAddressMode(Val, AccessTy, TLI, false)) ||
      !Val->isLoopInvariant(L)) {
    Imm = SE->getAddExpr(Imm, Val);
    Val = SE->getIntegerSCEV(0, Val->getType());
    return;
  }

  // Otherwise, no immediates to move.
}

static void MoveImmediateValues(const TargetLowering *TLI,
                                Instruction *User,
                                const SCEV* &Val, const SCEV* &Imm,
                                bool isAddress, Loop *L,
                                ScalarEvolution *SE) {
  const Type *AccessTy = getAccessType(User);
  MoveImmediateValues(TLI, AccessTy, Val, Imm, isAddress, L, SE);
}

/// SeparateSubExprs - Decompose Expr into all of the subexpressions that are
/// added together. This is used to reassociate common addition subexprs
/// together for maximal sharing when rewriting bases.
static void SeparateSubExprs(SmallVector<const SCEV*, 16> &SubExprs,
                             const SCEV* Expr,
                             ScalarEvolution *SE) {
  if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(Expr)) {
    for (unsigned j = 0, e = AE->getNumOperands(); j != e; ++j)
      SeparateSubExprs(SubExprs, AE->getOperand(j), SE);
  } else if (const SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Expr)) {
    const SCEV* Zero = SE->getIntegerSCEV(0, Expr->getType());
    if (SARE->getOperand(0) == Zero) {
      SubExprs.push_back(Expr);
    } else {
      // Compute the addrec with zero as its base.
      SmallVector<const SCEV*, 4> Ops(SARE->op_begin(), SARE->op_end());
      Ops[0] = Zero;   // Start with zero base.
      SubExprs.push_back(SE->getAddRecExpr(Ops, SARE->getLoop()));

      SeparateSubExprs(SubExprs, SARE->getOperand(0), SE);
    }
  } else if (!Expr->isZero()) {
    // Do not add zero.
    SubExprs.push_back(Expr);
  }
}

// This is logically local to the following function, but C++ says we have
// to make it file scope.
struct SubExprUseData { unsigned Count; bool notAllUsesAreFree; };

/// RemoveCommonExpressionsFromUseBases - Look through all of the Bases of all
/// the Uses, removing any common subexpressions, except that if all such
/// subexpressions can be folded into an addressing mode for all uses inside
/// the loop (this case is referred to as "free" in comments herein) we do
/// not remove anything. This looks for things like (a+b+c) and
/// (a+c+d) and computes the common (a+c) subexpression. The common expression
/// is *removed* from the Bases and returned.
static const SCEV*
RemoveCommonExpressionsFromUseBases(std::vector<BasedUser> &Uses,
                                    ScalarEvolution *SE, Loop *L,
                                    const TargetLowering *TLI) {
  unsigned NumUses = Uses.size();

  // Only one use? This is a very common case, so we handle it specially and
  // cheaply.
  const SCEV* Zero = SE->getIntegerSCEV(0, Uses[0].Base->getType());
  const SCEV* Result = Zero;
  const SCEV* FreeResult = Zero;
  if (NumUses == 1) {
    // If the use is inside the loop, use its base, regardless of what it is:
    // it is clearly shared across all the IV's. If the use is outside the loop
    // (which means after it) we don't want to factor anything *into* the loop,
    // so just use 0 as the base.
    if (L->contains(Uses[0].Inst->getParent()))
      std::swap(Result, Uses[0].Base);
    return Result;
  }

  // To find common subexpressions, count how many of Uses use each expression.
  // If any subexpressions are used Uses.size() times, they are common.
  // Also track whether all uses of each expression can be moved into an
  // addressing mode "for free"; such expressions are left within the loop.
  // struct SubExprUseData { unsigned Count; bool notAllUsesAreFree; };
  std::map<const SCEV*, SubExprUseData> SubExpressionUseData;

  // UniqueSubExprs - Keep track of all of the subexpressions we see in the
  // order we see them.
  SmallVector<const SCEV*, 16> UniqueSubExprs;

  SmallVector<const SCEV*, 16> SubExprs;
  unsigned NumUsesInsideLoop = 0;
  for (unsigned i = 0; i != NumUses; ++i) {
    // If the user is outside the loop, just ignore it for base computation.
    // Since the user is outside the loop, it must be *after* the loop (if it
    // were before, it could not be based on the loop IV). We don't want users
    // after the loop to affect base computation of values *inside* the loop,
    // because we can always add their offsets to the result IV after the loop
    // is done, ensuring we get good code inside the loop.
    if (!L->contains(Uses[i].Inst->getParent()))
      continue;
    NumUsesInsideLoop++;

    // If the base is zero (which is common), return zero now, there are no
    // CSEs we can find.
    if (Uses[i].Base == Zero) return Zero;

    // If this use is an address, we may be able to put CSEs in the addressing
    // mode rather than hoisting them.
    bool isAddrUse = isAddressUse(Uses[i].Inst, Uses[i].OperandValToReplace);
    // We may need the AccessTy below, but only when isAddrUse, so compute it
    // only in that case.
    const Type *AccessTy = 0;
    if (isAddrUse)
      AccessTy = getAccessType(Uses[i].Inst);

    // Split the expression into subexprs.
    SeparateSubExprs(SubExprs, Uses[i].Base, SE);
    // Add one to SubExpressionUseData.Count for each subexpr present, and
    // if the subexpr is not a valid immediate within an addressing mode use,
    // set SubExpressionUseData.notAllUsesAreFree. We definitely want to
    // hoist these out of the loop (if they are common to all uses).
    for (unsigned j = 0, e = SubExprs.size(); j != e; ++j) {
      if (++SubExpressionUseData[SubExprs[j]].Count == 1)
        UniqueSubExprs.push_back(SubExprs[j]);
      if (!isAddrUse || !fitsInAddressMode(SubExprs[j], AccessTy, TLI, false))
        SubExpressionUseData[SubExprs[j]].notAllUsesAreFree = true;
    }
    SubExprs.clear();
  }

  // Now that we know how many times each is used, build Result. Iterate over
  // UniqueSubexprs so that we have a stable ordering.
  for (unsigned i = 0, e = UniqueSubExprs.size(); i != e; ++i) {
    std::map<const SCEV*, SubExprUseData>::iterator I =
      SubExpressionUseData.find(UniqueSubExprs[i]);
    assert(I != SubExpressionUseData.end() && "Entry not found?");
    if (I->second.Count == NumUsesInsideLoop) { // Found CSE!
      if (I->second.notAllUsesAreFree)
        Result = SE->getAddExpr(Result, I->first);
      else
        FreeResult = SE->getAddExpr(FreeResult, I->first);
    } else
      // Remove non-cse's from SubExpressionUseData.
      SubExpressionUseData.erase(I);
  }

  if (FreeResult != Zero) {
    // We have some subexpressions that can be subsumed into addressing
    // modes in every use inside the loop. However, it's possible that
    // there are so many of them that the combined FreeResult cannot
    // be subsumed, or that the target cannot handle both a FreeResult
    // and a Result in the same instruction (for example because it would
    // require too many registers). Check this.
    for (unsigned i = 0; i < NumUses; ++i) {
      if (!L->contains(Uses[i].Inst->getParent()))
        continue;
      // We know this is an addressing mode use; if there are any uses that
      // are not, FreeResult would be Zero.
      const Type *AccessTy = getAccessType(Uses[i].Inst);
      if (!fitsInAddressMode(FreeResult, AccessTy, TLI, Result != Zero)) {
        // FIXME: could split up FreeResult into pieces here, some hoisted
        // and some not. There is no obvious advantage to this.
        Result = SE->getAddExpr(Result, FreeResult);
        FreeResult = Zero;
        break;
      }
    }
  }

  // If we found no CSE's, return now.
  if (Result == Zero) return Result;

  // If we still have a FreeResult, remove its subexpressions from
  // SubExpressionUseData. This means they will remain in the use Bases.
  if (FreeResult != Zero) {
    SeparateSubExprs(SubExprs, FreeResult, SE);
    for (unsigned j = 0, e = SubExprs.size(); j != e; ++j) {
      std::map<const SCEV*, SubExprUseData>::iterator I =
        SubExpressionUseData.find(SubExprs[j]);
      SubExpressionUseData.erase(I);
    }
    SubExprs.clear();
  }

  // Otherwise, remove all of the CSE's we found from each of the base values.
  for (unsigned i = 0; i != NumUses; ++i) {
    // Uses outside the loop don't necessarily include the common base, but
    // the final IV value coming into those uses does. Instead of trying to
    // remove the pieces of the common base, which might not be there,
    // subtract off the base to compensate for this.
    if (!L->contains(Uses[i].Inst->getParent())) {
      Uses[i].Base = SE->getMinusSCEV(Uses[i].Base, Result);
      continue;
    }

    // Split the expression into subexprs.
    SeparateSubExprs(SubExprs, Uses[i].Base, SE);

    // Remove any common subexpressions.
    for (unsigned j = 0, e = SubExprs.size(); j != e; ++j)
      if (SubExpressionUseData.count(SubExprs[j])) {
        SubExprs.erase(SubExprs.begin()+j);
        --j; --e;
      }

    // Finally, add the non-shared expressions together.
    if (SubExprs.empty())
      Uses[i].Base = Zero;
    else
      Uses[i].Base = SE->getAddExpr(SubExprs);
    SubExprs.clear();
  }

  return Result;
}

/// ValidScale - Check whether the given Scale is valid for all loads and
/// stores in UsersToProcess.
///
bool LoopStrengthReduce::ValidScale(bool HasBaseReg, int64_t Scale,
                                    const std::vector<BasedUser>& UsersToProcess) {
  if (!TLI)
    return true;

  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
    // If this is a load or other access, pass the type of the access in.
    const Type *AccessTy = Type::VoidTy;
    if (isAddressUse(UsersToProcess[i].Inst,
                     UsersToProcess[i].OperandValToReplace))
      AccessTy = getAccessType(UsersToProcess[i].Inst);
    else if (isa<PHINode>(UsersToProcess[i].Inst))
      continue;

    TargetLowering::AddrMode AM;
    if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(UsersToProcess[i].Imm))
      AM.BaseOffs = SC->getValue()->getSExtValue();
    AM.HasBaseReg = HasBaseReg || !UsersToProcess[i].Base->isZero();
    AM.Scale = Scale;

    // If load[imm+r*scale] is illegal, bail out.
    if (!TLI->isLegalAddressingMode(AM, AccessTy))
      return false;
  }
  return true;
}

/// ValidOffset - Check whether the given Offset is valid for all loads and
/// stores in UsersToProcess.
///
bool LoopStrengthReduce::ValidOffset(bool HasBaseReg,
                                     int64_t Offset,
                                     int64_t Scale,
                                     const std::vector<BasedUser>& UsersToProcess) {
  if (!TLI)
    return true;

  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
    // If this is a load or other access, pass the type of the access in.
    const Type *AccessTy = Type::VoidTy;
    if (isAddressUse(UsersToProcess[i].Inst,
                     UsersToProcess[i].OperandValToReplace))
      AccessTy = getAccessType(UsersToProcess[i].Inst);
    else if (isa<PHINode>(UsersToProcess[i].Inst))
      continue;

    TargetLowering::AddrMode AM;
    if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(UsersToProcess[i].Imm))
      AM.BaseOffs = SC->getValue()->getSExtValue();
    AM.BaseOffs = (uint64_t)AM.BaseOffs + (uint64_t)Offset;
    AM.HasBaseReg = HasBaseReg || !UsersToProcess[i].Base->isZero();
    AM.Scale = Scale;

    // If load[imm+r*scale] is illegal, bail out.
    if (!TLI->isLegalAddressingMode(AM, AccessTy))
      return false;
  }
  return true;
}

/// RequiresTypeConversion - Returns true if converting Ty1 to Ty2 is not
/// a nop.
bool LoopStrengthReduce::RequiresTypeConversion(const Type *Ty1,
                                                const Type *Ty2) {
  if (Ty1 == Ty2)
    return false;
  Ty1 = SE->getEffectiveSCEVType(Ty1);
  Ty2 = SE->getEffectiveSCEVType(Ty2);
  if (Ty1 == Ty2)
    return false;
  if (Ty1->canLosslesslyBitCastTo(Ty2))
    return false;
  if (TLI && TLI->isTruncateFree(Ty1, Ty2))
    return false;
  return true;
}

/// CheckForIVReuse - Returns the multiple if the stride is the multiple
/// of a previous stride and it is a legal value for the target addressing
/// mode scale component and optional base reg. This allows the users of
/// this stride to be rewritten as prev iv * factor. It returns 0 if no
/// reuse is possible. Factors can be negative on some targets, e.g. ARM.
///
/// If all uses are outside the loop, we don't require that all multiplies
/// be folded into the addressing mode, nor even that the factor be constant;
/// a multiply (executed once) outside the loop is better than another IV
/// within. Well, usually.
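///
/// Illustrative sketch (assuming a target with scaled addressing such as
/// x86's [base + index*scale]): given an existing IV of stride 4, a
/// stride-8 user
///   load [A + iv8]
/// can often be rewritten as
///   load [A + iv4*2]
/// folding the factor 2 into the scale field rather than materializing a
/// second induction variable.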
const SCEV* LoopStrengthReduce::CheckForIVReuse(bool HasBaseReg,
                                bool AllUsesAreAddresses,
                                bool AllUsesAreOutsideLoop,
                                const SCEV* const &Stride,
                                IVExpr &IV, const Type *Ty,
                                const std::vector<BasedUser>& UsersToProcess) {
  if (StrideNoReuse.count(Stride))
    return SE->getIntegerSCEV(0, Stride->getType());

  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Stride)) {
    int64_t SInt = SC->getValue()->getSExtValue();
    for (unsigned NewStride = 0, e = IU->StrideOrder.size();
         NewStride != e; ++NewStride) {
      std::map<const SCEV*, IVsOfOneStride>::iterator SI =
        IVsByStride.find(IU->StrideOrder[NewStride]);
      if (SI == IVsByStride.end() || !isa<SCEVConstant>(SI->first) ||
          StrideNoReuse.count(SI->first))
        continue;
      int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
      if (SI->first != Stride &&
          (unsigned(abs64(SInt)) < SSInt || (SInt % SSInt) != 0))
        continue;
      int64_t Scale = SInt / SSInt;
      // Check that this stride is valid for all the types used for loads and
      // stores; if it can be used for some and not others, we might as well use
      // the original stride everywhere, since we have to create the IV for it
      // anyway. If the scale is 1, then we don't need to worry about folding
      // multiplications.
      if (Scale == 1 ||
          (AllUsesAreAddresses &&
           ValidScale(HasBaseReg, Scale, UsersToProcess))) {
        // Prefer to reuse an IV with a base of zero.
        for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
               IE = SI->second.IVs.end(); II != IE; ++II)
          // Only reuse previous IV if it would not require a type conversion
          // and if the base difference can be folded.
          if (II->Base->isZero() &&
              !RequiresTypeConversion(II->Base->getType(), Ty)) {
            IV = *II;
            return SE->getIntegerSCEV(Scale, Stride->getType());
          }
        // Otherwise, settle for an IV with a foldable base.
        if (AllUsesAreAddresses)
          for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
                 IE = SI->second.IVs.end(); II != IE; ++II)
            // Only reuse previous IV if it would not require a type conversion
            // and if the base difference can be folded.
            if (SE->getEffectiveSCEVType(II->Base->getType()) ==
                SE->getEffectiveSCEVType(Ty) &&
                isa<SCEVConstant>(II->Base)) {
              int64_t Base =
                cast<SCEVConstant>(II->Base)->getValue()->getSExtValue();
              if (Base > INT32_MIN && Base <= INT32_MAX &&
                  ValidOffset(HasBaseReg, -Base * Scale,
                              Scale, UsersToProcess)) {
                IV = *II;
                return SE->getIntegerSCEV(Scale, Stride->getType());
              }
            }
      }
    }
  } else if (AllUsesAreOutsideLoop) {
    // Accept nonconstant strides here; it is almost always right to
    // substitute an existing IV if we can.
    for (unsigned NewStride = 0, e = IU->StrideOrder.size();
         NewStride != e; ++NewStride) {
      std::map<const SCEV*, IVsOfOneStride>::iterator SI =
        IVsByStride.find(IU->StrideOrder[NewStride]);
      if (SI == IVsByStride.end() || !isa<SCEVConstant>(SI->first))
        continue;
      int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
      if (SI->first != Stride && SSInt != 1)
        continue;
      for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
             IE = SI->second.IVs.end(); II != IE; ++II)
        // Accept nonzero base here.
        // Only reuse previous IV if it would not require a type conversion.
        if (!RequiresTypeConversion(II->Base->getType(), Ty)) {
          IV = *II;
          return Stride;
        }
    }
    // Special case, old IV is -1*x and this one is x. Can treat this one as
    // -1*old.
    for (unsigned NewStride = 0, e = IU->StrideOrder.size();
         NewStride != e; ++NewStride) {
      std::map<const SCEV*, IVsOfOneStride>::iterator SI =
        IVsByStride.find(IU->StrideOrder[NewStride]);
      if (SI == IVsByStride.end())
        continue;
      if (const SCEVMulExpr *ME = dyn_cast<SCEVMulExpr>(SI->first))
        if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(ME->getOperand(0)))
          if (Stride == ME->getOperand(1) &&
              SC->getValue()->getSExtValue() == -1LL)
            for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
                   IE = SI->second.IVs.end(); II != IE; ++II)
              // Accept nonzero base here.
              // Only reuse previous IV if it would not require type conversion.
              if (!RequiresTypeConversion(II->Base->getType(), Ty)) {
                IV = *II;
                return SE->getIntegerSCEV(-1LL, Stride->getType());
              }
    }
  }
  return SE->getIntegerSCEV(0, Stride->getType());
}

/// PartitionByIsUseOfPostIncrementedValue - Simple boolean predicate that
/// returns true if Val's isUseOfPostIncrementedValue is true.
static bool PartitionByIsUseOfPostIncrementedValue(const BasedUser &Val) {
  return Val.isUseOfPostIncrementedValue;
}

/// isNonConstantNegative - Return true if the specified scev is negated, but
/// not a constant.
static bool isNonConstantNegative(const SCEV* const &Expr) {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Expr);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative, this matches things like (-42 * V).
  return SC->getValue()->getValue().isNegative();
}

/// CollectIVUsers - Transform our list of users and offsets to a bit more
/// complex table. In this new vector, each 'BasedUser' contains 'Base', the base
/// of the strided accesses, as well as the old information from Uses. We
/// progressively move information from the Base field to the Imm field, until
/// we eventually have the full access expression to rewrite the use.
const SCEV* LoopStrengthReduce::CollectIVUsers(const SCEV* const &Stride,
                                               IVUsersOfOneStride &Uses,
                                               Loop *L,
                                               bool &AllUsesAreAddresses,
                                               bool &AllUsesAreOutsideLoop,
                                               std::vector<BasedUser> &UsersToProcess) {
  // FIXME: Generalize to non-affine IV's.
  if (!Stride->isLoopInvariant(L))
    return SE->getIntegerSCEV(0, Stride->getType());

  UsersToProcess.reserve(Uses.Users.size());
  for (ilist<IVStrideUse>::iterator I = Uses.Users.begin(),
         E = Uses.Users.end(); I != E; ++I) {
    UsersToProcess.push_back(BasedUser(*I, SE));

    // Move any loop variant operands from the offset field to the immediate
    // field of the use, so that we don't try to use something before it is
    // computed.
    MoveLoopVariantsToImmediateField(UsersToProcess.back().Base,
                                     UsersToProcess.back().Imm, L, SE);
    assert(UsersToProcess.back().Base->isLoopInvariant(L) &&
           "Base value is not loop invariant!");
  }

  // We now have a whole bunch of uses of like-strided induction variables, but
  // they might all have different bases. We want to emit one PHI node for this
  // stride, into which we fold as many common expressions (between the IVs) as
  // possible. Start by identifying the common expressions in the base values
  // for the strides (e.g. if we have "A+C+B" and "A+B+D" as our bases, find
  // "A+B"), emit it to the preheader, then remove the expression from the
  // UsersToProcess base values.
  const SCEV* CommonExprs =
    RemoveCommonExpressionsFromUseBases(UsersToProcess, SE, L, TLI);

  // Next, figure out what we can represent in the immediate fields of
  // instructions. If we can represent anything there, move it to the imm
  // fields of the BasedUsers. We do this so that it increases the commonality
  // of the remaining uses.
  unsigned NumPHI = 0;
  bool HasAddress = false;
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
    // If the user is not in the current loop, this means it is using the exit
    // value of the IV. Do not put anything in the base, make sure it's all in
    // the immediate field to allow as much factoring as possible.
    if (!L->contains(UsersToProcess[i].Inst->getParent())) {
      UsersToProcess[i].Imm = SE->getAddExpr(UsersToProcess[i].Imm,
                                             UsersToProcess[i].Base);
      UsersToProcess[i].Base =
        SE->getIntegerSCEV(0, UsersToProcess[i].Base->getType());
    } else {
      // Not all uses are outside the loop.
      AllUsesAreOutsideLoop = false;

      // Addressing modes can be folded into loads and stores. Be careful that
      // the store is through the expression, not of the expression though.
      bool isPHI = false;
      bool isAddress = isAddressUse(UsersToProcess[i].Inst,
                                    UsersToProcess[i].OperandValToReplace);
      if (isa<PHINode>(UsersToProcess[i].Inst)) {
        isPHI = true;
        ++NumPHI;
      }

      if (isAddress)
        HasAddress = true;

      // If this use isn't an address, then not all uses are addresses.
      if (!isAddress && !isPHI)
        AllUsesAreAddresses = false;

      MoveImmediateValues(TLI, UsersToProcess[i].Inst, UsersToProcess[i].Base,
                          UsersToProcess[i].Imm, isAddress, L, SE);
    }
  }

  // If one of the uses is a PHI node and all other uses are addresses, still
  // allow iv reuse. Essentially we are trading one constant multiplication
  // for one fewer iv.
  if (NumPHI > 1)
    AllUsesAreAddresses = false;

  // There are no in-loop address uses.
  if (AllUsesAreAddresses && (!HasAddress && !AllUsesAreOutsideLoop))
    AllUsesAreAddresses = false;

  return CommonExprs;
}

/// ShouldUseFullStrengthReductionMode - Test whether full strength-reduction
/// is valid and profitable for the given set of users of a stride. In
/// full strength-reduction mode, all addresses at the current stride are
/// strength-reduced all the way down to pointer arithmetic.
///
bool LoopStrengthReduce::ShouldUseFullStrengthReductionMode(
                                const std::vector<BasedUser> &UsersToProcess,
                                const Loop *L,
                                bool AllUsesAreAddresses,
                                const SCEV* Stride) {
  if (!EnableFullLSRMode)
    return false;

  // The heuristics below aim to avoid increasing register pressure, but
  // fully strength-reducing all the addresses increases the number of
  // add instructions, so don't do this when optimizing for size.
  // TODO: If the loop is large, the savings due to simpler addresses
  // may outweigh the costs of the extra increment instructions.
  if (L->getHeader()->getParent()->hasFnAttr(Attribute::OptimizeForSize))
    return false;

  // TODO: For now, don't do full strength reduction if there could
  // potentially be greater-stride multiples of the current stride
  // which could reuse the current stride IV.
  if (IU->StrideOrder.back() != Stride)
    return false;

  // Iterate through the uses to find conditions that automatically rule out
  // full-lsr mode.
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ) {
    const SCEV *Base = UsersToProcess[i].Base;
    const SCEV *Imm = UsersToProcess[i].Imm;
    // If any users have a loop-variant component, they can't be fully
    // strength-reduced.
    if (Imm && !Imm->isLoopInvariant(L))
      return false;
    // If there are two users with the same base and the difference between
    // the two Imm values can't be folded into the address, full
    // strength reduction would increase register pressure.
    do {
      const SCEV *CurImm = UsersToProcess[i].Imm;
      if ((CurImm || Imm) && CurImm != Imm) {
        if (!CurImm) CurImm = SE->getIntegerSCEV(0, Stride->getType());
        if (!Imm) Imm = SE->getIntegerSCEV(0, Stride->getType());
        const Instruction *Inst = UsersToProcess[i].Inst;
        const Type *AccessTy = getAccessType(Inst);
        const SCEV* Diff = SE->getMinusSCEV(UsersToProcess[i].Imm, Imm);
        if (!Diff->isZero() &&
            (!AllUsesAreAddresses ||
             !fitsInAddressMode(Diff, AccessTy, TLI, /*HasBaseReg=*/true)))
          return false;
      }
    } while (++i != e && Base == UsersToProcess[i].Base);
  }

  // If there's exactly one user in this stride, fully strength-reducing it
  // won't increase register pressure. If it's starting from a non-zero base,
  // it'll be simpler this way.
  if (UsersToProcess.size() == 1 && !UsersToProcess[0].Base->isZero())
    return true;

  // Otherwise, if there are any users in this stride that don't require
  // a register for their base, full strength-reduction will increase
  // register pressure.
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i)
    if (UsersToProcess[i].Base->isZero())
      return false;

  // Otherwise, go for it.
  return true;
}

/// InsertAffinePhi - Create and insert a PHI node for an induction variable
/// with the specified start and step values in the specified loop.
///
/// If the stride is negative, the increment is emitted as a subtract of the
/// negated stride instead of an add.
///
/// Return the created phi node.
///
static PHINode *InsertAffinePhi(const SCEV* Start, const SCEV* Step,
                                Instruction *IVIncInsertPt,
                                const Loop *L,
                                SCEVExpander &Rewriter) {
  assert(Start->isLoopInvariant(L) && "New PHI start is not loop invariant!");
  assert(Step->isLoopInvariant(L) && "New PHI stride is not loop invariant!");

  BasicBlock *Header = L->getHeader();
  BasicBlock *Preheader = L->getLoopPreheader();
  BasicBlock *LatchBlock = L->getLoopLatch();
  const Type *Ty = Start->getType();
  Ty = Rewriter.SE.getEffectiveSCEVType(Ty);

  PHINode *PN = PHINode::Create(Ty, "lsr.iv", Header->begin());
  PN->addIncoming(Rewriter.expandCodeFor(Start, Ty, Preheader->getTerminator()),
                  Preheader);

  // If the stride is negative, insert a sub instead of an add for the
  // increment.
  bool isNegative = isNonConstantNegative(Step);
  const SCEV* IncAmount = Step;
  if (isNegative)
    IncAmount = Rewriter.SE.getNegativeSCEV(Step);

  // Insert an add instruction right before the terminator corresponding
  // to the back-edge or just before the only use. The location is determined
  // by the caller and passed in as IVIncInsertPt.
  Value *StepV = Rewriter.expandCodeFor(IncAmount, Ty,
                                        Preheader->getTerminator());
  Instruction *IncV;
  if (isNegative) {
    IncV = BinaryOperator::CreateSub(PN, StepV, "lsr.iv.next",
                                     IVIncInsertPt);
  } else {
    IncV = BinaryOperator::CreateAdd(PN, StepV, "lsr.iv.next",
                                     IVIncInsertPt);
  }
  if (!isa<ConstantInt>(StepV)) ++NumVariable;

  PN->addIncoming(IncV, LatchBlock);

  ++NumInserted;
  return PN;
}

static void SortUsersToProcess(std::vector<BasedUser> &UsersToProcess) {
  // We want to emit code for users inside the loop first. To do this, we
  // rearrange BasedUser so that the entries at the end have
  // isUseOfPostIncrementedValue = false, because we pop off the end of the
  // vector (so we handle them first).
  std::partition(UsersToProcess.begin(), UsersToProcess.end(),
                 PartitionByIsUseOfPostIncrementedValue);

  // Sort this by base, so that things with the same base are handled
  // together. By partitioning first and stable-sorting later, we are
  // guaranteed that within each base we will pop off users from within the
  // loop before users outside of the loop with a particular base.
  //
  // We would like to use stable_sort here, but we can't. The problem is that
  // const SCEV*'s don't have a deterministic ordering w.r.t. each other, so
  // we don't have anything to do a '<' comparison on. Because we think the
  // number of uses is small, do a horrible bubble sort which just relies on
  // ==.
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
    // Get a base value.
    const SCEV* Base = UsersToProcess[i].Base;

    // Compact everything with this base to be consecutive with this one.
    for (unsigned j = i+1; j != e; ++j) {
      if (UsersToProcess[j].Base == Base) {
        std::swap(UsersToProcess[i+1], UsersToProcess[j]);
        ++i;
      }
    }
  }
}

/// PrepareToStrengthReduceFully - Prepare to fully strength-reduce
/// UsersToProcess, meaning lowering addresses all the way down to direct
/// pointer arithmetic.
///
void
LoopStrengthReduce::PrepareToStrengthReduceFully(
                                std::vector<BasedUser> &UsersToProcess,
                                const SCEV* Stride,
                                const SCEV* CommonExprs,
                                const Loop *L,
                                SCEVExpander &PreheaderRewriter) {
  DOUT << " Fully reducing all users\n";

  // Rewrite the UsersToProcess records, creating a separate PHI for each
  // unique Base value.
  Instruction *IVIncInsertPt = L->getLoopLatch()->getTerminator();
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ) {
    // TODO: The uses are grouped by base, but not sorted. We arbitrarily
    // pick the first Imm value here to start with, and adjust it for the
    // other uses.
    const SCEV* Imm = UsersToProcess[i].Imm;
    const SCEV* Base = UsersToProcess[i].Base;
    const SCEV* Start = SE->getAddExpr(CommonExprs, Base, Imm);
    PHINode *Phi = InsertAffinePhi(Start, Stride, IVIncInsertPt, L,
                                   PreheaderRewriter);
    // Loop over all the users with the same base.
1395 do { 1396 UsersToProcess[i].Base = SE->getIntegerSCEV(0, Stride->getType()); 1397 UsersToProcess[i].Imm = SE->getMinusSCEV(UsersToProcess[i].Imm, Imm); 1398 UsersToProcess[i].Phi = Phi; 1399 assert(UsersToProcess[i].Imm->isLoopInvariant(L) && 1400 "ShouldUseFullStrengthReductionMode should reject this!"); 1401 } while (++i != e && Base == UsersToProcess[i].Base); 1402 } 1403} 1404 1405/// FindIVIncInsertPt - Return the location to insert the increment instruction. 1406/// If the only use if a use of postinc value, (must be the loop termination 1407/// condition), then insert it just before the use. 1408static Instruction *FindIVIncInsertPt(std::vector<BasedUser> &UsersToProcess, 1409 const Loop *L) { 1410 if (UsersToProcess.size() == 1 && 1411 UsersToProcess[0].isUseOfPostIncrementedValue && 1412 L->contains(UsersToProcess[0].Inst->getParent())) 1413 return UsersToProcess[0].Inst; 1414 return L->getLoopLatch()->getTerminator(); 1415} 1416 1417/// PrepareToStrengthReduceWithNewPhi - Insert a new induction variable for the 1418/// given users to share. 1419/// 1420void 1421LoopStrengthReduce::PrepareToStrengthReduceWithNewPhi( 1422 std::vector<BasedUser> &UsersToProcess, 1423 const SCEV* Stride, 1424 const SCEV* CommonExprs, 1425 Value *CommonBaseV, 1426 Instruction *IVIncInsertPt, 1427 const Loop *L, 1428 SCEVExpander &PreheaderRewriter) { 1429 DOUT << " Inserting new PHI:\n"; 1430 1431 PHINode *Phi = InsertAffinePhi(SE->getUnknown(CommonBaseV), 1432 Stride, IVIncInsertPt, L, 1433 PreheaderRewriter); 1434 1435 // Remember this in case a later stride is multiple of this. 1436 IVsByStride[Stride].addIV(Stride, CommonExprs, Phi); 1437 1438 // All the users will share this new IV. 1439 for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) 1440 UsersToProcess[i].Phi = Phi; 1441 1442 DOUT << " IV="; 1443 DEBUG(WriteAsOperand(*DOUT, Phi, /*PrintType=*/false)); 1444 DOUT << "\n"; 1445} 1446 1447/// PrepareToStrengthReduceFromSmallerStride - Prepare for the given users to 1448/// reuse an induction variable with a stride that is a factor of the current 1449/// induction variable. 1450/// 1451void 1452LoopStrengthReduce::PrepareToStrengthReduceFromSmallerStride( 1453 std::vector<BasedUser> &UsersToProcess, 1454 Value *CommonBaseV, 1455 const IVExpr &ReuseIV, 1456 Instruction *PreInsertPt) { 1457 DOUT << " Rewriting in terms of existing IV of STRIDE " << *ReuseIV.Stride 1458 << " and BASE " << *ReuseIV.Base << "\n"; 1459 1460 // All the users will share the reused IV. 1461 for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) 1462 UsersToProcess[i].Phi = ReuseIV.PHI; 1463 1464 Constant *C = dyn_cast<Constant>(CommonBaseV); 1465 if (C && 1466 (!C->isNullValue() && 1467 !fitsInAddressMode(SE->getUnknown(CommonBaseV), CommonBaseV->getType(), 1468 TLI, false))) 1469 // We want the common base emitted into the preheader! 
This is just 1470 // using cast as a copy so BitCast (no-op cast) is appropriate 1471 CommonBaseV = new BitCastInst(CommonBaseV, CommonBaseV->getType(), 1472 "commonbase", PreInsertPt); 1473} 1474 1475static bool IsImmFoldedIntoAddrMode(GlobalValue *GV, int64_t Offset, 1476 const Type *AccessTy, 1477 std::vector<BasedUser> &UsersToProcess, 1478 const TargetLowering *TLI) { 1479 SmallVector<Instruction*, 16> AddrModeInsts; 1480 for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) { 1481 if (UsersToProcess[i].isUseOfPostIncrementedValue) 1482 continue; 1483 ExtAddrMode AddrMode = 1484 AddressingModeMatcher::Match(UsersToProcess[i].OperandValToReplace, 1485 AccessTy, UsersToProcess[i].Inst, 1486 AddrModeInsts, *TLI); 1487 if (GV && GV != AddrMode.BaseGV) 1488 return false; 1489 if (Offset && !AddrMode.BaseOffs) 1490 // FIXME: How to accurately check whether the immediate offset is folded. 1491 return false; 1492 AddrModeInsts.clear(); 1493 } 1494 return true; 1495} 1496 1497/// StrengthReduceStridedIVUsers - Strength reduce all of the users of a single 1498/// stride of the IV. All of the users may have different starting values, and this 1499/// may not be the only stride. 1500void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEV* const &Stride, 1501 IVUsersOfOneStride &Uses, 1502 Loop *L) { 1503 // If all the users are moved to another stride, then there is nothing to do. 1504 if (Uses.Users.empty()) 1505 return; 1506 1507 // Keep track of whether every use in UsersToProcess is an address. If they 1508 // all are, we may be able to rewrite the entire collection of them in terms of a 1509 // smaller-stride IV. 1510 bool AllUsesAreAddresses = true; 1511 1512 // Keep track of whether every use of a single stride is outside the loop. If so, 1513 // we want to be more aggressive about reusing a smaller-stride IV; a 1514 // multiply outside the loop is better than another IV inside. Well, usually. 1515 bool AllUsesAreOutsideLoop = true; 1516 1517 // Transform our list of users and offsets to a bit more complex table. In 1518 // this new vector, each 'BasedUser' contains 'Base', the base of the 1519 // strided access, as well as the old information from Uses. We progressively 1520 // move information from the Base field to the Imm field, until we eventually 1521 // have the full access expression to rewrite the use. 1522 std::vector<BasedUser> UsersToProcess; 1523 const SCEV* CommonExprs = CollectIVUsers(Stride, Uses, L, AllUsesAreAddresses, 1524 AllUsesAreOutsideLoop, 1525 UsersToProcess); 1526 1527 // Sort the UsersToProcess array so that users with common bases are 1528 // next to each other. 1529 SortUsersToProcess(UsersToProcess); 1530 1531 // If we managed to find some expressions in common, we'll need to carry 1532 // their value in a register and add it in for each use. This will take up 1533 // a register operand, which potentially restricts what stride values are 1534 // valid. 1535 bool HaveCommonExprs = !CommonExprs->isZero(); 1536 const Type *ReplacedTy = CommonExprs->getType(); 1537 1538 // If all uses are addresses, consider sinking the immediate part of the 1539 // common expression back into uses if they can fit in the immediate fields.
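// Illustrative example (not from any particular target): if the common // expression is (%base + 64) and every use is a memory access whose addressing // mode can fold a displacement of 64, it is usually cheaper to keep just %base // in a register and fold the 64 into each access than to materialize %base+64 // in the preheader.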
1540 if (TLI && HaveCommonExprs && AllUsesAreAddresses) { 1541 const SCEV* NewCommon = CommonExprs; 1542 const SCEV* Imm = SE->getIntegerSCEV(0, ReplacedTy); 1543 MoveImmediateValues(TLI, Type::VoidTy, NewCommon, Imm, true, L, SE); 1544 if (!Imm->isZero()) { 1545 bool DoSink = true; 1546 1547 // If the immediate part of the common expression is a GV, check if it's 1548 // possible to fold it into the target addressing mode. 1549 GlobalValue *GV = 0; 1550 if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(Imm)) 1551 GV = dyn_cast<GlobalValue>(SU->getValue()); 1552 int64_t Offset = 0; 1553 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Imm)) 1554 Offset = SC->getValue()->getSExtValue(); 1555 if (GV || Offset) 1556 // Pass VoidTy as the AccessTy to be conservative, because 1557 // there could be multiple access types among all the uses. 1558 DoSink = IsImmFoldedIntoAddrMode(GV, Offset, Type::VoidTy, 1559 UsersToProcess, TLI); 1560 1561 if (DoSink) { 1562 DOUT << " Sinking " << *Imm << " back down into uses\n"; 1563 for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) 1564 UsersToProcess[i].Imm = SE->getAddExpr(UsersToProcess[i].Imm, Imm); 1565 CommonExprs = NewCommon; 1566 HaveCommonExprs = !CommonExprs->isZero(); 1567 ++NumImmSunk; 1568 } 1569 } 1570 } 1571 1572 // Now that we know what we need to do, insert the PHI node itself. 1573 // 1574 DOUT << "LSR: Examining IVs of TYPE " << *ReplacedTy << " of STRIDE " 1575 << *Stride << ":\n" 1576 << " Common base: " << *CommonExprs << "\n"; 1577 1578 SCEVExpander Rewriter(*SE); 1579 SCEVExpander PreheaderRewriter(*SE); 1580 1581 BasicBlock *Preheader = L->getLoopPreheader(); 1582 Instruction *PreInsertPt = Preheader->getTerminator(); 1583 BasicBlock *LatchBlock = L->getLoopLatch(); 1584 Instruction *IVIncInsertPt = LatchBlock->getTerminator(); 1585 1586 Value *CommonBaseV = Constant::getNullValue(ReplacedTy); 1587 1588 const SCEV* RewriteFactor = SE->getIntegerSCEV(0, ReplacedTy); 1589 IVExpr ReuseIV(SE->getIntegerSCEV(0, Type::Int32Ty), 1590 SE->getIntegerSCEV(0, Type::Int32Ty), 1591 0); 1592 1593 /// Choose a strength-reduction strategy and prepare for it by creating 1594 /// the necessary PHIs and adjusting the bookkeeping. 1595 if (ShouldUseFullStrengthReductionMode(UsersToProcess, L, 1596 AllUsesAreAddresses, Stride)) { 1597 PrepareToStrengthReduceFully(UsersToProcess, Stride, CommonExprs, L, 1598 PreheaderRewriter); 1599 } else { 1600 // Emit the initial base value into the loop preheader. 1601 CommonBaseV = PreheaderRewriter.expandCodeFor(CommonExprs, ReplacedTy, 1602 PreInsertPt); 1603 1604 // If all uses are addresses, check if it is possible to reuse an IV. The 1605 // new IV must have a stride that is a multiple of the old stride; the 1606 // multiple must be a number that can be encoded in the scale field of the 1607 // target addressing mode; and we must have a valid instruction after this 1608 // substitution, including the immediate field, if any. 
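// A sketch of the reuse case (target-dependent): with base+index*scale // addressing, as on x86, address uses of stride 8 can often be rewritten in // terms of an existing stride-2 IV with a RewriteFactor of 4, provided a scale // of 4 is legal for every rewritten access and any immediate offsets still fit.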
1609 RewriteFactor = CheckForIVReuse(HaveCommonExprs, AllUsesAreAddresses, 1610 AllUsesAreOutsideLoop, 1611 Stride, ReuseIV, ReplacedTy, 1612 UsersToProcess); 1613 if (!RewriteFactor->isZero()) 1614 PrepareToStrengthReduceFromSmallerStride(UsersToProcess, CommonBaseV, 1615 ReuseIV, PreInsertPt); 1616 else { 1617 IVIncInsertPt = FindIVIncInsertPt(UsersToProcess, L); 1618 PrepareToStrengthReduceWithNewPhi(UsersToProcess, Stride, CommonExprs, 1619 CommonBaseV, IVIncInsertPt, 1620 L, PreheaderRewriter); 1621 } 1622 } 1623 1624 // Process all the users now, replacing their strided uses with 1625 // strength-reduced forms. This outer loop handles all bases, the inner 1626 // loop handles all users of a particular base. 1627 while (!UsersToProcess.empty()) { 1628 const SCEV* Base = UsersToProcess.back().Base; 1629 Instruction *Inst = UsersToProcess.back().Inst; 1630 1631 // Emit the code for Base into the preheader. 1632 Value *BaseV = 0; 1633 if (!Base->isZero()) { 1634 BaseV = PreheaderRewriter.expandCodeFor(Base, 0, PreInsertPt); 1635 1636 DOUT << " INSERTING code for BASE = " << *Base << ":"; 1637 if (BaseV->hasName()) 1638 DOUT << " Result value name = %" << BaseV->getNameStr(); 1639 DOUT << "\n"; 1640 1641 // If BaseV is a non-zero constant, make sure that it gets inserted into 1642 // the preheader, instead of being forward substituted into the uses. We 1643 // do this by forcing a BitCast (noop cast) to be inserted into the 1644 // preheader in this case. 1645 if (!fitsInAddressMode(Base, getAccessType(Inst), TLI, false)) { 1646 // We want this constant emitted into the preheader! This is just 1647 // using cast as a copy so BitCast (no-op cast) is appropriate 1648 BaseV = new BitCastInst(BaseV, BaseV->getType(), "preheaderinsert", 1649 PreInsertPt); 1650 } 1651 } 1652 1653 // Emit the code to add the immediate offset to the Phi value, just before 1654 // the instructions that we identified as using this stride and base. 1655 do { 1656 // FIXME: Use emitted users to emit other users. 1657 BasedUser &User = UsersToProcess.back(); 1658 1659 DOUT << " Examining "; 1660 if (User.isUseOfPostIncrementedValue) 1661 DOUT << "postinc"; 1662 else 1663 DOUT << "preinc"; 1664 DOUT << " use "; 1665 DEBUG(WriteAsOperand(*DOUT, UsersToProcess.back().OperandValToReplace, 1666 /*PrintType=*/false)); 1667 DOUT << " in Inst: " << *(User.Inst); 1668 1669 // If this instruction wants to use the post-incremented value, move it 1670 // after the post-inc and use its value instead of the PHI. 1671 Value *RewriteOp = User.Phi; 1672 if (User.isUseOfPostIncrementedValue) { 1673 RewriteOp = User.Phi->getIncomingValueForBlock(LatchBlock); 1674 // If this user is in the loop, make sure it is the last thing in the 1675 // loop to ensure it is dominated by the increment. In case it's the 1676 // only use of the iv, the increment instruction is already before the 1677 // use. 
1678 if (L->contains(User.Inst->getParent()) && User.Inst != IVIncInsertPt) 1679 User.Inst->moveBefore(IVIncInsertPt); 1680 } 1681 1682 const SCEV* RewriteExpr = SE->getUnknown(RewriteOp); 1683 1684 if (SE->getEffectiveSCEVType(RewriteOp->getType()) != 1685 SE->getEffectiveSCEVType(ReplacedTy)) { 1686 assert(SE->getTypeSizeInBits(RewriteOp->getType()) > 1687 SE->getTypeSizeInBits(ReplacedTy) && 1688 "Unexpected widening cast!"); 1689 RewriteExpr = SE->getTruncateExpr(RewriteExpr, ReplacedTy); 1690 } 1691 1692 // If we had to insert new instructions for RewriteOp, we have to 1693 // consider that they may not have been able to end up immediately 1694 // next to RewriteOp, because non-PHI instructions may never precede 1695 // PHI instructions in a block. In this case, remember where the last 1696 // instruction was inserted so that if we're replacing a different 1697 // PHI node, we can use the later point to expand the final 1698 // RewriteExpr. 1699 Instruction *NewBasePt = dyn_cast<Instruction>(RewriteOp); 1700 if (RewriteOp == User.Phi) NewBasePt = 0; 1701 1702 // Clear the SCEVExpander's expression map so that we are guaranteed 1703 // to have the code emitted where we expect it. 1704 Rewriter.clear(); 1705 1706 // If we are reusing the iv, then it must be multiplied by a constant 1707 // factor to take advantage of the addressing mode scale component. 1708 if (!RewriteFactor->isZero()) { 1709 // If we're reusing an IV with a nonzero base (currently this happens 1710 // only when all reuses are outside the loop) subtract that base here. 1711 // The base has been used to initialize the PHI node but we don't want 1712 // it here. 1713 if (!ReuseIV.Base->isZero()) { 1714 const SCEV* typedBase = ReuseIV.Base; 1715 if (SE->getEffectiveSCEVType(RewriteExpr->getType()) != 1716 SE->getEffectiveSCEVType(ReuseIV.Base->getType())) { 1717 // It's possible the original IV is a larger type than the new IV, 1718 // in which case we have to truncate the Base. We checked in 1719 // RequiresTypeConversion that this is valid. 1720 assert(SE->getTypeSizeInBits(RewriteExpr->getType()) < 1721 SE->getTypeSizeInBits(ReuseIV.Base->getType()) && 1722 "Unexpected lengthening conversion!"); 1723 typedBase = SE->getTruncateExpr(ReuseIV.Base, 1724 RewriteExpr->getType()); 1725 } 1726 RewriteExpr = SE->getMinusSCEV(RewriteExpr, typedBase); 1727 } 1728 1729 // Multiply old variable, with base removed, by new scale factor. 1730 RewriteExpr = SE->getMulExpr(RewriteFactor, 1731 RewriteExpr); 1732 1733 // The common base is emitted in the loop preheader. But since we 1734 // are reusing an IV, it has not been used to initialize the PHI node. 1735 // Add it to the expression used to rewrite the uses. 1736 // When this use is outside the loop, we earlier subtracted the 1737 // common base, and are adding it back here. Use the same expression 1738 // as before, rather than CommonBaseV, so DAGCombiner will zap it. 1739 if (!CommonExprs->isZero()) { 1740 if (L->contains(User.Inst->getParent())) 1741 RewriteExpr = SE->getAddExpr(RewriteExpr, 1742 SE->getUnknown(CommonBaseV)); 1743 else 1744 RewriteExpr = SE->getAddExpr(RewriteExpr, CommonExprs); 1745 } 1746 } 1747 1748 // Now that we know what we need to do, insert code before User for the 1749 // immediate and any loop-variant expressions. 1750 if (BaseV) 1751 // Add BaseV to the PHI value if needed. 
1752 RewriteExpr = SE->getAddExpr(RewriteExpr, SE->getUnknown(BaseV)); 1753 1754 User.RewriteInstructionToUseNewBase(RewriteExpr, NewBasePt, 1755 Rewriter, L, this, *LI, 1756 DeadInsts); 1757 1758 // Mark the old value we replaced as possibly dead, so that it is eliminated 1759 // if we just replaced the last use of that value. 1760 DeadInsts.push_back(User.OperandValToReplace); 1761 1762 UsersToProcess.pop_back(); 1763 ++NumReduced; 1764 1765 // If there are any more users to process with the same base, process them 1766 // now. We sorted by base above, so we just have to check the last elt. 1767 } while (!UsersToProcess.empty() && UsersToProcess.back().Base == Base); 1768 // TODO: Next, find out which base index is the most common, pull it out. 1769 } 1770 1771 // IMPORTANT TODO: Figure out how to partition the IV's with this stride, but 1772 // different starting values, into different PHIs. 1773} 1774 1775/// FindIVUserForCond - If Cond has an operand that is an expression of an IV, 1776/// set the IV user and stride information and return true, otherwise return 1777/// false. 1778bool LoopStrengthReduce::FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse, 1779 const SCEV* const * &CondStride) { 1780 for (unsigned Stride = 0, e = IU->StrideOrder.size(); 1781 Stride != e && !CondUse; ++Stride) { 1782 std::map<const SCEV*, IVUsersOfOneStride *>::iterator SI = 1783 IU->IVUsesByStride.find(IU->StrideOrder[Stride]); 1784 assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!"); 1785 1786 for (ilist<IVStrideUse>::iterator UI = SI->second->Users.begin(), 1787 E = SI->second->Users.end(); UI != E; ++UI) 1788 if (UI->getUser() == Cond) { 1789 // NOTE: we could handle setcc instructions with multiple uses here, but 1790 // InstCombine does this as well for simple uses, and it's not clear that 1791 // it occurs often enough in real life to be worth handling. 1792 CondUse = UI; 1793 CondStride = &SI->first; 1794 return true; 1795 } 1796 } 1797 return false; 1798} 1799 1800namespace { 1801 // Constant strides come first and are in turn sorted by their absolute 1802 // values. If the absolute values are the same, then positive strides come first. 1803 // e.g. 1804 // 4, -1, X, 1, 2 ==> 1, -1, 2, 4, X 1805 struct StrideCompare { 1806 const ScalarEvolution *SE; 1807 explicit StrideCompare(const ScalarEvolution *se) : SE(se) {} 1808 1809 bool operator()(const SCEV* const &LHS, const SCEV* const &RHS) { 1810 const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS); 1811 const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS); 1812 if (LHSC && RHSC) { 1813 int64_t LV = LHSC->getValue()->getSExtValue(); 1814 int64_t RV = RHSC->getValue()->getSExtValue(); 1815 uint64_t ALV = (LV < 0) ? -LV : LV; 1816 uint64_t ARV = (RV < 0) ? -RV : RV; 1817 if (ALV == ARV) { 1818 if (LV != RV) 1819 return LV > RV; 1820 } else { 1821 return ALV < ARV; 1822 } 1823 1824 // If it's the same value but a different type, sort by bit width so 1825 // that we emit larger induction variables before smaller 1826 // ones, letting the smaller be re-written in terms of larger ones. 1827 return SE->getTypeSizeInBits(RHS->getType()) < 1828 SE->getTypeSizeInBits(LHS->getType()); 1829 } 1830 return LHSC && !RHSC; 1831 } 1832 }; 1833} 1834 1835/// ChangeCompareStride - If a loop termination compare instruction is the 1836/// only use of its stride, and the comparison is against a constant value, 1837/// try to eliminate the stride by moving the compare instruction to another 1838/// stride and changing its constant operand accordingly. e.g.
1839/// 1840/// loop: 1841/// ... 1842/// v1 = v1 + 3 1843/// v2 = v2 + 1 1844/// if (v2 < 10) goto loop 1845/// => 1846/// loop: 1847/// ... 1848/// v1 = v1 + 3 1849/// if (v1 < 30) goto loop 1850ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond, 1851 IVStrideUse* &CondUse, 1852 const SCEV* const* &CondStride) { 1853 // If there's only one stride in the loop, there's nothing to do here. 1854 if (IU->StrideOrder.size() < 2) 1855 return Cond; 1856 // If there are other users of the condition's stride, don't bother 1857 // trying to change the condition because the stride will still 1858 // remain. 1859 std::map<const SCEV*, IVUsersOfOneStride *>::iterator I = 1860 IU->IVUsesByStride.find(*CondStride); 1861 if (I == IU->IVUsesByStride.end() || 1862 I->second->Users.size() != 1) 1863 return Cond; 1864 // Only handle constant strides for now. 1865 const SCEVConstant *SC = dyn_cast<SCEVConstant>(*CondStride); 1866 if (!SC) return Cond; 1867 1868 ICmpInst::Predicate Predicate = Cond->getPredicate(); 1869 int64_t CmpSSInt = SC->getValue()->getSExtValue(); 1870 unsigned BitWidth = SE->getTypeSizeInBits((*CondStride)->getType()); 1871 uint64_t SignBit = 1ULL << (BitWidth-1); 1872 const Type *CmpTy = Cond->getOperand(0)->getType(); 1873 const Type *NewCmpTy = NULL; 1874 unsigned TyBits = SE->getTypeSizeInBits(CmpTy); 1875 unsigned NewTyBits = 0; 1876 const SCEV* *NewStride = NULL; 1877 Value *NewCmpLHS = NULL; 1878 Value *NewCmpRHS = NULL; 1879 int64_t Scale = 1; 1880 const SCEV* NewOffset = SE->getIntegerSCEV(0, CmpTy); 1881 1882 if (ConstantInt *C = dyn_cast<ConstantInt>(Cond->getOperand(1))) { 1883 int64_t CmpVal = C->getValue().getSExtValue(); 1884 1885 // Check the stride constant and the comparison constant signs to detect 1886 // overflow. 1887 if ((CmpVal & SignBit) != (CmpSSInt & SignBit)) 1888 return Cond; 1889 1890 // Look for a suitable stride / iv as replacement. 1891 for (unsigned i = 0, e = IU->StrideOrder.size(); i != e; ++i) { 1892 std::map<const SCEV*, IVUsersOfOneStride *>::iterator SI = 1893 IU->IVUsesByStride.find(IU->StrideOrder[i]); 1894 if (!isa<SCEVConstant>(SI->first)) 1895 continue; 1896 int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue(); 1897 if (SSInt == CmpSSInt || 1898 abs64(SSInt) < abs64(CmpSSInt) || 1899 (SSInt % CmpSSInt) != 0) 1900 continue; 1901 1902 Scale = SSInt / CmpSSInt; 1903 int64_t NewCmpVal = CmpVal * Scale; 1904 APInt Mul = APInt(BitWidth*2, CmpVal, true); 1905 Mul = Mul * APInt(BitWidth*2, Scale, true); 1906 // Check for overflow. 1907 if (!Mul.isSignedIntN(BitWidth)) 1908 continue; 1909 // Check for overflow in the stride's type too. 1910 if (!Mul.isSignedIntN(SE->getTypeSizeInBits(SI->first->getType()))) 1911 continue; 1912 1913 // Watch out for overflow. 1914 if (ICmpInst::isSignedPredicate(Predicate) && 1915 (CmpVal & SignBit) != (NewCmpVal & SignBit)) 1916 continue; 1917 1918 if (NewCmpVal == CmpVal) 1919 continue; 1920 // Pick the best iv to use, trying to avoid a cast. 1921 NewCmpLHS = NULL; 1922 for (ilist<IVStrideUse>::iterator UI = SI->second->Users.begin(), 1923 E = SI->second->Users.end(); UI != E; ++UI) { 1924 Value *Op = UI->getOperandValToReplace(); 1925 1926 // If the IVStrideUse implies a cast, check for an actual cast which 1927 // can be used to find the original IV expression. 1928 if (SE->getEffectiveSCEVType(Op->getType()) != 1929 SE->getEffectiveSCEVType(SI->first->getType())) { 1930 CastInst *CI = dyn_cast<CastInst>(Op); 1931 // If it's not a simple cast, it's complicated.
1932 if (!CI) 1933 continue; 1934 // If it's a cast from a type other than the stride type, 1935 // it's complicated. 1936 if (CI->getOperand(0)->getType() != SI->first->getType()) 1937 continue; 1938 // Ok, we found the IV expression in the stride's type. 1939 Op = CI->getOperand(0); 1940 } 1941 1942 NewCmpLHS = Op; 1943 if (NewCmpLHS->getType() == CmpTy) 1944 break; 1945 } 1946 if (!NewCmpLHS) 1947 continue; 1948 1949 NewCmpTy = NewCmpLHS->getType(); 1950 NewTyBits = SE->getTypeSizeInBits(NewCmpTy); 1951 const Type *NewCmpIntTy = IntegerType::get(NewTyBits); 1952 if (RequiresTypeConversion(NewCmpTy, CmpTy)) { 1953 // Check if it is possible to rewrite it using 1954 // an iv / stride of a smaller integer type. 1955 unsigned Bits = NewTyBits; 1956 if (ICmpInst::isSignedPredicate(Predicate)) 1957 --Bits; 1958 uint64_t Mask = (1ULL << Bits) - 1; 1959 if (((uint64_t)NewCmpVal & Mask) != (uint64_t)NewCmpVal) 1960 continue; 1961 } 1962 1963 // Don't rewrite if the use offset is non-constant and the new type is 1964 // different from the old type. 1965 // FIXME: too conservative? 1966 if (NewTyBits != TyBits && !isa<SCEVConstant>(CondUse->getOffset())) 1967 continue; 1968 1969 bool AllUsesAreAddresses = true; 1970 bool AllUsesAreOutsideLoop = true; 1971 std::vector<BasedUser> UsersToProcess; 1972 const SCEV* CommonExprs = CollectIVUsers(SI->first, *SI->second, L, 1973 AllUsesAreAddresses, 1974 AllUsesAreOutsideLoop, 1975 UsersToProcess); 1976 // Avoid rewriting the compare instruction with an iv of new stride 1977 // if it's likely the new stride uses will be rewritten using the 1978 // stride of the compare instruction. 1979 if (AllUsesAreAddresses && 1980 ValidScale(!CommonExprs->isZero(), Scale, UsersToProcess)) 1981 continue; 1982 1983 // Avoid rewriting the compare instruction with an iv which has 1984 // implicit extension or truncation built into it. 1985 // TODO: This is over-conservative. 1986 if (SE->getTypeSizeInBits(CondUse->getOffset()->getType()) != TyBits) 1987 continue; 1988 1989 // If the scale is negative, use the swapped predicate unless it's testing 1990 // for equality. 1991 if (Scale < 0 && !Cond->isEquality()) 1992 Predicate = ICmpInst::getSwappedPredicate(Predicate); 1993 1994 NewStride = &IU->StrideOrder[i]; 1995 if (!isa<PointerType>(NewCmpTy)) 1996 NewCmpRHS = ConstantInt::get(NewCmpTy, NewCmpVal); 1997 else { 1998 Constant *CI = ConstantInt::get(NewCmpIntTy, NewCmpVal); 1999 NewCmpRHS = ConstantExpr::getIntToPtr(CI, NewCmpTy); 2000 } 2001 NewOffset = TyBits == NewTyBits 2002 ? SE->getMulExpr(CondUse->getOffset(), 2003 SE->getConstant(CmpTy, Scale)) 2004 : SE->getConstant(NewCmpIntTy, 2005 cast<SCEVConstant>(CondUse->getOffset())->getValue() 2006 ->getSExtValue()*Scale); 2007 break; 2008 } 2009 } 2010 2011 // Forgo this transformation if the increment happens to be 2012 // unfortunately positioned after the condition, and the condition 2013 // has multiple uses which prevent it from being moved immediately 2014 // before the branch. See 2015 // test/Transforms/LoopStrengthReduce/change-compare-stride-trickiness-*.ll 2016 // for an example of this situation. 2017 if (!Cond->hasOneUse()) { 2018 for (BasicBlock::iterator I = Cond, E = Cond->getParent()->end(); 2019 I != E; ++I) 2020 if (I == NewCmpLHS) 2021 return Cond; 2022 } 2023 2024 if (NewCmpRHS) { 2025 // Create a new compare instruction using the new stride / iv. 2026 ICmpInst *OldCond = Cond; 2027 // Insert new compare instruction.
2028 Cond = new ICmpInst(Predicate, NewCmpLHS, NewCmpRHS, 2029 L->getHeader()->getName() + ".termcond", 2030 OldCond); 2031 2032 // Remove the old compare instruction. The old indvar is probably dead too. 2033 DeadInsts.push_back(CondUse->getOperandValToReplace()); 2034 OldCond->replaceAllUsesWith(Cond); 2035 OldCond->eraseFromParent(); 2036 2037 IU->IVUsesByStride[*NewStride]->addUser(NewOffset, Cond, NewCmpLHS); 2038 CondUse = &IU->IVUsesByStride[*NewStride]->Users.back(); 2039 CondStride = NewStride; 2040 ++NumEliminated; 2041 Changed = true; 2042 } 2043 2044 return Cond; 2045} 2046 2047/// OptimizeMax - Rewrite the loop's terminating condition if it uses 2048/// a max computation. 2049/// 2050/// This is a narrow solution to a specific, but acute, problem. For loops 2051/// like this: 2052/// 2053/// i = 0; 2054/// do { 2055/// p[i] = 0.0; 2056/// } while (++i < n); 2057/// 2058/// the trip count isn't just 'n', because 'n' might not be positive. And 2059/// unfortunately this can come up even for loops where the user didn't use 2060/// a C do-while loop. For example, seemingly well-behaved top-test loops 2061/// will commonly be lowered like this: 2062/// 2063/// if (n > 0) { 2064/// i = 0; 2065/// do { 2066/// p[i] = 0.0; 2067/// } while (++i < n); 2068/// } 2069/// 2070/// and then it's possible for subsequent optimization to obscure the if 2071/// test in such a way that indvars can't find it. 2072/// 2073/// When indvars can't find the if test in loops like this, it creates a 2074/// max expression, which allows it to give the loop a canonical 2075/// induction variable: 2076/// 2077/// i = 0; 2078/// max = n < 1 ? 1 : n; 2079/// do { 2080/// p[i] = 0.0; 2081/// } while (++i != max); 2082/// 2083/// Canonical induction variables are necessary because the loop passes 2084/// are designed around them. The most obvious example of this is the 2085/// LoopInfo analysis, which doesn't remember trip count values. It 2086/// expects to be able to rediscover the trip count each time it is 2087/// needed, and it does this using a simple analysis that only succeeds if 2088/// the loop has a canonical induction variable. 2089/// 2090/// However, when it comes time to generate code, the maximum operation 2091/// can be quite costly, especially if it's inside of an outer loop. 2092/// 2093/// This function solves this problem by detecting such loops and 2094/// rewriting their conditions from ICMP_NE back to ICMP_SLT, and deleting 2095/// the instructions for the maximum computation. 2096/// 2097ICmpInst *LoopStrengthReduce::OptimizeMax(Loop *L, ICmpInst *Cond, 2098 IVStrideUse* &CondUse) { 2099 // Check that the loop matches the pattern we're looking for. 2100 if (Cond->getPredicate() != CmpInst::ICMP_EQ && 2101 Cond->getPredicate() != CmpInst::ICMP_NE) 2102 return Cond; 2103 2104 SelectInst *Sel = dyn_cast<SelectInst>(Cond->getOperand(1)); 2105 if (!Sel || !Sel->hasOneUse()) return Cond; 2106 2107 const SCEV* BackedgeTakenCount = SE->getBackedgeTakenCount(L); 2108 if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) 2109 return Cond; 2110 const SCEV* One = SE->getIntegerSCEV(1, BackedgeTakenCount->getType()); 2111 2112 // Add one to the backedge-taken count to get the trip count. 2113 const SCEV* IterationCount = SE->getAddExpr(BackedgeTakenCount, One); 2114 2115 // Check for a max calculation that matches the pattern.
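// For the do-while example in the comment above, the trip count computed here // is typically a two-operand max such as (1 smax %n), with the constant 1 as // its first operand; that is the shape the checks below look for.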
2116 if (!isa<SCEVSMaxExpr>(IterationCount) && !isa<SCEVUMaxExpr>(IterationCount)) 2117 return Cond; 2118 const SCEVNAryExpr *Max = cast<SCEVNAryExpr>(IterationCount); 2119 if (Max != SE->getSCEV(Sel)) return Cond; 2120 2121 // To handle a max with more than two operands, this optimization would 2122 // require additional checking and setup. 2123 if (Max->getNumOperands() != 2) 2124 return Cond; 2125 2126 const SCEV* MaxLHS = Max->getOperand(0); 2127 const SCEV* MaxRHS = Max->getOperand(1); 2128 if (!MaxLHS || MaxLHS != One) return Cond; 2129 2130 // Check the relevant induction variable for conformance to 2131 // the pattern. 2132 const SCEV* IV = SE->getSCEV(Cond->getOperand(0)); 2133 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV); 2134 if (!AR || !AR->isAffine() || 2135 AR->getStart() != One || 2136 AR->getStepRecurrence(*SE) != One) 2137 return Cond; 2138 2139 assert(AR->getLoop() == L && 2140 "Loop condition operand is an addrec in a different loop!"); 2141 2142 // Check the right operand of the select, and remember it, as it will 2143 // be used in the new comparison instruction. 2144 Value *NewRHS = 0; 2145 if (SE->getSCEV(Sel->getOperand(1)) == MaxRHS) 2146 NewRHS = Sel->getOperand(1); 2147 else if (SE->getSCEV(Sel->getOperand(2)) == MaxRHS) 2148 NewRHS = Sel->getOperand(2); 2149 if (!NewRHS) return Cond; 2150 2151 // Determine the new comparison opcode. It may be signed or unsigned, 2152 // and the original comparison may be either equality or inequality. 2153 CmpInst::Predicate Pred = 2154 isa<SCEVSMaxExpr>(Max) ? CmpInst::ICMP_SLT : CmpInst::ICMP_ULT; 2155 if (Cond->getPredicate() == CmpInst::ICMP_EQ) 2156 Pred = CmpInst::getInversePredicate(Pred); 2157 2158 // Ok, everything looks ok to change the condition into an SLT or SGE and 2159 // delete the max calculation. 2160 ICmpInst *NewCond = 2161 new ICmpInst(Pred, Cond->getOperand(0), NewRHS, "scmp", Cond); 2162 2163 // Delete the max calculation instructions. 2164 Cond->replaceAllUsesWith(NewCond); 2165 CondUse->setUser(NewCond); 2166 Instruction *Cmp = cast<Instruction>(Sel->getOperand(0)); 2167 Cond->eraseFromParent(); 2168 Sel->eraseFromParent(); 2169 if (Cmp->use_empty()) 2170 Cmp->eraseFromParent(); 2171 return NewCond; 2172} 2173 2174/// OptimizeShadowIV - If the IV is used in an int-to-float cast 2175/// inside the loop then try to eliminate the cast operation. 2176void LoopStrengthReduce::OptimizeShadowIV(Loop *L) { 2177 2178 const SCEV* BackedgeTakenCount = SE->getBackedgeTakenCount(L); 2179 if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) 2180 return; 2181 2182 for (unsigned Stride = 0, e = IU->StrideOrder.size(); Stride != e; 2183 ++Stride) { 2184 std::map<const SCEV*, IVUsersOfOneStride *>::iterator SI = 2185 IU->IVUsesByStride.find(IU->StrideOrder[Stride]); 2186 assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!"); 2187 if (!isa<SCEVConstant>(SI->first)) 2188 continue; 2189 2190 for (ilist<IVStrideUse>::iterator UI = SI->second->Users.begin(), 2191 E = SI->second->Users.end(); UI != E; /* empty */) { 2192 ilist<IVStrideUse>::iterator CandidateUI = UI; 2193 ++UI; 2194 Instruction *ShadowUse = CandidateUI->getUser(); 2195 const Type *DestTy = NULL; 2196 2197 /* If the shadow use is an int->float cast then insert a second IV 2198 to eliminate this cast.
2199 2200 for (unsigned i = 0; i < n; ++i) 2201 foo((double)i); 2202 2203 is transformed into 2204 2205 double d = 0.0; 2206 for (unsigned i = 0; i < n; ++i, ++d) 2207 foo(d); 2208 */ 2209 if (UIToFPInst *UCast = dyn_cast<UIToFPInst>(CandidateUI->getUser())) 2210 DestTy = UCast->getDestTy(); 2211 else if (SIToFPInst *SCast = dyn_cast<SIToFPInst>(CandidateUI->getUser())) 2212 DestTy = SCast->getDestTy(); 2213 if (!DestTy) continue; 2214 2215 if (TLI) { 2216 // If target does not support DestTy natively then do not apply 2217 // this transformation. 2218 MVT DVT = TLI->getValueType(DestTy); 2219 if (!TLI->isTypeLegal(DVT)) continue; 2220 } 2221 2222 PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0)); 2223 if (!PH) continue; 2224 if (PH->getNumIncomingValues() != 2) continue; 2225 2226 const Type *SrcTy = PH->getType(); 2227 int Mantissa = DestTy->getFPMantissaWidth(); 2228 if (Mantissa == -1) continue; 2229 if ((int)SE->getTypeSizeInBits(SrcTy) > Mantissa) 2230 continue; 2231 2232 unsigned Entry, Latch; 2233 if (PH->getIncomingBlock(0) == L->getLoopPreheader()) { 2234 Entry = 0; 2235 Latch = 1; 2236 } else { 2237 Entry = 1; 2238 Latch = 0; 2239 } 2240 2241 ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry)); 2242 if (!Init) continue; 2243 Constant *NewInit = ConstantFP::get(DestTy, Init->getZExtValue()); 2244 2245 BinaryOperator *Incr = 2246 dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch)); 2247 if (!Incr) continue; 2248 if (Incr->getOpcode() != Instruction::Add 2249 && Incr->getOpcode() != Instruction::Sub) 2250 continue; 2251 2252 /* Initialize new IV, double d = 0.0 in above example. */ 2253 ConstantInt *C = NULL; 2254 if (Incr->getOperand(0) == PH) 2255 C = dyn_cast<ConstantInt>(Incr->getOperand(1)); 2256 else if (Incr->getOperand(1) == PH) 2257 C = dyn_cast<ConstantInt>(Incr->getOperand(0)); 2258 else 2259 continue; 2260 2261 if (!C) continue; 2262 2263 /* Add new PHINode. */ 2264 PHINode *NewPH = PHINode::Create(DestTy, "IV.S.", PH); 2265 2266 /* create new increment. '++d' in above example. */ 2267 Constant *CFP = ConstantFP::get(DestTy, C->getZExtValue()); 2268 BinaryOperator *NewIncr = 2269 BinaryOperator::Create(Incr->getOpcode() == Instruction::Add ? 2270 Instruction::FAdd : Instruction::FSub, 2271 NewPH, CFP, "IV.S.next.", Incr); 2272 2273 NewPH->addIncoming(NewInit, PH->getIncomingBlock(Entry)); 2274 NewPH->addIncoming(NewIncr, PH->getIncomingBlock(Latch)); 2275 2276 /* Remove cast operation */ 2277 ShadowUse->replaceAllUsesWith(NewPH); 2278 ShadowUse->eraseFromParent(); 2279 NumShadow++; 2280 break; 2281 } 2282 } 2283} 2284 2285/// OptimizeIndvars - Now that IVUsesByStride is set up with all of the indvar 2286/// uses in the loop, look to see if we can eliminate some, in favor of using 2287/// common indvars for the different uses. 2288void LoopStrengthReduce::OptimizeIndvars(Loop *L) { 2289 // TODO: implement optzns here. 2290 2291 OptimizeShadowIV(L); 2292} 2293 2294/// OptimizeLoopTermCond - Change loop terminating condition to use the 2295/// postinc iv when possible. 2296void LoopStrengthReduce::OptimizeLoopTermCond(Loop *L) { 2297 // Finally, get the terminating condition for the loop if possible. If we 2298 // can, we want to change it to use a post-incremented version of its 2299 // induction variable, to allow coalescing the live ranges for the IV into 2300 // one register value. 
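// For example (names illustrative): rather than testing the pre-incremented // value with '%i.next = add %i, 1' followed by 'icmp slt %i, %n', the exit // test can often use %i.next directly, with the use's offset adjusted by the // stride below, so %i need not stay live across the increment.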
2301 BasicBlock *LatchBlock = L->getLoopLatch(); 2302 BasicBlock *ExitingBlock = L->getExitingBlock(); 2303 if (!ExitingBlock) 2304 // Multiple exits, just look at the exit in the latch block if there is one. 2305 ExitingBlock = LatchBlock; 2306 BranchInst *TermBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator()); 2307 if (!TermBr) 2308 return; 2309 if (TermBr->isUnconditional() || !isa<ICmpInst>(TermBr->getCondition())) 2310 return; 2311 2312 // Search IVUsesByStride to find Cond's IVUse if there is one. 2313 IVStrideUse *CondUse = 0; 2314 const SCEV* const *CondStride = 0; 2315 ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition()); 2316 if (!FindIVUserForCond(Cond, CondUse, CondStride)) 2317 return; // setcc doesn't use the IV. 2318 2319 if (ExitingBlock != LatchBlock) { 2320 if (!Cond->hasOneUse()) 2321 // See below, we don't want the condition to be cloned. 2322 return; 2323 2324 // If exiting block is the latch block, we know it's safe and profitable to 2325 // transform the icmp to use post-inc iv. Otherwise do so only if it would 2326 // not reuse another iv and its iv would be reused by other uses. We are 2327 // optimizing for the case where the icmp is the only use of the iv. 2328 IVUsersOfOneStride &StrideUses = *IU->IVUsesByStride[*CondStride]; 2329 for (ilist<IVStrideUse>::iterator I = StrideUses.Users.begin(), 2330 E = StrideUses.Users.end(); I != E; ++I) { 2331 if (I->getUser() == Cond) 2332 continue; 2333 if (!I->isUseOfPostIncrementedValue()) 2334 return; 2335 } 2336 2337 // FIXME: This is expensive, and worse still ChangeCompareStride does a 2338 // similar check. Can we perform all the icmp related transformations after 2339 // StrengthReduceStridedIVUsers? 2340 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(*CondStride)) { 2341 int64_t SInt = SC->getValue()->getSExtValue(); 2342 for (unsigned NewStride = 0, ee = IU->StrideOrder.size(); NewStride != ee; 2343 ++NewStride) { 2344 std::map<const SCEV*, IVUsersOfOneStride *>::iterator SI = 2345 IU->IVUsesByStride.find(IU->StrideOrder[NewStride]); 2346 if (!isa<SCEVConstant>(SI->first) || SI->first == *CondStride) 2347 continue; 2348 int64_t SSInt = 2349 cast<SCEVConstant>(SI->first)->getValue()->getSExtValue(); 2350 if (SSInt == SInt) 2351 return; // This can definitely be reused. 2352 if (unsigned(abs64(SSInt)) < SInt || (SSInt % SInt) != 0) 2353 continue; 2354 int64_t Scale = SSInt / SInt; 2355 bool AllUsesAreAddresses = true; 2356 bool AllUsesAreOutsideLoop = true; 2357 std::vector<BasedUser> UsersToProcess; 2358 const SCEV* CommonExprs = CollectIVUsers(SI->first, *SI->second, L, 2359 AllUsesAreAddresses, 2360 AllUsesAreOutsideLoop, 2361 UsersToProcess); 2362 // Avoid rewriting the compare instruction with an iv of new stride 2363 // if it's likely the new stride uses will be rewritten using the 2364 // stride of the compare instruction. 2365 if (AllUsesAreAddresses && 2366 ValidScale(!CommonExprs->isZero(), Scale, UsersToProcess)) 2367 return; 2368 } 2369 } 2370 2371 StrideNoReuse.insert(*CondStride); 2372 } 2373 2374 // If the trip count is computed in terms of a max (due to ScalarEvolution 2375 // being unable to find a sufficient guard, for example), change the loop 2376 // comparison to use SLT or ULT instead of NE. 2377 Cond = OptimizeMax(L, Cond, CondUse); 2378 2379 // If possible, change stride and operands of the compare instruction to 2380 // eliminate one stride. 
2381 if (ExitingBlock == LatchBlock) 2382 Cond = ChangeCompareStride(L, Cond, CondUse, CondStride); 2383 2384 // It's possible for the setcc instruction to be anywhere in the loop, and 2385 // possible for it to have multiple users. If it is not immediately before 2386 // the latch block branch, move it. 2387 if (&*++BasicBlock::iterator(Cond) != (Instruction*)TermBr) { 2388 if (Cond->hasOneUse()) { // Condition has a single use, just move it. 2389 Cond->moveBefore(TermBr); 2390 } else { 2391 // Otherwise, clone the terminating condition and insert it at the loop end. 2392 Cond = cast<ICmpInst>(Cond->clone()); 2393 Cond->setName(L->getHeader()->getName() + ".termcond"); 2394 LatchBlock->getInstList().insert(TermBr, Cond); 2395 2396 // Clone the IVUse, as the old use still exists! 2397 IU->IVUsesByStride[*CondStride]->addUser(CondUse->getOffset(), Cond, 2398 CondUse->getOperandValToReplace()); 2399 CondUse = &IU->IVUsesByStride[*CondStride]->Users.back(); 2400 } 2401 } 2402 2403 // If we get to here, we know that we can transform the setcc instruction to 2404 // use the post-incremented version of the IV, allowing us to coalesce the 2405 // live ranges for the IV correctly. 2406 CondUse->setOffset(SE->getMinusSCEV(CondUse->getOffset(), *CondStride)); 2407 CondUse->setIsUseOfPostIncrementedValue(true); 2408 Changed = true; 2409 2410 ++NumLoopCond; 2411} 2412 2413/// OptimizeLoopCountIV - If, after all sharing of IVs, the IV used for deciding 2414/// when to exit the loop is used only for that purpose, try to rearrange things 2415/// so it counts down to a test against zero. 2416void LoopStrengthReduce::OptimizeLoopCountIV(Loop *L) { 2417 2418 // If the number of times the loop is executed isn't computable, give up. 2419 const SCEV* BackedgeTakenCount = SE->getBackedgeTakenCount(L); 2420 if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) 2421 return; 2422 2423 // Get the terminating condition for the loop if possible (this isn't 2424 // necessarily in the latch, or a block that's a predecessor of the header). 2425 if (!L->getExitBlock()) 2426 return; // More than one loop exit block. 2427 2428 // Okay, there is one exit block. Try to find the condition that causes the 2429 // loop to be exited. 2430 BasicBlock *ExitingBlock = L->getExitingBlock(); 2431 if (!ExitingBlock) 2432 return; // More than one block exiting! 2433 2434 // Okay, we've computed the exiting block. See what condition causes us to 2435 // exit. 2436 // 2437 // FIXME: we should be able to handle switch instructions (with a single exit) 2438 BranchInst *TermBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator()); 2439 if (TermBr == 0) return; 2440 assert(TermBr->isConditional() && "If unconditional, it can't be in loop!"); 2441 if (!isa<ICmpInst>(TermBr->getCondition())) 2442 return; 2443 ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition()); 2444 2445 // Handle only tests for equality for the moment, and only stride 1. 2446 if (Cond->getPredicate() != CmpInst::ICMP_EQ) 2447 return; 2448 const SCEV* IV = SE->getSCEV(Cond->getOperand(0)); 2449 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV); 2450 const SCEV* One = SE->getIntegerSCEV(1, BackedgeTakenCount->getType()); 2451 if (!AR || !AR->isAffine() || AR->getStepRecurrence(*SE) != One) 2452 return; 2453 // If the RHS of the comparison is defined inside the loop, the rewrite 2454 // cannot be done.
2455 if (Instruction *CR = dyn_cast<Instruction>(Cond->getOperand(1))) 2456 if (L->contains(CR->getParent())) 2457 return; 2458 2459 // Make sure the IV is only used for counting. Value may be preinc or 2460 // postinc; 2 uses in either case. 2461 if (!Cond->getOperand(0)->hasNUses(2)) 2462 return; 2463 PHINode *phi = dyn_cast<PHINode>(Cond->getOperand(0)); 2464 Instruction *incr; 2465 if (phi && phi->getParent()==L->getHeader()) { 2466 // value tested is preinc. Find the increment. 2467 // A CmpInst is not a BinaryOperator; we depend on this. 2468 Instruction::use_iterator UI = phi->use_begin(); 2469 incr = dyn_cast<BinaryOperator>(UI); 2470 if (!incr) 2471 incr = dyn_cast<BinaryOperator>(++UI); 2472 // 1 use for postinc value, the phi. Unnecessarily conservative? 2473 if (!incr || !incr->hasOneUse() || incr->getOpcode()!=Instruction::Add) 2474 return; 2475 } else { 2476 // Value tested is postinc. Find the phi node. 2477 incr = dyn_cast<BinaryOperator>(Cond->getOperand(0)); 2478 if (!incr || incr->getOpcode()!=Instruction::Add) 2479 return; 2480 2481 Instruction::use_iterator UI = Cond->getOperand(0)->use_begin(); 2482 phi = dyn_cast<PHINode>(UI); 2483 if (!phi) 2484 phi = dyn_cast<PHINode>(++UI); 2485 // 1 use for preinc value, the increment. 2486 if (!phi || phi->getParent()!=L->getHeader() || !phi->hasOneUse()) 2487 return; 2488 } 2489 2490 // Replace the increment with a decrement. 2491 BinaryOperator *decr = 2492 BinaryOperator::Create(Instruction::Sub, incr->getOperand(0), 2493 incr->getOperand(1), "tmp", incr); 2494 incr->replaceAllUsesWith(decr); 2495 incr->eraseFromParent(); 2496 2497 // Substitute endval-startval for the original startval, and 0 for the 2498 // original endval. Since we're only testing for equality this is OK even 2499 // if the computation wraps around. 2500 BasicBlock *Preheader = L->getLoopPreheader(); 2501 Instruction *PreInsertPt = Preheader->getTerminator(); 2502 int inBlock = L->contains(phi->getIncomingBlock(0)) ? 1 : 0; 2503 Value *startVal = phi->getIncomingValue(inBlock); 2504 Value *endVal = Cond->getOperand(1); 2505 // FIXME check for case where both are constant 2506 Constant* Zero = ConstantInt::get(Cond->getOperand(1)->getType(), 0); 2507 BinaryOperator *NewStartVal = 2508 BinaryOperator::Create(Instruction::Sub, endVal, startVal, 2509 "tmp", PreInsertPt); 2510 phi->setIncomingValue(inBlock, NewStartVal); 2511 Cond->setOperand(1, Zero); 2512 2513 Changed = true; 2514} 2515 2516bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager &LPM) { 2517 2518 IU = &getAnalysis<IVUsers>(); 2519 LI = &getAnalysis<LoopInfo>(); 2520 DT = &getAnalysis<DominatorTree>(); 2521 SE = &getAnalysis<ScalarEvolution>(); 2522 Changed = false; 2523 2524 if (!IU->IVUsesByStride.empty()) { 2525#ifndef NDEBUG 2526 DOUT << "\nLSR on \"" << L->getHeader()->getParent()->getNameStart() 2527 << "\" "; 2528 DEBUG(L->dump()); 2529#endif 2530 2531 // Sort the StrideOrder so we process larger strides first. 2532 std::stable_sort(IU->StrideOrder.begin(), IU->StrideOrder.end(), 2533 StrideCompare(SE)); 2534 2535 // Optimize induction variables. Some indvar uses can be transformed to use 2536 // strides that will be needed for other purposes. A common example of this 2537 // is the exit test for the loop, which can often be rewritten to use the 2538 // computation of some other indvar to decide when to terminate the loop. 2539 OptimizeIndvars(L); 2540 2541 // Change loop terminating condition to use the postinc iv when possible 2542 // and optimize loop terminating compare. 
FIXME: Move this after 2543 // StrengthReduceStridedIVUsers? 2544 OptimizeLoopTermCond(L); 2545 2546 // FIXME: We can shrink overlarge IV's here. e.g. if the code has 2547 // computation in i64 values and the target doesn't support i64, demote 2548 // the computation to 32-bit if safe. 2549 2550 // FIXME: Attempt to reuse values across multiple IV's. In particular, we 2551 // could have something like "for(i) { foo(i*8); bar(i*16) }", which should 2552 // be codegened as "for (j = 0;; j+=8) { foo(j); bar(j+j); }" on X86/PPC. 2553 // Need to be careful that IV's are all the same type. Only works for 2554 // intptr_t indvars. 2555 2556 // IVsByStride keeps IVs for one particular loop. 2557 assert(IVsByStride.empty() && "Stale entries in IVsByStride?"); 2558 2559 // Note: this processes each stride/type pair individually. All users 2560 // passed into StrengthReduceStridedIVUsers have the same type AND stride. 2561 // Also, note that we iterate over IVUsesByStride indirectly by using 2562 // StrideOrder. This extra layer of indirection makes the ordering of 2563 // strides deterministic - not dependent on map order. 2564 for (unsigned Stride = 0, e = IU->StrideOrder.size(); 2565 Stride != e; ++Stride) { 2566 std::map<const SCEV*, IVUsersOfOneStride *>::iterator SI = 2567 IU->IVUsesByStride.find(IU->StrideOrder[Stride]); 2568 assert(SI != IU->IVUsesByStride.end() && "Stride doesn't exist!"); 2569 // FIXME: Generalize to non-affine IV's. 2570 if (!SI->first->isLoopInvariant(L)) 2571 continue; 2572 StrengthReduceStridedIVUsers(SI->first, *SI->second, L); 2573 } 2574 } 2575 2576 // After all sharing is done, see if we can adjust the loop to test against 2577 // zero instead of counting up to a maximum. This is usually faster. 2578 OptimizeLoopCountIV(L); 2579 2580 // We're done analyzing this loop; release all the state we built up for it. 2581 IVsByStride.clear(); 2582 StrideNoReuse.clear(); 2583 2584 // Clean up after ourselves 2585 if (!DeadInsts.empty()) 2586 DeleteTriviallyDeadInstructions(); 2587 2588 // At this point, it is worth checking to see if any recurrence PHIs are also 2589 // dead, so that we can remove them as well. 2590 DeleteDeadPHIs(L->getHeader()); 2591 2592 return Changed; 2593} 2594