IndVarSimplify.cpp revision 280031
//===- IndVarSimplify.cpp - Induction Variable Elimination ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation analyzes and transforms the induction variables (and
// computations derived from them) into simpler forms suitable for subsequent
// analysis and transformation.
//
// If the trip count of a loop is computable, this pass also makes the following
// changes:
//   1. The exit condition for the loop is canonicalized to compare the
//      induction value against the exit value. This turns loops like:
//        'for (i = 7; i*i < 1000; ++i)' into 'for (i = 0; i != 25; ++i)'
//   2. Any use outside of the loop of an expression derived from the indvar
//      is changed to compute the derived value outside of the loop, eliminating
//      the dependence on the exit value of the induction variable. If the only
//      purpose of the loop is to compute the exit value of some derived
//      expression, this transformation will make the loop dead.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SimplifyIndVar.h"
using namespace llvm;

#define DEBUG_TYPE "indvars"

STATISTIC(NumWidened  , "Number of indvars widened");
STATISTIC(NumReplaced , "Number of exit values replaced");
STATISTIC(NumLFTR     , "Number of loop exit tests replaced");
STATISTIC(NumElimExt  , "Number of IV sign/zero extends eliminated");
STATISTIC(NumElimIV   , "Number of congruent IVs eliminated");

// Trip count verification can be enabled by default under NDEBUG if we
// implement a strong expression equivalence checker in SCEV. Until then, we
// use the verify-indvars flag, which may assert in some cases.
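// (Usage sketch, assuming the pass is driven by the legacy pass manager as
// registered below: "opt -indvars -verify-indvars foo.ll".)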
static cl::opt<bool> VerifyIndvars(
  "verify-indvars", cl::Hidden,
  cl::desc("Verify the ScalarEvolution result after running indvars"));

static cl::opt<bool> ReduceLiveIVs("liv-reduce", cl::Hidden,
  cl::desc("Reduce live induction variables."));

namespace {
  class IndVarSimplify : public LoopPass {
    LoopInfo *LI;
    ScalarEvolution *SE;
    DominatorTree *DT;
    const DataLayout *DL;
    TargetLibraryInfo *TLI;
    const TargetTransformInfo *TTI;

    SmallVector<WeakVH, 16> DeadInsts;
    bool Changed;
  public:

    static char ID; // Pass identification, replacement for typeid
    IndVarSimplify() : LoopPass(ID), LI(nullptr), SE(nullptr), DT(nullptr),
                       DL(nullptr), Changed(false) {
      initializeIndVarSimplifyPass(*PassRegistry::getPassRegistry());
    }

    bool runOnLoop(Loop *L, LPPassManager &LPM) override;

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addRequired<DominatorTreeWrapperPass>();
      AU.addRequired<LoopInfo>();
      AU.addRequired<ScalarEvolution>();
      AU.addRequiredID(LoopSimplifyID);
      AU.addRequiredID(LCSSAID);
      AU.addPreserved<ScalarEvolution>();
      AU.addPreservedID(LoopSimplifyID);
      AU.addPreservedID(LCSSAID);
      AU.setPreservesCFG();
    }

  private:
    void releaseMemory() override {
      DeadInsts.clear();
    }

    bool isValidRewrite(Value *FromVal, Value *ToVal);

    void HandleFloatingPointIV(Loop *L, PHINode *PH);
    void RewriteNonIntegerIVs(Loop *L);

    void SimplifyAndExtend(Loop *L, SCEVExpander &Rewriter, LPPassManager &LPM);

    void RewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter);

    Value *LinearFunctionTestReplace(Loop *L, const SCEV *BackedgeTakenCount,
                                     PHINode *IndVar, SCEVExpander &Rewriter);

    void SinkUnusedInvariants(Loop *L);
  };
}

char IndVarSimplify::ID = 0;
INITIALIZE_PASS_BEGIN(IndVarSimplify, "indvars",
                "Induction Variable Simplification", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(LCSSA)
INITIALIZE_PASS_END(IndVarSimplify, "indvars",
                "Induction Variable Simplification", false, false)

Pass *llvm::createIndVarSimplifyPass() {
  return new IndVarSimplify();
}

/// isValidRewrite - Return true if the SCEV expansion generated by the
/// rewriter can replace the original value. SCEV guarantees that it
/// produces the same value, but the way it is produced may be illegal IR.
/// Ideally, this function will only be called for verification.
bool IndVarSimplify::isValidRewrite(Value *FromVal, Value *ToVal) {
  // If an SCEV expression subsumed multiple pointers, its expansion could
  // reassociate the GEP changing the base pointer. This is illegal because the
  // final address produced by a GEP chain must be inbounds relative to its
  // underlying object. Otherwise basic alias analysis, among other things,
  // could fail in a dangerous way. Ultimately, SCEV will be improved to avoid
  // producing an expression involving multiple pointers. Until then, we must
  // bail out here.
  //
  // Retrieve the pointer operand of the GEP. Don't use GetUnderlyingObject
  // because it understands lcssa phis while SCEV does not.
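  //
  // (Illustrative example with hypothetical value names: if the original
  // value computes
  //   %from = getelementptr inbounds i8* %base1, i64 %i
  // and the SCEV expansion produces the same address as
  //   %to   = getelementptr i8* %base2, i64 %j
  // with a different underlying object %base2, the rewrite must be rejected
  // even though both values are numerically equal.)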
  Value *FromPtr = FromVal;
  Value *ToPtr = ToVal;
  if (GEPOperator *GEP = dyn_cast<GEPOperator>(FromVal)) {
    FromPtr = GEP->getPointerOperand();
  }
  if (GEPOperator *GEP = dyn_cast<GEPOperator>(ToVal)) {
    ToPtr = GEP->getPointerOperand();
  }
  if (FromPtr != FromVal || ToPtr != ToVal) {
    // Quickly check the common case
    if (FromPtr == ToPtr)
      return true;

    // SCEV may have rewritten an expression that produces the GEP's pointer
    // operand. That's ok as long as the pointer operand has the same base
    // pointer. Unlike GetUnderlyingObject(), getPointerBase() will find the
    // base of a recurrence. This handles the case in which SCEV expansion
    // converts a pointer type recurrence into a nonrecurrent pointer base
    // indexed by an integer recurrence.

    // If the GEP base pointer is a vector of pointers, abort.
    if (!FromPtr->getType()->isPointerTy() || !ToPtr->getType()->isPointerTy())
      return false;

    const SCEV *FromBase = SE->getPointerBase(SE->getSCEV(FromPtr));
    const SCEV *ToBase = SE->getPointerBase(SE->getSCEV(ToPtr));
    if (FromBase == ToBase)
      return true;

    DEBUG(dbgs() << "INDVARS: GEP rewrite bail out "
                 << *FromBase << " != " << *ToBase << "\n");

    return false;
  }
  return true;
}

/// Determine the insertion point for this user. By default, insert immediately
/// before the user. SCEVExpander or LICM will hoist loop invariants out of the
/// loop. For PHI nodes, there may be multiple uses, so compute the nearest
/// common dominator for the incoming blocks.
static Instruction *getInsertPointForUses(Instruction *User, Value *Def,
                                          DominatorTree *DT) {
  PHINode *PHI = dyn_cast<PHINode>(User);
  if (!PHI)
    return User;

  Instruction *InsertPt = nullptr;
  for (unsigned i = 0, e = PHI->getNumIncomingValues(); i != e; ++i) {
    if (PHI->getIncomingValue(i) != Def)
      continue;

    BasicBlock *InsertBB = PHI->getIncomingBlock(i);
    if (!InsertPt) {
      InsertPt = InsertBB->getTerminator();
      continue;
    }
    InsertBB = DT->findNearestCommonDominator(InsertPt->getParent(), InsertBB);
    InsertPt = InsertBB->getTerminator();
  }
  assert(InsertPt && "Missing phi operand");
  assert((!isa<Instruction>(Def) ||
          DT->dominates(cast<Instruction>(Def), InsertPt)) &&
         "def does not dominate all uses");
  return InsertPt;
}

//===----------------------------------------------------------------------===//
// RewriteNonIntegerIVs and helpers. Prefer integer IVs.
//===----------------------------------------------------------------------===//

/// ConvertToSInt - Convert APF to an integer, if possible.
static bool ConvertToSInt(const APFloat &APF, int64_t &IntVal) {
  bool isExact = false;
  // See if we can convert this to an int64_t
  uint64_t UIntVal;
  if (APF.convertToInteger(&UIntVal, 64, true, APFloat::rmTowardZero,
                           &isExact) != APFloat::opOK || !isExact)
    return false;
  IntVal = UIntVal;
  return true;
}

/// HandleFloatingPointIV - If the loop has a floating-point induction
/// variable, then insert a corresponding integer induction variable if
/// possible.
/// For example,
///   for(double i = 0; i < 10000; ++i)
///     bar(i)
/// is converted into
///   for(int i = 0; i < 10000; ++i)
///     bar((double)i);
///
void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PN) {
  unsigned IncomingEdge = L->contains(PN->getIncomingBlock(0));
  unsigned BackEdge     = IncomingEdge^1;

  // Check incoming value.
  ConstantFP *InitValueVal =
    dyn_cast<ConstantFP>(PN->getIncomingValue(IncomingEdge));

  int64_t InitValue;
  if (!InitValueVal || !ConvertToSInt(InitValueVal->getValueAPF(), InitValue))
    return;

  // Check IV increment. Reject this PN if the increment operation is not an
  // add or if the increment value cannot be represented by an integer.
  BinaryOperator *Incr =
    dyn_cast<BinaryOperator>(PN->getIncomingValue(BackEdge));
  if (Incr == nullptr || Incr->getOpcode() != Instruction::FAdd) return;

  // If this is not an add of the PHI with a constantfp, or if the constant fp
  // is not an integer, bail out.
  ConstantFP *IncValueVal = dyn_cast<ConstantFP>(Incr->getOperand(1));
  int64_t IncValue;
  if (IncValueVal == nullptr || Incr->getOperand(0) != PN ||
      !ConvertToSInt(IncValueVal->getValueAPF(), IncValue))
    return;

  // Check Incr uses. One user is PN and the other user is an exit condition
  // used by the conditional terminator.
  Value::user_iterator IncrUse = Incr->user_begin();
  Instruction *U1 = cast<Instruction>(*IncrUse++);
  if (IncrUse == Incr->user_end()) return;
  Instruction *U2 = cast<Instruction>(*IncrUse++);
  if (IncrUse != Incr->user_end()) return;

  // Find exit condition, which is an fcmp. If it doesn't exist, or if it isn't
  // only used by a branch, we can't transform it.
  FCmpInst *Compare = dyn_cast<FCmpInst>(U1);
  if (!Compare)
    Compare = dyn_cast<FCmpInst>(U2);
  if (!Compare || !Compare->hasOneUse() ||
      !isa<BranchInst>(Compare->user_back()))
    return;

  BranchInst *TheBr = cast<BranchInst>(Compare->user_back());

  // We need to verify that the branch actually controls the iteration count
  // of the loop. If not, the new IV can overflow and no one will notice.
  // The branch block must be in the loop and one of the successors must be out
  // of the loop.
  assert(TheBr->isConditional() && "Can't use fcmp if not conditional");
  if (!L->contains(TheBr->getParent()) ||
      (L->contains(TheBr->getSuccessor(0)) &&
       L->contains(TheBr->getSuccessor(1))))
    return;


  // If it isn't a comparison with an integer-as-fp (the exit value), we can't
  // transform it.
  ConstantFP *ExitValueVal = dyn_cast<ConstantFP>(Compare->getOperand(1));
  int64_t ExitValue;
  if (ExitValueVal == nullptr ||
      !ConvertToSInt(ExitValueVal->getValueAPF(), ExitValue))
    return;

  // Find new predicate for integer comparison.
  CmpInst::Predicate NewPred = CmpInst::BAD_ICMP_PREDICATE;
  switch (Compare->getPredicate()) {
  default: return;  // Unknown comparison.
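  // (Explanatory note, not in the original source: the ordered and unordered
  // variant of each FP predicate map to the same signed integer predicate.
  // Given the integer-convertible init, step, and exit constants checked
  // above, plus the i32 range checks below, the IV only ever holds integral
  // values, so the NaN-driven unordered cases should not fire.)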
  case CmpInst::FCMP_OEQ:
  case CmpInst::FCMP_UEQ: NewPred = CmpInst::ICMP_EQ; break;
  case CmpInst::FCMP_ONE:
  case CmpInst::FCMP_UNE: NewPred = CmpInst::ICMP_NE; break;
  case CmpInst::FCMP_OGT:
  case CmpInst::FCMP_UGT: NewPred = CmpInst::ICMP_SGT; break;
  case CmpInst::FCMP_OGE:
  case CmpInst::FCMP_UGE: NewPred = CmpInst::ICMP_SGE; break;
  case CmpInst::FCMP_OLT:
  case CmpInst::FCMP_ULT: NewPred = CmpInst::ICMP_SLT; break;
  case CmpInst::FCMP_OLE:
  case CmpInst::FCMP_ULE: NewPred = CmpInst::ICMP_SLE; break;
  }

  // We convert the floating point induction variable to a signed i32 value if
  // we can. This is only safe if the comparison will not overflow in a way
  // that won't be trapped by the integer equivalent operations. Check for this
  // now.
  // TODO: We could use i64 if it is native and the range requires it.

  // The start/stride/exit values must all fit in signed i32.
  if (!isInt<32>(InitValue) || !isInt<32>(IncValue) || !isInt<32>(ExitValue))
    return;

  // If not actually striding (add x, 0.0), avoid touching the code.
  if (IncValue == 0)
    return;

  // Positive and negative strides have different safety conditions.
  if (IncValue > 0) {
    // If we have a positive stride, we require the init to be less than the
    // exit value.
    if (InitValue >= ExitValue)
      return;

    uint32_t Range = uint32_t(ExitValue-InitValue);
    // Check for infinite loop, either:
    // while (i <= Exit) or until (i > Exit)
    if (NewPred == CmpInst::ICMP_SLE || NewPred == CmpInst::ICMP_SGT) {
      if (++Range == 0) return;  // Range overflows.
    }

    unsigned Leftover = Range % uint32_t(IncValue);

    // If this is an equality comparison, we require that the strided value
    // exactly land on the exit value, otherwise the IV condition will wrap
    // around and do things the fp IV wouldn't.
    if ((NewPred == CmpInst::ICMP_EQ || NewPred == CmpInst::ICMP_NE) &&
        Leftover != 0)
      return;

    // If the stride would wrap around the i32 before exiting, we can't
    // transform the IV.
    if (Leftover != 0 && int32_t(ExitValue+IncValue) < ExitValue)
      return;

  } else {
    // If we have a negative stride, we require the init to be greater than the
    // exit value.
    if (InitValue <= ExitValue)
      return;

    uint32_t Range = uint32_t(InitValue-ExitValue);
    // Check for infinite loop, either:
    // while (i >= Exit) or until (i < Exit)
    if (NewPred == CmpInst::ICMP_SGE || NewPred == CmpInst::ICMP_SLT) {
      if (++Range == 0) return;  // Range overflows.
    }

    unsigned Leftover = Range % uint32_t(-IncValue);

    // If this is an equality comparison, we require that the strided value
    // exactly land on the exit value, otherwise the IV condition will wrap
    // around and do things the fp IV wouldn't.
    if ((NewPred == CmpInst::ICMP_EQ || NewPred == CmpInst::ICMP_NE) &&
        Leftover != 0)
      return;

    // If the stride would wrap around the i32 before exiting, we can't
    // transform the IV.
    if (Leftover != 0 && int32_t(ExitValue+IncValue) > ExitValue)
      return;
  }

  IntegerType *Int32Ty = Type::getInt32Ty(PN->getContext());

  // Insert new integer induction variable.
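  // (Sketch of the emitted IR, with illustrative names; see the code below:
  //    %pn.int   = phi i32 [ <InitValue>, <incoming> ], [ %incr.int, <backedge> ]
  //    %incr.int = add i32 %pn.int, <IncValue>
  //    %cmp.int  = icmp <NewPred> i32 %incr.int, <ExitValue>
  //  The old fcmp/fadd pair is then deleted, and if the FP phi still has
  //  uses it is rewritten as a 'sitofp i32 %pn.int' to its FP type.)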
  PHINode *NewPHI = PHINode::Create(Int32Ty, 2, PN->getName()+".int", PN);
  NewPHI->addIncoming(ConstantInt::get(Int32Ty, InitValue),
                      PN->getIncomingBlock(IncomingEdge));

  Value *NewAdd =
    BinaryOperator::CreateAdd(NewPHI, ConstantInt::get(Int32Ty, IncValue),
                              Incr->getName()+".int", Incr);
  NewPHI->addIncoming(NewAdd, PN->getIncomingBlock(BackEdge));

  ICmpInst *NewCompare = new ICmpInst(TheBr, NewPred, NewAdd,
                                      ConstantInt::get(Int32Ty, ExitValue),
                                      Compare->getName());

  // In the following deletions, PN may become dead and may be deleted.
  // Use a WeakVH to observe whether this happens.
  WeakVH WeakPH = PN;

  // Delete the old floating point exit comparison. The branch starts using the
  // new comparison.
  NewCompare->takeName(Compare);
  Compare->replaceAllUsesWith(NewCompare);
  RecursivelyDeleteTriviallyDeadInstructions(Compare, TLI);

  // Delete the old floating point increment.
  Incr->replaceAllUsesWith(UndefValue::get(Incr->getType()));
  RecursivelyDeleteTriviallyDeadInstructions(Incr, TLI);

  // If the FP induction variable still has uses, this is because something else
  // in the loop uses its value. In order to canonicalize the induction
  // variable, we chose to eliminate the IV and rewrite it in terms of an
  // int->fp cast.
  //
  // We give preference to sitofp over uitofp because it is faster on most
  // platforms.
  if (WeakPH) {
    Value *Conv = new SIToFPInst(NewPHI, PN->getType(), "indvar.conv",
                                 PN->getParent()->getFirstInsertionPt());
    PN->replaceAllUsesWith(Conv);
    RecursivelyDeleteTriviallyDeadInstructions(PN, TLI);
  }
  Changed = true;
}

void IndVarSimplify::RewriteNonIntegerIVs(Loop *L) {
  // First step. Check to see if there are any floating-point recurrences.
  // If there are, change them into integer recurrences, permitting analysis by
  // the SCEV routines.
  //
  BasicBlock *Header = L->getHeader();

  SmallVector<WeakVH, 8> PHIs;
  for (BasicBlock::iterator I = Header->begin();
       PHINode *PN = dyn_cast<PHINode>(I); ++I)
    PHIs.push_back(PN);

  for (unsigned i = 0, e = PHIs.size(); i != e; ++i)
    if (PHINode *PN = dyn_cast_or_null<PHINode>(&*PHIs[i]))
      HandleFloatingPointIV(L, PN);

  // If the loop previously had a floating-point IV, ScalarEvolution
  // may not have been able to compute a trip count. Now that we've done some
  // re-writing, the trip count may be computable.
  if (Changed)
    SE->forgetLoop(L);
}

//===----------------------------------------------------------------------===//
// RewriteLoopExitValues - Optimize IV users outside the loop.
// As a side effect, reduces the amount of IV processing within the loop.
//===----------------------------------------------------------------------===//

/// RewriteLoopExitValues - Check to see if this loop has a computable
/// loop-invariant execution count. If so, this means that we can compute the
/// final value of any expressions that are recurrent in the loop, and
/// substitute the exit values from the loop into any instructions outside of
/// the loop that use the final values of the current expressions.
///
/// This is mostly redundant with the regular IndVarSimplify activities that
/// happen later, except that it's more powerful in some cases, because it's
/// able to brute-force evaluate arbitrary instructions as long as they have
/// constant operands at the beginning of the loop.
void IndVarSimplify::RewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter) {
  // Verify that the input to the pass is already in LCSSA form.
  assert(L->isLCSSAForm(*DT));

  SmallVector<BasicBlock*, 8> ExitBlocks;
  L->getUniqueExitBlocks(ExitBlocks);

  // Find all values that are computed inside the loop, but used outside of it.
  // Because of LCSSA, these values will only occur in LCSSA PHI Nodes. Scan
  // the exit blocks of the loop to find them.
  for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) {
    BasicBlock *ExitBB = ExitBlocks[i];

    // If there are no PHI nodes in this exit block, then no values defined
    // inside the loop are used on this path, skip it.
    PHINode *PN = dyn_cast<PHINode>(ExitBB->begin());
    if (!PN) continue;

    unsigned NumPreds = PN->getNumIncomingValues();

    // We would like to be able to RAUW single-incoming value PHI nodes. We
    // have to be certain this is safe even when this is an LCSSA PHI node.
    // While the computed exit value is no longer varying in *this* loop, the
    // exit block may be an exit block for an outer containing loop as well,
    // the exit value may be varying in the outer loop, and thus it may still
    // require an LCSSA PHI node. The safe case is when this is a
    // single-predecessor PHI node (LCSSA) and the exit block containing it is
    // part of the enclosing loop, or this is the outermost loop of the nest.
    // In either case the exit value could (at most) be varying in the same
    // loop body as the phi node itself. Thus if it is in turn used outside of
    // an enclosing loop it will only be via a separate LCSSA node.
    bool LCSSASafePhiForRAUW =
        NumPreds == 1 &&
        (!L->getParentLoop() || L->getParentLoop() == LI->getLoopFor(ExitBB));

    // Iterate over all of the PHI nodes.
    BasicBlock::iterator BBI = ExitBB->begin();
    while ((PN = dyn_cast<PHINode>(BBI++))) {
      if (PN->use_empty())
        continue; // dead use, don't replace it

      // SCEV only supports integer expressions for now.
      if (!PN->getType()->isIntegerTy() && !PN->getType()->isPointerTy())
        continue;

      // It's necessary to tell ScalarEvolution about this explicitly so that
      // it can walk the def-use list and forget all SCEVs, as it may not be
      // watching the PHI itself. Once the new exit value is in place, there
      // may not be a def-use connection between the loop and every instruction
      // which got a SCEVAddRecExpr for that loop.
      SE->forgetValue(PN);

      // Iterate over all of the values in all the PHI nodes.
      for (unsigned i = 0; i != NumPreds; ++i) {
        // If the value being merged in is not integer or is not defined
        // in the loop, skip it.
        Value *InVal = PN->getIncomingValue(i);
        if (!isa<Instruction>(InVal))
          continue;

        // If this pred is for a subloop, not L itself, skip it.
        if (LI->getLoopFor(PN->getIncomingBlock(i)) != L)
          continue; // The Block is in a subloop, skip it.

        // Check that InVal is defined in the loop.
        Instruction *Inst = cast<Instruction>(InVal);
        if (!L->contains(Inst))
          continue;

        // Okay, this instruction has a user outside of the current loop
        // and varies predictably *inside* the loop. Evaluate the value it
        // contains when the loop exits, if possible.
        const SCEV *ExitValue = SE->getSCEVAtScope(Inst, L->getParentLoop());
        if (!SE->isLoopInvariant(ExitValue, L) ||
            !isSafeToExpand(ExitValue, *SE))
          continue;

        // Computing the value outside of the loop brings no benefit if:
        //  - it is definitely used inside the loop in a way which cannot be
        //    optimized away.
        //  - no use outside of the loop can take advantage of hoisting the
        //    computation out of the loop.
        if (ExitValue->getSCEVType() >= scMulExpr) {
          unsigned NumHardInternalUses = 0;
          unsigned NumSoftExternalUses = 0;
          unsigned NumUses = 0;
          for (auto IB = Inst->user_begin(), IE = Inst->user_end();
               IB != IE && NumUses <= 6; ++IB) {
            Instruction *UseInstr = cast<Instruction>(*IB);
            unsigned Opc = UseInstr->getOpcode();
            NumUses++;
            if (L->contains(UseInstr)) {
              if (Opc == Instruction::Call || Opc == Instruction::Ret)
                NumHardInternalUses++;
            } else {
              if (Opc == Instruction::PHI) {
                // Do not count the Phi as a use. LCSSA may have inserted
                // plenty of trivial ones.
                NumUses--;
                for (auto PB = UseInstr->user_begin(),
                          PE = UseInstr->user_end();
                     PB != PE && NumUses <= 6; ++PB, ++NumUses) {
                  unsigned PhiOpc = cast<Instruction>(*PB)->getOpcode();
                  if (PhiOpc != Instruction::Call && PhiOpc != Instruction::Ret)
                    NumSoftExternalUses++;
                }
                continue;
              }
              if (Opc != Instruction::Call && Opc != Instruction::Ret)
                NumSoftExternalUses++;
            }
          }
          if (NumUses <= 6 && NumHardInternalUses && !NumSoftExternalUses)
            continue;
        }

        Value *ExitVal = Rewriter.expandCodeFor(ExitValue, PN->getType(), Inst);

        DEBUG(dbgs() << "INDVARS: RLEV: AfterLoopVal = " << *ExitVal << '\n'
                     << " LoopVal = " << *Inst << "\n");

        if (!isValidRewrite(Inst, ExitVal)) {
          DeadInsts.push_back(ExitVal);
          continue;
        }
        Changed = true;
        ++NumReplaced;

        PN->setIncomingValue(i, ExitVal);

        // If this instruction is dead now, delete it. Don't do it now to avoid
        // invalidating iterators.
        if (isInstructionTriviallyDead(Inst, TLI))
          DeadInsts.push_back(Inst);

        // If we determined that this PHI is safe to replace even if an LCSSA
        // PHI, do so.
        if (LCSSASafePhiForRAUW) {
          PN->replaceAllUsesWith(ExitVal);
          PN->eraseFromParent();
        }
      }

      // If we were unable to completely replace the PHI node, clone the PHI
      // and delete the original one. This lets IVUsers and any other maps
      // purge the original user from their records.
      if (!LCSSASafePhiForRAUW) {
        PHINode *NewPN = cast<PHINode>(PN->clone());
        NewPN->takeName(PN);
        NewPN->insertBefore(PN);
        PN->replaceAllUsesWith(NewPN);
        PN->eraseFromParent();
      }
    }
  }

  // The insertion point instruction may have been deleted; clear it out
  // so that the rewriter doesn't trip over it later.
  Rewriter.clearInsertPoint();
}

//===----------------------------------------------------------------------===//
// IV Widening - Extend the width of an IV to cover its widest uses.
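// (e.g. a 32-bit IV whose interesting uses are all sign-extended to 64 bits
// is replaced by a single 64-bit IV, eliminating the sext instructions.)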
//===----------------------------------------------------------------------===//

namespace {
  // Collect information about induction variables that are used by sign/zero
  // extend operations. This information is recorded by CollectExtend and
  // provides the input to WidenIV.
  struct WideIVInfo {
    PHINode *NarrowIV;
    Type *WidestNativeType; // Widest integer type created by [sz]ext
    bool IsSigned;          // Was a sext user seen before a zext?

    WideIVInfo() : NarrowIV(nullptr), WidestNativeType(nullptr),
                   IsSigned(false) {}
  };
}

/// visitCast - Update information about the induction variable that is
/// extended by this sign or zero extend operation. This is used to determine
/// the final width of the IV before actually widening it.
static void visitIVCast(CastInst *Cast, WideIVInfo &WI, ScalarEvolution *SE,
                        const DataLayout *DL, const TargetTransformInfo *TTI) {
  bool IsSigned = Cast->getOpcode() == Instruction::SExt;
  if (!IsSigned && Cast->getOpcode() != Instruction::ZExt)
    return;

  Type *Ty = Cast->getType();
  uint64_t Width = SE->getTypeSizeInBits(Ty);
  if (DL && !DL->isLegalInteger(Width))
    return;

  // Cast is either an sext or zext up to this point.
  // We should not widen an indvar if arithmetics on the wider indvar are more
  // expensive than those on the narrower indvar. We check only the cost of ADD
  // because at least an ADD is required to increment the induction variable. We
  // could compute more comprehensively the cost of all instructions on the
  // induction variable when necessary.
  if (TTI &&
      TTI->getArithmeticInstrCost(Instruction::Add, Ty) >
          TTI->getArithmeticInstrCost(Instruction::Add,
                                      Cast->getOperand(0)->getType())) {
    return;
  }

  if (!WI.WidestNativeType) {
    WI.WidestNativeType = SE->getEffectiveSCEVType(Ty);
    WI.IsSigned = IsSigned;
    return;
  }

  // We extend the IV to satisfy the sign of its first user, arbitrarily.
  if (WI.IsSigned != IsSigned)
    return;

  if (Width > SE->getTypeSizeInBits(WI.WidestNativeType))
    WI.WidestNativeType = SE->getEffectiveSCEVType(Ty);
}

namespace {

/// NarrowIVDefUse - Record a link in the Narrow IV def-use chain along with the
/// WideIV that computes the same value as the Narrow IV def. This avoids
/// caching Use* pointers.
struct NarrowIVDefUse {
  Instruction *NarrowDef;
  Instruction *NarrowUse;
  Instruction *WideDef;

  NarrowIVDefUse(): NarrowDef(nullptr), NarrowUse(nullptr), WideDef(nullptr) {}

  NarrowIVDefUse(Instruction *ND, Instruction *NU, Instruction *WD):
    NarrowDef(ND), NarrowUse(NU), WideDef(WD) {}
};

/// WidenIV - The goal of this transform is to remove sign and zero extends
/// without creating any new induction variables. To do this, it creates a new
/// phi of the wider type and redirects all users, either removing extends or
/// inserting truncs whenever we stop propagating the type.
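///
/// For example (sketch of the intended effect):
///   %iv      = phi i32 [ 0, %ph ], [ %iv.next, %latch ]
///   %iv.next = add nsw i32 %iv, 1
///   %idx     = sext i32 %iv to i64
/// becomes a single i64 recurrence:
///   %iv.wide      = phi i64 [ 0, %ph ], [ %iv.wide.next, %latch ]
///   %iv.wide.next = add nsw i64 %iv.wide, 1
/// with users of %idx redirected to %iv.wide.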
///
class WidenIV {
  // Parameters
  PHINode *OrigPhi;
  Type *WideType;
  bool IsSigned;

  // Context
  LoopInfo *LI;
  Loop *L;
  ScalarEvolution *SE;
  DominatorTree *DT;

  // Result
  PHINode *WidePhi;
  Instruction *WideInc;
  const SCEV *WideIncExpr;
  SmallVectorImpl<WeakVH> &DeadInsts;

  SmallPtrSet<Instruction*,16> Widened;
  SmallVector<NarrowIVDefUse, 8> NarrowIVUsers;

public:
  WidenIV(const WideIVInfo &WI, LoopInfo *LInfo,
          ScalarEvolution *SEv, DominatorTree *DTree,
          SmallVectorImpl<WeakVH> &DI) :
    OrigPhi(WI.NarrowIV),
    WideType(WI.WidestNativeType),
    IsSigned(WI.IsSigned),
    LI(LInfo),
    L(LI->getLoopFor(OrigPhi->getParent())),
    SE(SEv),
    DT(DTree),
    WidePhi(nullptr),
    WideInc(nullptr),
    WideIncExpr(nullptr),
    DeadInsts(DI) {
    assert(L->getHeader() == OrigPhi->getParent() && "Phi must be an IV");
  }

  PHINode *CreateWideIV(SCEVExpander &Rewriter);

protected:
  Value *getExtend(Value *NarrowOper, Type *WideType, bool IsSigned,
                   Instruction *Use);

  Instruction *CloneIVUser(NarrowIVDefUse DU);

  const SCEVAddRecExpr *GetWideRecurrence(Instruction *NarrowUse);

  const SCEVAddRecExpr* GetExtendedOperandRecurrence(NarrowIVDefUse DU);

  const SCEV *GetSCEVByOpCode(const SCEV *LHS, const SCEV *RHS,
                              unsigned OpCode) const;

  Instruction *WidenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter);

  bool WidenLoopCompare(NarrowIVDefUse DU);

  void pushNarrowIVUsers(Instruction *NarrowDef, Instruction *WideDef);
};
} // anonymous namespace

/// isLoopInvariant - Perform a quick domtree based check for loop invariance
/// assuming that V is used within the loop. LoopInfo::isLoopInvariant() seems
/// gratuitous for this purpose.
static bool isLoopInvariant(Value *V, const Loop *L, const DominatorTree *DT) {
  Instruction *Inst = dyn_cast<Instruction>(V);
  if (!Inst)
    return true;

  return DT->properlyDominates(Inst->getParent(), L->getHeader());
}

Value *WidenIV::getExtend(Value *NarrowOper, Type *WideType, bool IsSigned,
                          Instruction *Use) {
  // Set the debug location and conservative insertion point.
  IRBuilder<> Builder(Use);
  // Hoist the insertion point into loop preheaders as far as possible.
  for (const Loop *L = LI->getLoopFor(Use->getParent());
       L && L->getLoopPreheader() && isLoopInvariant(NarrowOper, L, DT);
       L = L->getParentLoop())
    Builder.SetInsertPoint(L->getLoopPreheader()->getTerminator());

  return IsSigned ? Builder.CreateSExt(NarrowOper, WideType) :
                    Builder.CreateZExt(NarrowOper, WideType);
}

/// CloneIVUser - Instantiate a wide operation to replace a narrow
/// operation. This only needs to handle operations that can evaluate to
/// SCEVAddRec. It can safely return 0 for any operation we decide not to clone.
Instruction *WidenIV::CloneIVUser(NarrowIVDefUse DU) {
  unsigned Opcode = DU.NarrowUse->getOpcode();
  switch (Opcode) {
  default:
    return nullptr;
  case Instruction::Add:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::Sub:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    DEBUG(dbgs() << "Cloning IVUser: " << *DU.NarrowUse << "\n");

    // Replace NarrowDef operands with WideDef. Otherwise, we don't know
    // anything about the narrow operand yet so must insert a [sz]ext. It is
    // probably loop invariant and will be folded or hoisted. If it actually
    // comes from a widened IV, it should be removed during a future call to
    // WidenIVUse.
    Value *LHS = (DU.NarrowUse->getOperand(0) == DU.NarrowDef) ? DU.WideDef :
      getExtend(DU.NarrowUse->getOperand(0), WideType, IsSigned, DU.NarrowUse);
    Value *RHS = (DU.NarrowUse->getOperand(1) == DU.NarrowDef) ? DU.WideDef :
      getExtend(DU.NarrowUse->getOperand(1), WideType, IsSigned, DU.NarrowUse);

    BinaryOperator *NarrowBO = cast<BinaryOperator>(DU.NarrowUse);
    BinaryOperator *WideBO = BinaryOperator::Create(NarrowBO->getOpcode(),
                                                    LHS, RHS,
                                                    NarrowBO->getName());
    IRBuilder<> Builder(DU.NarrowUse);
    Builder.Insert(WideBO);
    if (const OverflowingBinaryOperator *OBO =
        dyn_cast<OverflowingBinaryOperator>(NarrowBO)) {
      if (OBO->hasNoUnsignedWrap()) WideBO->setHasNoUnsignedWrap();
      if (OBO->hasNoSignedWrap()) WideBO->setHasNoSignedWrap();
    }
    return WideBO;
  }
}

const SCEV *WidenIV::GetSCEVByOpCode(const SCEV *LHS, const SCEV *RHS,
                                     unsigned OpCode) const {
  if (OpCode == Instruction::Add)
    return SE->getAddExpr(LHS, RHS);
  if (OpCode == Instruction::Sub)
    return SE->getMinusSCEV(LHS, RHS);
  if (OpCode == Instruction::Mul)
    return SE->getMulExpr(LHS, RHS);

  llvm_unreachable("Unsupported opcode.");
}

/// No-wrap operations can transfer sign extension of their result to their
/// operands. Generate the SCEV value for the widened operation without
/// actually modifying the IR yet. If the expression after extending the
/// operands is an AddRec for this loop, return it.
const SCEVAddRecExpr* WidenIV::GetExtendedOperandRecurrence(NarrowIVDefUse DU) {

  // Handle the common case of add<nsw/nuw>
  const unsigned OpCode = DU.NarrowUse->getOpcode();
  // Only Add/Sub/Mul instructions are supported yet.
  if (OpCode != Instruction::Add && OpCode != Instruction::Sub &&
      OpCode != Instruction::Mul)
    return nullptr;

  // One operand (NarrowDef) has already been extended to WideDef. Now determine
  // if extending the other will lead to a recurrence.
  const unsigned ExtendOperIdx =
      DU.NarrowUse->getOperand(0) == DU.NarrowDef ? 1 : 0;
  assert(DU.NarrowUse->getOperand(1-ExtendOperIdx) == DU.NarrowDef && "bad DU");

  const SCEV *ExtendOperExpr = nullptr;
  const OverflowingBinaryOperator *OBO =
    cast<OverflowingBinaryOperator>(DU.NarrowUse);
  if (IsSigned && OBO->hasNoSignedWrap())
    ExtendOperExpr = SE->getSignExtendExpr(
      SE->getSCEV(DU.NarrowUse->getOperand(ExtendOperIdx)), WideType);
  else if (!IsSigned && OBO->hasNoUnsignedWrap())
    ExtendOperExpr = SE->getZeroExtendExpr(
      SE->getSCEV(DU.NarrowUse->getOperand(ExtendOperIdx)), WideType);
  else
    return nullptr;

  // When creating this SCEV expr, don't apply the current operation's NSW or
  // NUW flags. This instruction may be guarded by control flow that the no-wrap
  // behavior depends on. Non-control-equivalent instructions can be mapped to
  // the same SCEV expression, and it would be incorrect to transfer NSW/NUW
  // semantics to those operations.
  const SCEV *lhs = SE->getSCEV(DU.WideDef);
  const SCEV *rhs = ExtendOperExpr;

  // Let's swap operands to the initial order for the case of non-commutative
  // operations, like SUB. See PR21014.
  if (ExtendOperIdx == 0)
    std::swap(lhs, rhs);
  const SCEVAddRecExpr *AddRec =
      dyn_cast<SCEVAddRecExpr>(GetSCEVByOpCode(lhs, rhs, OpCode));

  if (!AddRec || AddRec->getLoop() != L)
    return nullptr;
  return AddRec;
}

/// GetWideRecurrence - Is this instruction potentially interesting from
/// IVUsers' perspective after widening its type? In other words, can the
/// extend be safely hoisted out of the loop with SCEV reducing the value to a
/// recurrence on the same loop? If so, return the sign or zero extended
/// recurrence. Otherwise return NULL.
const SCEVAddRecExpr *WidenIV::GetWideRecurrence(Instruction *NarrowUse) {
  if (!SE->isSCEVable(NarrowUse->getType()))
    return nullptr;

  const SCEV *NarrowExpr = SE->getSCEV(NarrowUse);
  if (SE->getTypeSizeInBits(NarrowExpr->getType())
      >= SE->getTypeSizeInBits(WideType)) {
    // NarrowUse implicitly widens its operand. e.g. a gep with a narrow
    // index. So don't follow this use.
    return nullptr;
  }

  const SCEV *WideExpr = IsSigned ?
    SE->getSignExtendExpr(NarrowExpr, WideType) :
    SE->getZeroExtendExpr(NarrowExpr, WideType);
  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(WideExpr);
  if (!AddRec || AddRec->getLoop() != L)
    return nullptr;
  return AddRec;
}

/// This IV user cannot be widened. Replace this use of the original narrow IV
/// with a truncation of the new wide IV to isolate and eliminate the narrow IV.
static void truncateIVUse(NarrowIVDefUse DU, DominatorTree *DT) {
  DEBUG(dbgs() << "INDVARS: Truncate IV " << *DU.WideDef
               << " for user " << *DU.NarrowUse << "\n");
  IRBuilder<> Builder(getInsertPointForUses(DU.NarrowUse, DU.NarrowDef, DT));
  Value *Trunc = Builder.CreateTrunc(DU.WideDef, DU.NarrowDef->getType());
  DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, Trunc);
}

/// If the narrow use is a compare instruction, then widen the compare
/// (and possibly the other operand). The extend operation is hoisted into the
/// loop preheader as far as possible.
bool WidenIV::WidenLoopCompare(NarrowIVDefUse DU) {
  ICmpInst *Cmp = dyn_cast<ICmpInst>(DU.NarrowUse);
  if (!Cmp)
    return false;

  // Sign of IV user and compare must match.
  if (IsSigned != CmpInst::isSigned(Cmp->getPredicate()))
    return false;

  Value *Op = Cmp->getOperand(Cmp->getOperand(0) == DU.NarrowDef ? 1 : 0);
  unsigned CastWidth = SE->getTypeSizeInBits(Op->getType());
  unsigned IVWidth = SE->getTypeSizeInBits(WideType);
  assert(CastWidth <= IVWidth && "Unexpected width while widening compare.");

  // Widen the compare instruction.
  IRBuilder<> Builder(getInsertPointForUses(DU.NarrowUse, DU.NarrowDef, DT));
  DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, DU.WideDef);

  // Widen the other operand of the compare, if necessary.
  if (CastWidth < IVWidth) {
    Value *ExtOp = getExtend(Op, WideType, IsSigned, Cmp);
    DU.NarrowUse->replaceUsesOfWith(Op, ExtOp);
  }
  return true;
}

/// WidenIVUse - Determine whether an individual user of the narrow IV can be
/// widened. If so, return the wide clone of the user.
Instruction *WidenIV::WidenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter) {

  // Stop traversing the def-use chain at inner-loop phis or post-loop phis.
  if (PHINode *UsePhi = dyn_cast<PHINode>(DU.NarrowUse)) {
    if (LI->getLoopFor(UsePhi->getParent()) != L) {
      // For LCSSA phis, sink the truncate outside the loop.
      // After SimplifyCFG most loop exit targets have a single predecessor.
      // Otherwise fall back to a truncate within the loop.
      if (UsePhi->getNumOperands() != 1)
        truncateIVUse(DU, DT);
      else {
        PHINode *WidePhi =
          PHINode::Create(DU.WideDef->getType(), 1, UsePhi->getName() + ".wide",
                          UsePhi);
        WidePhi->addIncoming(DU.WideDef, UsePhi->getIncomingBlock(0));
        IRBuilder<> Builder(WidePhi->getParent()->getFirstInsertionPt());
        Value *Trunc = Builder.CreateTrunc(WidePhi, DU.NarrowDef->getType());
        UsePhi->replaceAllUsesWith(Trunc);
        DeadInsts.push_back(UsePhi);
        DEBUG(dbgs() << "INDVARS: Widen lcssa phi " << *UsePhi
              << " to " << *WidePhi << "\n");
      }
      return nullptr;
    }
  }
  // Our raison d'etre! Eliminate sign and zero extension.
  if (IsSigned ? isa<SExtInst>(DU.NarrowUse) : isa<ZExtInst>(DU.NarrowUse)) {
    Value *NewDef = DU.WideDef;
    if (DU.NarrowUse->getType() != WideType) {
      unsigned CastWidth = SE->getTypeSizeInBits(DU.NarrowUse->getType());
      unsigned IVWidth = SE->getTypeSizeInBits(WideType);
      if (CastWidth < IVWidth) {
        // The cast isn't as wide as the IV, so insert a Trunc.
        IRBuilder<> Builder(DU.NarrowUse);
        NewDef = Builder.CreateTrunc(DU.WideDef, DU.NarrowUse->getType());
      }
      else {
        // A wider extend was hidden behind a narrower one. This may induce
        // another round of IV widening in which the intermediate IV becomes
        // dead. It should be very rare.
        DEBUG(dbgs() << "INDVARS: New IV " << *WidePhi
              << " not wide enough to subsume " << *DU.NarrowUse << "\n");
        DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, DU.WideDef);
        NewDef = DU.NarrowUse;
      }
    }
    if (NewDef != DU.NarrowUse) {
      DEBUG(dbgs() << "INDVARS: eliminating " << *DU.NarrowUse
            << " replaced by " << *DU.WideDef << "\n");
      ++NumElimExt;
      DU.NarrowUse->replaceAllUsesWith(NewDef);
      DeadInsts.push_back(DU.NarrowUse);
    }
    // Now that the extend is gone, we want to expose its uses for potential
    // further simplification. We don't need to directly inform SimplifyIVUsers
    // of the new users, because their parent IV will be processed later as a
    // new loop phi. If we preserved IVUsers analysis, we would also want to
    // push the uses of WideDef here.

    // No further widening is needed. The deceased [sz]ext had done it for us.
    return nullptr;
  }

  // Does this user itself evaluate to a recurrence after widening?
  const SCEVAddRecExpr *WideAddRec = GetWideRecurrence(DU.NarrowUse);
  if (!WideAddRec)
    WideAddRec = GetExtendedOperandRecurrence(DU);

  if (!WideAddRec) {
    // If use is a loop condition, try to promote the condition instead of
    // truncating the IV first.
    if (WidenLoopCompare(DU))
      return nullptr;

    // This user does not evaluate to a recurrence after widening, so don't
    // follow it. Instead insert a Trunc to kill off the original use,
    // eventually isolating the original narrow IV so it can be removed.
    truncateIVUse(DU, DT);
    return nullptr;
  }
  // Assume block terminators cannot evaluate to a recurrence. We can't
  // insert a Trunc after a terminator if there happens to be a critical edge.
  assert(DU.NarrowUse != DU.NarrowUse->getParent()->getTerminator() &&
         "SCEV is not expected to evaluate a block terminator");

  // Reuse the IV increment that SCEVExpander created as long as it dominates
  // NarrowUse.
  Instruction *WideUse = nullptr;
  if (WideAddRec == WideIncExpr
      && Rewriter.hoistIVInc(WideInc, DU.NarrowUse))
    WideUse = WideInc;
  else {
    WideUse = CloneIVUser(DU);
    if (!WideUse)
      return nullptr;
  }
  // Evaluation of WideAddRec ensured that the narrow expression could be
  // extended outside the loop without overflow. This suggests that the wide use
  // evaluates to the same expression as the extended narrow use, but doesn't
  // absolutely guarantee it. Hence the following failsafe check. In rare cases
  // where it fails, we simply throw away the newly created wide use.
  if (WideAddRec != SE->getSCEV(WideUse)) {
    DEBUG(dbgs() << "Wide use expression mismatch: " << *WideUse
          << ": " << *SE->getSCEV(WideUse) << " != " << *WideAddRec << "\n");
    DeadInsts.push_back(WideUse);
    return nullptr;
  }

  // Returning WideUse pushes it on the worklist.
  return WideUse;
}

/// pushNarrowIVUsers - Add eligible users of NarrowDef to NarrowIVUsers.
///
void WidenIV::pushNarrowIVUsers(Instruction *NarrowDef, Instruction *WideDef) {
  for (User *U : NarrowDef->users()) {
    Instruction *NarrowUser = cast<Instruction>(U);

    // Handle data flow merges and bizarre phi cycles.
    if (!Widened.insert(NarrowUser).second)
      continue;

    NarrowIVUsers.push_back(NarrowIVDefUse(NarrowDef, NarrowUser, WideDef));
  }
}

/// CreateWideIV - Process a single induction variable. First use the
/// SCEVExpander to create a wide induction variable that evaluates to the same
/// recurrence as the original narrow IV. Then use a worklist to forward
/// traverse the narrow IV's def-use chain. After WidenIVUse has processed all
/// interesting IV users, the narrow IV will be isolated for removal by
/// DeleteDeadPHIs.
///
/// It would be simpler to delete uses as they are processed, but we must avoid
/// invalidating SCEV expressions.
///
PHINode *WidenIV::CreateWideIV(SCEVExpander &Rewriter) {
  // Is this phi an induction variable?
  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(OrigPhi));
  if (!AddRec)
    return nullptr;

  // Widen the induction variable expression.
  const SCEV *WideIVExpr = IsSigned ?
    SE->getSignExtendExpr(AddRec, WideType) :
    SE->getZeroExtendExpr(AddRec, WideType);

  assert(SE->getEffectiveSCEVType(WideIVExpr->getType()) == WideType &&
         "Expect the new IV expression to preserve its type");

  // Can the IV be extended outside the loop without overflow?
  AddRec = dyn_cast<SCEVAddRecExpr>(WideIVExpr);
  if (!AddRec || AddRec->getLoop() != L)
    return nullptr;

  // An AddRec must have loop-invariant operands. Since this AddRec is
  // materialized by a loop header phi, the expression cannot have any post-loop
  // operands, so they must dominate the loop header.
  assert(SE->properlyDominates(AddRec->getStart(), L->getHeader()) &&
         SE->properlyDominates(AddRec->getStepRecurrence(*SE), L->getHeader())
         && "Loop header phi recurrence inputs do not dominate the loop");

  // The rewriter provides a value for the desired IV expression. This may
  // either find an existing phi or materialize a new one. Either way, we
  // expect a well-formed cyclic phi-with-increments. i.e. any operand not part
  // of the phi-SCC dominates the loop entry.
  Instruction *InsertPt = L->getHeader()->begin();
  WidePhi = cast<PHINode>(Rewriter.expandCodeFor(AddRec, WideType, InsertPt));

  // Remembering the WideIV increment generated by SCEVExpander allows
  // WidenIVUse to reuse it when widening the narrow IV's increment. We don't
  // employ a general reuse mechanism because the call above is the only call to
  // SCEVExpander. Henceforth, we produce 1-to-1 narrow to wide uses.
  if (BasicBlock *LatchBlock = L->getLoopLatch()) {
    WideInc =
      cast<Instruction>(WidePhi->getIncomingValueForBlock(LatchBlock));
    WideIncExpr = SE->getSCEV(WideInc);
  }

  DEBUG(dbgs() << "Wide IV: " << *WidePhi << "\n");
  ++NumWidened;

  // Traverse the def-use chain using a worklist starting at the original IV.
  assert(Widened.empty() && NarrowIVUsers.empty() && "expect initial state");

  Widened.insert(OrigPhi);
  pushNarrowIVUsers(OrigPhi, WidePhi);

  while (!NarrowIVUsers.empty()) {
    NarrowIVDefUse DU = NarrowIVUsers.pop_back_val();

    // Process a def-use edge. This may replace the use, so don't hold a
    // use_iterator across it.
    Instruction *WideUse = WidenIVUse(DU, Rewriter);

    // Follow all def-use edges from the previous narrow use.
    if (WideUse)
      pushNarrowIVUsers(DU.NarrowUse, WideUse);

    // WidenIVUse may have removed the def-use edge.
    if (DU.NarrowDef->use_empty())
      DeadInsts.push_back(DU.NarrowDef);
  }
  return WidePhi;
}

//===----------------------------------------------------------------------===//
//  Live IV Reduction - Minimize IVs live across the loop.
//===----------------------------------------------------------------------===//


//===----------------------------------------------------------------------===//
//  Simplification of IV users based on SCEV evaluation.
//===----------------------------------------------------------------------===//

namespace {
  class IndVarSimplifyVisitor : public IVVisitor {
    ScalarEvolution *SE;
    const DataLayout *DL;
    const TargetTransformInfo *TTI;
    PHINode *IVPhi;

  public:
    WideIVInfo WI;

    IndVarSimplifyVisitor(PHINode *IV, ScalarEvolution *SCEV,
                          const DataLayout *DL, const TargetTransformInfo *TTI,
                          const DominatorTree *DTree)
      : SE(SCEV), DL(DL), TTI(TTI), IVPhi(IV) {
      DT = DTree;
      WI.NarrowIV = IVPhi;
      if (ReduceLiveIVs)
        setSplitOverflowIntrinsics();
    }

    // Implement the interface used by simplifyUsersOfIV.
    void visitCast(CastInst *Cast) override {
      visitIVCast(Cast, WI, SE, DL, TTI);
    }
  };
}

/// SimplifyAndExtend - Iteratively perform simplification on a worklist of IV
/// users. Each successive simplification may push more users which may
/// themselves be candidates for simplification.
///
/// Sign/Zero extend elimination is interleaved with IV simplification.
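/// (e.g. simplifying an IV comparison can expose a sign-extend user, which
/// the visitor above records so that the IV can be widened in the same
/// round.)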
///
void IndVarSimplify::SimplifyAndExtend(Loop *L,
                                       SCEVExpander &Rewriter,
                                       LPPassManager &LPM) {
  SmallVector<WideIVInfo, 8> WideIVs;

  SmallVector<PHINode*, 8> LoopPhis;
  for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I) {
    LoopPhis.push_back(cast<PHINode>(I));
  }
  // Each round of simplification iterates through the SimplifyIVUsers worklist
  // for all current phis, then determines whether any IVs can be
  // widened. Widening adds new phis to LoopPhis, inducing another round of
  // simplification on the wide IVs.
  while (!LoopPhis.empty()) {
    // Evaluate as many IV expressions as possible before widening any IVs. This
    // forces SCEV to set no-wrap flags before evaluating sign/zero
    // extension. The first time SCEV attempts to normalize sign/zero extension,
    // the result becomes final. So for the most predictable results, we delay
    // evaluation of sign/zero extends until needed, and avoid running
    // other SCEV based analysis prior to SimplifyAndExtend.
    do {
      PHINode *CurrIV = LoopPhis.pop_back_val();

      // Information about sign/zero extensions of CurrIV.
      IndVarSimplifyVisitor Visitor(CurrIV, SE, DL, TTI, DT);

      Changed |= simplifyUsersOfIV(CurrIV, SE, &LPM, DeadInsts, &Visitor);

      if (Visitor.WI.WidestNativeType) {
        WideIVs.push_back(Visitor.WI);
      }
    } while (!LoopPhis.empty());

    for (; !WideIVs.empty(); WideIVs.pop_back()) {
      WidenIV Widener(WideIVs.back(), LI, SE, DT, DeadInsts);
      if (PHINode *WidePhi = Widener.CreateWideIV(Rewriter)) {
        Changed = true;
        LoopPhis.push_back(WidePhi);
      }
    }
  }
}

//===----------------------------------------------------------------------===//
//  LinearFunctionTestReplace and its kin. Rewrite the loop exit condition.
//===----------------------------------------------------------------------===//

/// Check for expressions that ScalarEvolution generates to compute
/// BackedgeTakenInfo. If these expressions have not been reduced, then
/// expanding them may incur additional cost (albeit in the loop preheader).
static bool isHighCostExpansion(const SCEV *S, BranchInst *BI,
                                SmallPtrSetImpl<const SCEV*> &Processed,
                                ScalarEvolution *SE) {
  if (!Processed.insert(S).second)
    return false;

  // If the backedge-taken count is a UDiv, it's very likely a UDiv that
  // ScalarEvolution's HowFarToZero or HowManyLessThans produced to compute a
  // precise expression, rather than a UDiv from the user's code. If we can't
  // find a UDiv in the code with some simple searching, assume the former and
  // forego rewriting the loop.
  if (isa<SCEVUDivExpr>(S)) {
    ICmpInst *OrigCond = dyn_cast<ICmpInst>(BI->getCondition());
    if (!OrigCond) return true;
    const SCEV *R = SE->getSCEV(OrigCond->getOperand(1));
    R = SE->getMinusSCEV(R, SE->getConstant(R->getType(), 1));
    if (R != S) {
      const SCEV *L = SE->getSCEV(OrigCond->getOperand(0));
      L = SE->getMinusSCEV(L, SE->getConstant(L->getType(), 1));
      if (L != S)
        return true;
    }
  }

  // Recurse past add expressions, which commonly occur in the
  // BackedgeTakenCount. They may already exist in program code, and if not,
  // they are not too expensive to rematerialize.
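  // (e.g. for 'for (i = 0; i != n; ++i)' the backedge-taken count is the add
  // expression (-1 + %n), which expands to a single 'add %n, -1' in the
  // preheader.)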
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
         I != E; ++I) {
      if (isHighCostExpansion(*I, BI, Processed, SE))
        return true;
    }
    return false;
  }

  // HowManyLessThans uses a Max expression whenever the loop is not guarded by
  // the exit condition.
  if (isa<SCEVSMaxExpr>(S) || isa<SCEVUMaxExpr>(S))
    return true;

  // If we haven't recognized an expensive SCEV pattern, assume it's an
  // expression produced by program code.
  return false;
}

/// canExpandBackedgeTakenCount - Return true if this loop's backedge taken
/// count expression can be safely and cheaply expanded into an instruction
/// sequence that can be used by LinearFunctionTestReplace.
///
/// TODO: This fails for pointer-type loop counters with greater than one byte
/// strides, consequently preventing LFTR from running. For the purpose of LFTR
/// we could skip this check in the case that the LFTR loop counter (chosen by
/// FindLoopCounter) is also pointer type. Instead, we could directly convert
/// the loop test to an inequality test by checking the target data's alignment
/// of element types (given that the initial pointer value originates from or is
/// used by an ABI constrained operation, as opposed to inttoptr/ptrtoint).
/// However, we don't yet have a strong motivation for converting loop tests
/// into inequality tests.
static bool canExpandBackedgeTakenCount(Loop *L, ScalarEvolution *SE) {
  const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount) ||
      BackedgeTakenCount->isZero())
    return false;

  if (!L->getExitingBlock())
    return false;

  // Can't rewrite non-branch yet.
  BranchInst *BI = dyn_cast<BranchInst>(L->getExitingBlock()->getTerminator());
  if (!BI)
    return false;

  SmallPtrSet<const SCEV*, 8> Processed;
  if (isHighCostExpansion(BackedgeTakenCount, BI, Processed, SE))
    return false;

  return true;
}

/// getLoopPhiForCounter - Return the loop header phi IFF IncV adds a loop
/// invariant value to the phi.
static PHINode *getLoopPhiForCounter(Value *IncV, Loop *L, DominatorTree *DT) {
  Instruction *IncI = dyn_cast<Instruction>(IncV);
  if (!IncI)
    return nullptr;

  switch (IncI->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
    break;
  case Instruction::GetElementPtr:
    // An IV counter must preserve its type.
    if (IncI->getNumOperands() == 2)
      break;
  default:
    return nullptr;
  }

  PHINode *Phi = dyn_cast<PHINode>(IncI->getOperand(0));
  if (Phi && Phi->getParent() == L->getHeader()) {
    if (isLoopInvariant(IncI->getOperand(1), L, DT))
      return Phi;
    return nullptr;
  }
  if (IncI->getOpcode() == Instruction::GetElementPtr)
    return nullptr;

  // Allow add/sub to be commuted.
  Phi = dyn_cast<PHINode>(IncI->getOperand(1));
  if (Phi && Phi->getParent() == L->getHeader()) {
    if (isLoopInvariant(IncI->getOperand(0), L, DT))
      return Phi;
  }
  return nullptr;
}

/// Return the compare guarding the loop latch, or NULL for unrecognized tests.
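/// (In a loop in simplified form this is typically the icmp feeding the
/// exiting block's conditional branch, e.g. the %cmp in
/// 'br i1 %cmp, label %loop, label %exit'; names illustrative.)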
static ICmpInst *getLoopTest(Loop *L) {
  assert(L->getExitingBlock() && "expected loop exit");

  BasicBlock *LatchBlock = L->getLoopLatch();
  // Don't bother with LFTR if the loop is not properly simplified.
  if (!LatchBlock)
    return nullptr;

  BranchInst *BI = dyn_cast<BranchInst>(L->getExitingBlock()->getTerminator());
  assert(BI && "expected exit branch");

  return dyn_cast<ICmpInst>(BI->getCondition());
}

/// needsLFTR - LinearFunctionTestReplace policy. Return true unless we can show
/// that the current exit test is already sufficiently canonical.
static bool needsLFTR(Loop *L, DominatorTree *DT) {
  // Do LFTR to simplify the exit condition to an ICMP.
  ICmpInst *Cond = getLoopTest(L);
  if (!Cond)
    return true;

  // Do LFTR to simplify the exit ICMP to EQ/NE
  ICmpInst::Predicate Pred = Cond->getPredicate();
  if (Pred != ICmpInst::ICMP_NE && Pred != ICmpInst::ICMP_EQ)
    return true;

  // Look for a loop invariant RHS
  Value *LHS = Cond->getOperand(0);
  Value *RHS = Cond->getOperand(1);
  if (!isLoopInvariant(RHS, L, DT)) {
    if (!isLoopInvariant(LHS, L, DT))
      return true;
    std::swap(LHS, RHS);
  }
  // Look for a simple IV counter LHS
  PHINode *Phi = dyn_cast<PHINode>(LHS);
  if (!Phi)
    Phi = getLoopPhiForCounter(LHS, L, DT);

  if (!Phi)
    return true;

  // Do LFTR if PHI node is defined in the loop, but is *not* a counter.
  int Idx = Phi->getBasicBlockIndex(L->getLoopLatch());
  if (Idx < 0)
    return true;

  // Do LFTR if the exit condition's IV is *not* a simple counter.
  Value *IncV = Phi->getIncomingValue(Idx);
  return Phi != getLoopPhiForCounter(IncV, L, DT);
}

/// Recursive helper for hasConcreteDef(). Unfortunately, this currently boils
/// down to checking that all operands are constant and listing instructions
/// that may hide undef.
static bool hasConcreteDefImpl(Value *V, SmallPtrSetImpl<Value*> &Visited,
                               unsigned Depth) {
  if (isa<Constant>(V))
    return !isa<UndefValue>(V);

  if (Depth >= 6)
    return false;

  // Conservatively handle non-constant non-instructions. For example, Arguments
  // may be undef.
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  // Load and return values may be undef.
  if (I->mayReadFromMemory() || isa<CallInst>(I) || isa<InvokeInst>(I))
    return false;

  // Optimistically handle other instructions.
  for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI) {
    if (!Visited.insert(*OI).second)
      continue;
    if (!hasConcreteDefImpl(*OI, Visited, Depth+1))
      return false;
  }
  return true;
}

/// Return true if the given value is concrete. We must prove that undef can
/// never reach it.
///
/// TODO: If we decide that this is a good approach to checking for undef, we
/// may factor it into a common location.
static bool hasConcreteDef(Value *V) {
  SmallPtrSet<Value*, 8> Visited;
  Visited.insert(V);
  return hasConcreteDefImpl(V, Visited, 0);
}

/// AlmostDeadIV - Return true if this IV has any uses other than the (soon to
/// be rewritten) loop exit test.
/// Recursive helper for hasConcreteDef(). Unfortunately, this currently boils
/// down to checking that all operands are constant and listing instructions
/// that may hide undef.
static bool hasConcreteDefImpl(Value *V, SmallPtrSetImpl<Value*> &Visited,
                               unsigned Depth) {
  if (isa<Constant>(V))
    return !isa<UndefValue>(V);

  if (Depth >= 6)
    return false;

  // Conservatively handle non-constant non-instructions. For example,
  // Arguments may be undef.
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  // Load and return values may be undef.
  if (I->mayReadFromMemory() || isa<CallInst>(I) || isa<InvokeInst>(I))
    return false;

  // Optimistically handle other instructions.
  for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI) {
    if (!Visited.insert(*OI).second)
      continue;
    if (!hasConcreteDefImpl(*OI, Visited, Depth+1))
      return false;
  }
  return true;
}

/// Return true if the given value is concrete. We must prove that undef can
/// never reach it.
///
/// TODO: If we decide that this is a good approach to checking for undef, we
/// may factor it into a common location.
static bool hasConcreteDef(Value *V) {
  SmallPtrSet<Value*, 8> Visited;
  Visited.insert(V);
  return hasConcreteDefImpl(V, Visited, 0);
}

/// AlmostDeadIV - Return true if this IV has no uses other than the (soon to
/// be rewritten) loop exit test and its own increment; i.e., the IV is almost
/// dead.
static bool AlmostDeadIV(PHINode *Phi, BasicBlock *LatchBlock, Value *Cond) {
  int LatchIdx = Phi->getBasicBlockIndex(LatchBlock);
  Value *IncV = Phi->getIncomingValue(LatchIdx);

  for (User *U : Phi->users())
    if (U != Cond && U != IncV) return false;

  for (User *U : IncV->users())
    if (U != Cond && U != Phi) return false;
  return true;
}
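
// For illustration (hypothetical IR): in
//   %iv = phi i64 [ 0, %preheader ], [ %iv.next, %latch ]
//   %iv.next = add i64 %iv, 1
//   %exitcond = icmp eq i64 %iv.next, %n
// AlmostDeadIV(%iv, %latch, %exitcond) returns true, since %iv and %iv.next
// feed only each other and the exit test. A second use, say a store of %iv,
// would make it return false.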
/// FindLoopCounter - Find an affine IV in canonical form.
///
/// BECount may be an i8* pointer type. The pointer difference is already a
/// valid count without scaling the address stride, so it remains a pointer
/// expression as far as SCEV is concerned.
///
/// Currently only valid for LFTR. See the comments on hasConcreteDef above.
///
/// FIXME: Accept -1 stride and set IVLimit = IVInit - BECount.
///
/// FIXME: Accept non-unit stride as long as SCEV can reduce BECount * Stride.
/// This is difficult in general for SCEV because of potential overflow. But we
/// could at least handle constant BECounts.
static PHINode *
FindLoopCounter(Loop *L, const SCEV *BECount,
                ScalarEvolution *SE, DominatorTree *DT, const DataLayout *DL) {
  uint64_t BCWidth = SE->getTypeSizeInBits(BECount->getType());

  Value *Cond =
    cast<BranchInst>(L->getExitingBlock()->getTerminator())->getCondition();

  // Loop over all of the PHI nodes, looking for a simple counter.
  PHINode *BestPhi = nullptr;
  const SCEV *BestInit = nullptr;
  BasicBlock *LatchBlock = L->getLoopLatch();
  assert(LatchBlock && "needsLFTR should guarantee a loop latch");

  for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I) {
    PHINode *Phi = cast<PHINode>(I);
    if (!SE->isSCEVable(Phi->getType()))
      continue;

    // Avoid comparing an integer IV against a pointer Limit.
    if (BECount->getType()->isPointerTy() && !Phi->getType()->isPointerTy())
      continue;

    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Phi));
    if (!AR || AR->getLoop() != L || !AR->isAffine())
      continue;

    // AR may be a pointer type, while BECount is an integer type.
    // AR may be wider than BECount. With eq/ne tests overflow is immaterial.
    // AR may not be a narrower type, or we may never exit.
    uint64_t PhiWidth = SE->getTypeSizeInBits(AR->getType());
    if (PhiWidth < BCWidth || (DL && !DL->isLegalInteger(PhiWidth)))
      continue;

    const SCEV *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*SE));
    if (!Step || !Step->isOne())
      continue;

    int LatchIdx = Phi->getBasicBlockIndex(LatchBlock);
    Value *IncV = Phi->getIncomingValue(LatchIdx);
    if (getLoopPhiForCounter(IncV, L, DT) != Phi)
      continue;

    // Avoid reusing a potentially undef value to compute other values that may
    // have originally had a concrete definition.
    if (!hasConcreteDef(Phi)) {
      // We explicitly allow unknown phis as long as they are already used by
      // the loop test. In this case we assume that performing LFTR could not
      // increase the number of undef users.
      if (ICmpInst *Cond = getLoopTest(L)) {
        if (Phi != getLoopPhiForCounter(Cond->getOperand(0), L, DT) &&
            Phi != getLoopPhiForCounter(Cond->getOperand(1), L, DT)) {
          continue;
        }
      }
    }
    const SCEV *Init = AR->getStart();

    if (BestPhi && !AlmostDeadIV(BestPhi, LatchBlock, Cond)) {
      // Don't force a live loop counter if another IV can be used.
      if (AlmostDeadIV(Phi, LatchBlock, Cond))
        continue;

      // Prefer to count from zero. This is a more "canonical" counter form. It
      // also prefers integer to pointer IVs.
      if (BestInit->isZero() != Init->isZero()) {
        if (BestInit->isZero())
          continue;
      }
      // If two IVs both count from zero or both count from nonzero then the
      // narrower is likely a dead phi that has been widened. Use the wider phi
      // to allow the other to be eliminated.
      else if (PhiWidth <= SE->getTypeSizeInBits(BestPhi->getType()))
        continue;
    }
    BestPhi = Phi;
    BestInit = Init;
  }
  return BestPhi;
}
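
// For illustration (hypothetical phis): if the header carries both a widened
// counter {0,+,1} over i64 and the original {0,+,1} over i32, FindLoopCounter
// picks the wider i64 phi so the narrower one can be eliminated; between a
// count-from-zero counter and one starting at a nonzero value, it prefers the
// zero-based form, unless that would force an otherwise almost-dead IV to
// stay live.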
/// genLoopLimit - Help LinearFunctionTestReplace by generating a value that
/// holds the RHS of the new loop test.
static Value *genLoopLimit(PHINode *IndVar, const SCEV *IVCount, Loop *L,
                           SCEVExpander &Rewriter, ScalarEvolution *SE) {
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(IndVar));
  assert(AR && AR->getLoop() == L && AR->isAffine() && "bad loop counter");
  const SCEV *IVInit = AR->getStart();

  // IVInit may be a pointer while IVCount is an integer when FindLoopCounter
  // finds a valid pointer IV. Sign extend BECount in order to materialize a
  // GEP. Avoid running SCEVExpander on a new pointer value, instead reusing
  // the existing GEPs whenever possible.
  if (IndVar->getType()->isPointerTy() &&
      !IVCount->getType()->isPointerTy()) {

    // IVOffset will be the new GEP offset that is interpreted by GEP as a
    // signed value. IVCount on the other hand represents the loop trip count,
    // which is an unsigned value. FindLoopCounter only allows induction
    // variables with a positive unit stride, so we don't have to handle the
    // case of negative offsets (yet) and just need to zero extend IVCount.
    Type *OfsTy = SE->getEffectiveSCEVType(IVInit->getType());
    const SCEV *IVOffset = SE->getTruncateOrZeroExtend(IVCount, OfsTy);

    // Expand the code for the iteration count.
    assert(SE->isLoopInvariant(IVOffset, L) &&
           "Computed iteration count is not loop invariant!");
    BranchInst *BI = cast<BranchInst>(L->getExitingBlock()->getTerminator());
    Value *GEPOffset = Rewriter.expandCodeFor(IVOffset, OfsTy, BI);

    Value *GEPBase = IndVar->getIncomingValueForBlock(L->getLoopPreheader());
    assert(AR->getStart() == SE->getSCEV(GEPBase) && "bad loop counter");
    // We could handle pointer IVs other than i8*, but we need to compensate
    // for gep index scaling. See canExpandBackedgeTakenCount comments.
    assert(SE->getSizeOfExpr(IntegerType::getInt64Ty(IndVar->getContext()),
             cast<PointerType>(GEPBase->getType())->getElementType())->isOne()
           && "unit stride pointer IV must be i8*");

    IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
    return Builder.CreateGEP(GEPBase, GEPOffset, "lftr.limit");
  } else {
    // In any other case, convert both IVInit and IVCount to integers before
    // comparing. This may result in SCEV expansion of pointers, but in
    // practice SCEV will fold the pointer arithmetic away as such:
    //   BECount = (IVEnd - IVInit - 1) => IVLimit = IVInit (postinc).
    //
    // Valid cases: (1) both are integers, which is the most common; (2) both
    // may be pointers, for simple memset-style loops.
    //
    // IVInit integer and IVCount pointer would only occur if a canonical IV
    // were generated on top of case #2, which is not expected.

    const SCEV *IVLimit = nullptr;
    // For a unit-stride counter, the limit is IVLimit = Start + IVCount, with
    // 2's complement overflow allowed. When Start is zero the limit is just
    // IVCount; for non-zero Start, compute Start + IVCount here.
    if (AR->getStart()->isZero())
      IVLimit = IVCount;
    else {
      assert(AR->getStepRecurrence(*SE)->isOne() && "only handles unit stride");
      const SCEV *IVInit = AR->getStart();

      // For integer IVs, truncate the IV before computing IVInit + BECount.
      if (SE->getTypeSizeInBits(IVInit->getType()) >
          SE->getTypeSizeInBits(IVCount->getType()))
        IVInit = SE->getTruncateExpr(IVInit, IVCount->getType());

      IVLimit = SE->getAddExpr(IVInit, IVCount);
    }
    // Expand the code for the iteration count.
    BranchInst *BI = cast<BranchInst>(L->getExitingBlock()->getTerminator());
    IRBuilder<> Builder(BI);
    assert(SE->isLoopInvariant(IVLimit, L) &&
           "Computed iteration count is not loop invariant!");
    // Ensure that we generate the same type as IndVar, or a smaller integer
    // type. In the presence of null pointer values, we have an integer type
    // SCEV expression (IVInit) for a pointer type IV value (IndVar).
    Type *LimitTy = IVCount->getType()->isPointerTy() ?
      IndVar->getType() : IVCount->getType();
    return Rewriter.expandCodeFor(IVLimit, LimitTy, BI);
  }
}
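
// For illustration (hypothetical values): with a counter starting at 4 and
// IVCount = 10, genLoopLimit expands the SCEV (4 + 10) and the new exit test
// compares the counter against 14. For a unit-stride i8* IV, it instead
// emits something like
//   %lftr.limit = getelementptr i8* %base, i64 %offset
// in the preheader, reusing the existing GEP base rather than expanding a
// fresh pointer expression.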
/// LinearFunctionTestReplace - This method rewrites the exit condition of the
/// loop to be a canonical != comparison against the incremented loop induction
/// variable. This pass is able to rewrite the exit tests of any loop where the
/// SCEV analysis can determine a loop-invariant trip count of the loop, which
/// is actually a much broader range than just linear tests.
Value *IndVarSimplify::
LinearFunctionTestReplace(Loop *L,
                          const SCEV *BackedgeTakenCount,
                          PHINode *IndVar,
                          SCEVExpander &Rewriter) {
  assert(canExpandBackedgeTakenCount(L, SE) && "precondition");

  // Initialize CmpIndVar and IVCount to their preincremented values.
  Value *CmpIndVar = IndVar;
  const SCEV *IVCount = BackedgeTakenCount;

  // If the exiting block is the same as the backedge block, we prefer to
  // compare against the post-incremented value; otherwise we must compare
  // against the preincremented value.
  if (L->getExitingBlock() == L->getLoopLatch()) {
    // The BackedgeTaken expression contains the number of times that the
    // backedge branches to the loop header. This is one less than the
    // number of times the loop executes, so use the incremented indvar.
    llvm::Value *IncrementedIndvar =
        IndVar->getIncomingValueForBlock(L->getExitingBlock());
    const auto *IncrementedIndvarSCEV =
        cast<SCEVAddRecExpr>(SE->getSCEV(IncrementedIndvar));
    // It is unsafe to use the incremented indvar if it has a wrapping flag;
    // we don't want to compare against a poison value. Check the SCEV that
    // corresponds to the incremented indvar; the SCEVExpander will only
    // insert flags in the IR if the SCEV originally had wrapping flags.
    // FIXME: In theory, SCEV could drop flags even though they exist in IR.
    // A more robust solution would involve getting a new expression for
    // CmpIndVar by applying non-NSW/NUW AddExprs.
    auto WrappingFlags =
        ScalarEvolution::setFlags(SCEV::FlagNUW, SCEV::FlagNSW);
    const SCEV *IVInit = IncrementedIndvarSCEV->getStart();
    if (SE->getTypeSizeInBits(IVInit->getType()) >
        SE->getTypeSizeInBits(IVCount->getType()))
      IVInit = SE->getTruncateExpr(IVInit, IVCount->getType());
    unsigned BitWidth = SE->getTypeSizeInBits(IVCount->getType());
    Type *WideTy = IntegerType::get(SE->getContext(), BitWidth + 1);
    // Check whether sign/zero extension distributes over
    // IVInit + BackedgeTakenCount. If it does, the addition cannot wrap in
    // that sense, so clear the corresponding flag from WrappingFlags; its
    // presence on the IncrementedIndvarSCEV expression is then harmless.
    if (SE->getSignExtendExpr(SE->getAddExpr(IVInit, BackedgeTakenCount),
                              WideTy) ==
        SE->getAddExpr(SE->getSignExtendExpr(IVInit, WideTy),
                       SE->getSignExtendExpr(BackedgeTakenCount, WideTy)))
      WrappingFlags = ScalarEvolution::clearFlags(WrappingFlags, SCEV::FlagNSW);
    if (SE->getZeroExtendExpr(SE->getAddExpr(IVInit, BackedgeTakenCount),
                              WideTy) ==
        SE->getAddExpr(SE->getZeroExtendExpr(IVInit, WideTy),
                       SE->getZeroExtendExpr(BackedgeTakenCount, WideTy)))
      WrappingFlags = ScalarEvolution::clearFlags(WrappingFlags, SCEV::FlagNUW);
    if (!ScalarEvolution::maskFlags(IncrementedIndvarSCEV->getNoWrapFlags(),
                                    WrappingFlags)) {
      // Add one to the "backedge-taken" count to get the trip count.
      // This addition may overflow, which is valid as long as the comparison
      // is truncated to BackedgeTakenCount->getType().
      IVCount =
          SE->getAddExpr(BackedgeTakenCount,
                         SE->getConstant(BackedgeTakenCount->getType(), 1));
      CmpIndVar = IncrementedIndvar;
    }
  }

  Value *ExitCnt = genLoopLimit(IndVar, IVCount, L, Rewriter, SE);
  assert(ExitCnt->getType()->isPointerTy() == IndVar->getType()->isPointerTy()
         && "genLoopLimit missed a cast");

  // Insert a new icmp_ne or icmp_eq instruction before the branch.
  BranchInst *BI = cast<BranchInst>(L->getExitingBlock()->getTerminator());
  ICmpInst::Predicate P;
  if (L->contains(BI->getSuccessor(0)))
    P = ICmpInst::ICMP_NE;
  else
    P = ICmpInst::ICMP_EQ;

  DEBUG(dbgs() << "INDVARS: Rewriting loop exit condition to:\n"
               << "      LHS:" << *CmpIndVar << '\n'
               << "       op:\t"
               << (P == ICmpInst::ICMP_NE ? "!=" : "==") << "\n"
               << "      RHS:\t" << *ExitCnt << "\n"
               << "  IVCount:\t" << *IVCount << "\n");

  IRBuilder<> Builder(BI);

  // LFTR can ignore IV overflow and truncate to the width of
  // BECount. This avoids materializing the add(zext(add)) expression.
  unsigned CmpIndVarSize = SE->getTypeSizeInBits(CmpIndVar->getType());
  unsigned ExitCntSize = SE->getTypeSizeInBits(ExitCnt->getType());
  if (CmpIndVarSize > ExitCntSize) {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(SE->getSCEV(IndVar));
    const SCEV *ARStart = AR->getStart();
    const SCEV *ARStep = AR->getStepRecurrence(*SE);
    // For constant IVCount, avoid truncation.
    if (isa<SCEVConstant>(ARStart) && isa<SCEVConstant>(IVCount)) {
      const APInt &Start = cast<SCEVConstant>(ARStart)->getValue()->getValue();
      APInt Count = cast<SCEVConstant>(IVCount)->getValue()->getValue();
      // Note that the post-increment trip count (BackedgeTakenCount + 1)
      // computed above may have overflowed, in which case IVCount is now
      // zero.
      if (IVCount != BackedgeTakenCount && Count == 0) {
        Count = APInt::getMaxValue(Count.getBitWidth()).zext(CmpIndVarSize);
        ++Count;
      } else
        Count = Count.zext(CmpIndVarSize);
      APInt NewLimit;
      if (cast<SCEVConstant>(ARStep)->getValue()->isNegative())
        NewLimit = Start - Count;
      else
        NewLimit = Start + Count;
      ExitCnt = ConstantInt::get(CmpIndVar->getType(), NewLimit);

      DEBUG(dbgs() << "  Widen RHS:\t" << *ExitCnt << "\n");
    } else {
      CmpIndVar = Builder.CreateTrunc(CmpIndVar, ExitCnt->getType(),
                                      "lftr.wideiv");
    }
  }
  Value *Cond = Builder.CreateICmp(P, CmpIndVar, ExitCnt, "exitcond");
  Value *OrigCond = BI->getCondition();
  // It's tempting to use replaceAllUsesWith here to fully replace the old
  // comparison, but that's not immediately safe, since users of the old
  // comparison may not be dominated by the new comparison. Instead, just
  // update the branch to use the new comparison; in the common case this
  // will make the old comparison dead.
  BI->setCondition(Cond);
  DeadInsts.push_back(OrigCond);

  ++NumLFTR;
  Changed = true;
  return Cond;
}
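
// For illustration (hypothetical C loop): a latch test of the form 'i < n'
// is rewritten to an inequality against the incremented counter, roughly
// 'i + 1 != limit', where 'limit' is the loop-invariant value produced by
// genLoopLimit above; the post-incremented form is used only when the
// exiting block is the latch and the increment carries no needed wrap flags.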
//===----------------------------------------------------------------------===//
//  SinkUnusedInvariants. A late subpass to clean up loop preheaders.
//===----------------------------------------------------------------------===//

/// If there's a single exit block, sink any loop-invariant values that
/// were defined in the preheader but not used inside the loop into the
/// exit block to reduce register pressure in the loop.
void IndVarSimplify::SinkUnusedInvariants(Loop *L) {
  BasicBlock *ExitBlock = L->getExitBlock();
  if (!ExitBlock) return;

  BasicBlock *Preheader = L->getLoopPreheader();
  if (!Preheader) return;

  Instruction *InsertPt = ExitBlock->getFirstInsertionPt();
  BasicBlock::iterator I = Preheader->getTerminator();
  while (I != Preheader->begin()) {
    --I;
    // New instructions were inserted at the end of the preheader.
    if (isa<PHINode>(I))
      break;

    // Don't move instructions which might have side effects, since the side
    // effects need to complete before the instructions inside the loop
    // execute. Also don't move instructions which might read memory, since
    // the loop may modify memory. Note that it's okay if the instruction
    // might have undefined behavior: LoopSimplify guarantees that the
    // preheader dominates the exit block.
    if (I->mayHaveSideEffects() || I->mayReadFromMemory())
      continue;

    // Skip debug info intrinsics.
    if (isa<DbgInfoIntrinsic>(I))
      continue;

    // Skip landingpad instructions.
    if (isa<LandingPadInst>(I))
      continue;

    // Don't sink alloca: we never want to sink static allocas out of the
    // entry block, and correctly sinking dynamic allocas requires
    // checks for stacksave/stackrestore intrinsics.
    // FIXME: Refactor this check somehow?
    if (isa<AllocaInst>(I))
      continue;

    // Determine if there is a use in or before the loop (direct or
    // otherwise).
    bool UsedInLoop = false;
    for (Use &U : I->uses()) {
      Instruction *User = cast<Instruction>(U.getUser());
      BasicBlock *UseBB = User->getParent();
      if (PHINode *P = dyn_cast<PHINode>(User)) {
        unsigned i =
            PHINode::getIncomingValueNumForOperand(U.getOperandNo());
        UseBB = P->getIncomingBlock(i);
      }
      if (UseBB == Preheader || L->contains(UseBB)) {
        UsedInLoop = true;
        break;
      }
    }

    // If there is, the def must remain in the preheader.
    if (UsedInLoop)
      continue;

    // Otherwise, sink it to the exit block.
    Instruction *ToMove = I;
    bool Done = false;

    if (I != Preheader->begin()) {
      // Skip debug info intrinsics.
      do {
        --I;
      } while (isa<DbgInfoIntrinsic>(I) && I != Preheader->begin());

      if (isa<DbgInfoIntrinsic>(I) && I == Preheader->begin())
        Done = true;
    } else {
      Done = true;
    }

    ToMove->moveBefore(InsertPt);
    if (Done) break;
    InsertPt = ToMove;
  }
}
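
// For illustration (hypothetical IR): a side-effect-free computation such as
//   preheader:
//     %inv = mul i32 %a, %b      ; only used after the loop
// is moved into the loop's single exit block, so it no longer holds a
// register live across the loop body.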
//===----------------------------------------------------------------------===//
//  IndVarSimplify driver. Manage several subpasses of IV simplification.
//===----------------------------------------------------------------------===//

bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
  if (skipOptnoneFunction(L))
    return false;

  // If LoopSimplify form is not available, stay out of trouble. Some notes:
  //  - LSR currently only supports LoopSimplify-form loops. Indvars'
  //    canonicalization can be a pessimization without LSR to "clean up"
  //    afterwards.
  //  - We depend on having a preheader; in particular,
  //    Loop::getCanonicalInductionVariable only supports loops with
  //    preheaders, and we're in trouble if we can't find the induction
  //    variable even when we've manually inserted one.
  if (!L->isLoopSimplifyForm())
    return false;

  LI = &getAnalysis<LoopInfo>();
  SE = &getAnalysis<ScalarEvolution>();
  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
  DL = DLP ? &DLP->getDataLayout() : nullptr;
  TLI = getAnalysisIfAvailable<TargetLibraryInfo>();
  TTI = getAnalysisIfAvailable<TargetTransformInfo>();

  DeadInsts.clear();
  Changed = false;

  // If there are any floating-point recurrences, attempt to
  // transform them to use integer recurrences.
  RewriteNonIntegerIVs(L);

  const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);

  // Create a rewriter object which we'll use to transform the code with.
  SCEVExpander Rewriter(*SE, "indvars");
#ifndef NDEBUG
  Rewriter.setDebugType(DEBUG_TYPE);
#endif

  // Eliminate redundant IV users.
  //
  // Simplification works best when run before other consumers of SCEV. We
  // attempt to avoid evaluating SCEVs for sign/zero extend operations until
  // other expressions involving loop IVs have been evaluated. This helps SCEV
  // set no-wrap flags before normalizing sign/zero extension.
  Rewriter.disableCanonicalMode();
  SimplifyAndExtend(L, Rewriter, LPM);

  // Check to see if this loop has a computable loop-invariant execution
  // count. If so, this means that we can compute the final value of any
  // expressions that are recurrent in the loop, and substitute the exit
  // values from the loop into any instructions outside of the loop that use
  // the final values of the current expressions.
  if (!isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    RewriteLoopExitValues(L, Rewriter);
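  // For illustration (hypothetical source loop): for a recurrence like
  // 'for (i = 0; i != n; ++i) sum += 2;', the exit value of 'sum' can be
  // rewritten outside the loop as '2 * n', after which the loop body itself
  // may become dead.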

  // Eliminate redundant IV cycles.
  NumElimIV += Rewriter.replaceCongruentIVs(L, DT, DeadInsts);

  // If we have a trip count expression, rewrite the loop's exit condition
  // using it. We can currently only handle loops with a single exit.
  if (canExpandBackedgeTakenCount(L, SE) && needsLFTR(L, DT)) {
    PHINode *IndVar = FindLoopCounter(L, BackedgeTakenCount, SE, DT, DL);
    if (IndVar) {
      // Check preconditions for proper SCEVExpander operation. SCEV does not
      // express SCEVExpander's dependencies, such as LoopSimplify. Instead
      // any pass that uses the SCEVExpander must do it. This does not work
      // well for loop passes because SCEVExpander makes assumptions about
      // all loops, while LoopPassManager only forces the current loop to be
      // simplified.
      //
      // FIXME: SCEV expansion has no way to bail out, so the caller must
      // explicitly check any assumptions made by SCEV. Brittle.
      const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(BackedgeTakenCount);
      if (!AR || AR->getLoop()->getLoopPreheader())
        (void)LinearFunctionTestReplace(L, BackedgeTakenCount, IndVar,
                                        Rewriter);
    }
  }
  // Clear the rewriter cache, because values that are in the rewriter's cache
  // can be deleted in the loop below, causing the AssertingVH in the cache to
  // trigger.
  Rewriter.clear();

  // Now that we're done iterating through lists, clean up any instructions
  // which are now dead.
  while (!DeadInsts.empty())
    if (Instruction *Inst =
            dyn_cast_or_null<Instruction>(&*DeadInsts.pop_back_val()))
      RecursivelyDeleteTriviallyDeadInstructions(Inst, TLI);

  // The Rewriter may not be used from this point on.

  // Loop-invariant instructions in the preheader that aren't used in the
  // loop may be sunk below the loop to reduce register pressure.
  SinkUnusedInvariants(L);

  // Clean up dead instructions.
  Changed |= DeleteDeadPHIs(L->getHeader(), TLI);
  // Check a post-condition.
  assert(L->isLCSSAForm(*DT) &&
         "Indvars did not leave the loop in lcssa form!");

  // Verify that LFTR and any other changes have not interfered with SCEV's
  // ability to compute trip count.
#ifndef NDEBUG
  if (VerifyIndvars && !isa<SCEVCouldNotCompute>(BackedgeTakenCount)) {
    SE->forgetLoop(L);
    const SCEV *NewBECount = SE->getBackedgeTakenCount(L);
    if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) <
        SE->getTypeSizeInBits(NewBECount->getType()))
      NewBECount = SE->getTruncateOrNoop(NewBECount,
                                         BackedgeTakenCount->getType());
    else
      BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount,
                                                 NewBECount->getType());
    assert(BackedgeTakenCount == NewBECount && "indvars must preserve SCEV");
  }
#endif

  return Changed;
}