LoopIdiomRecognize.cpp revision 218885
//===-- LoopIdiomRecognize.cpp - Loop idiom recognition -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass implements an idiom recognizer that transforms simple loops into a
// non-loop form. In cases that this kicks in, it can be a significant
// performance win.
//
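// For example, a loop like
//
//   for (i = 0; i != n; ++i)
//     A[i] = 0;
//
// is recognized as a strided store of a splattable value and rewritten into a
// single call to memset(A, 0, n); likewise, "for (i) A[i] = B[i];" can become
// a single call to memcpy.
//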
//===----------------------------------------------------------------------===//
//
// TODO List:
//
// Future loop memory idioms to recognize:
//   memcmp, memmove, strlen, etc.
// Future floating point idioms to recognize in -ffast-math mode:
//   fpowi
// Future integer operation idioms to recognize:
//   ctpop, ctlz, cttz
//
// Beware that isel's default lowering for ctpop is highly inefficient for
// i64 and larger types when i64 is legal and the value has few bits set. It
// would be good to enhance isel to emit a loop for ctpop in this case.
//
// We should enhance the memset/memcpy recognition to handle multiple stores in
// the loop. This would handle things like:
//   void foo(_Complex float *P)
//     for (i) { __real__(*P) = 0; __imag__(*P) = 0; }
//
// This could recognize common matrix multiplies and dot product idioms and
// replace them with calls to BLAS (if linked in??).
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "loop-idiom"
#include "llvm/Transforms/Scalar.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

STATISTIC(NumMemSet, "Number of memset's formed from loop stores");
STATISTIC(NumMemCpy, "Number of memcpy's formed from loop load+stores");

namespace {
  class LoopIdiomRecognize : public LoopPass {
    Loop *CurLoop;
    const TargetData *TD;
    DominatorTree *DT;
    ScalarEvolution *SE;
    TargetLibraryInfo *TLI;
  public:
    static char ID;
    explicit LoopIdiomRecognize() : LoopPass(ID) {
      initializeLoopIdiomRecognizePass(*PassRegistry::getPassRegistry());
    }

    bool runOnLoop(Loop *L, LPPassManager &LPM);
    bool runOnLoopBlock(BasicBlock *BB, const SCEV *BECount,
                        SmallVectorImpl<BasicBlock*> &ExitBlocks);

    bool processLoopStore(StoreInst *SI, const SCEV *BECount);
    bool processLoopMemSet(MemSetInst *MSI, const SCEV *BECount);

    bool processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
                                 unsigned StoreAlignment,
                                 Value *SplatValue, Instruction *TheStore,
                                 const SCEVAddRecExpr *Ev,
                                 const SCEV *BECount);
    bool processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize,
                                    const SCEVAddRecExpr *StoreEv,
                                    const SCEVAddRecExpr *LoadEv,
                                    const SCEV *BECount);

    /// This transformation requires natural loop information & requires that
    /// loop preheaders be inserted into the CFG.
    ///
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<LoopInfo>();
      AU.addPreserved<LoopInfo>();
      AU.addRequiredID(LoopSimplifyID);
      AU.addPreservedID(LoopSimplifyID);
      AU.addRequiredID(LCSSAID);
      AU.addPreservedID(LCSSAID);
      AU.addRequired<AliasAnalysis>();
      AU.addPreserved<AliasAnalysis>();
      AU.addRequired<ScalarEvolution>();
      AU.addPreserved<ScalarEvolution>();
      AU.addPreserved<DominatorTree>();
      AU.addRequired<DominatorTree>();
      AU.addRequired<TargetLibraryInfo>();
    }
  };
}

char LoopIdiomRecognize::ID = 0;
INITIALIZE_PASS_BEGIN(LoopIdiomRecognize, "loop-idiom", "Recognize loop idioms",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(LCSSA)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(LoopIdiomRecognize, "loop-idiom", "Recognize loop idioms",
                    false, false)

Pass *llvm::createLoopIdiomPass() { return new LoopIdiomRecognize(); }

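// Since the pass is registered above under the name "loop-idiom", it can be
// exercised in isolation (assuming a standard LLVM tools build) with, e.g.:
//
//   opt -loop-idiom -stats -S < input.ll
//
// where -stats prints the NumMemSet/NumMemCpy counters defined at the top of
// this file.
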
/// DeleteDeadInstruction - Delete this instruction. Before we do, go through
/// and zero out all the operands of this instruction. If any of them become
/// dead, delete them and the computation tree that feeds them.
///
static void DeleteDeadInstruction(Instruction *I, ScalarEvolution &SE) {
  SmallVector<Instruction*, 32> NowDeadInsts;

  NowDeadInsts.push_back(I);

  // Before we touch this instruction, remove it from SE!
  do {
    Instruction *DeadInst = NowDeadInsts.pop_back_val();

    // This instruction is dead, zap it, in stages. Start by removing it from
    // SCEV.
    SE.forgetValue(DeadInst);

    for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
      Value *Op = DeadInst->getOperand(op);
      DeadInst->setOperand(op, 0);

      // If this operand just became dead, add it to the NowDeadInsts list.
      if (!Op->use_empty()) continue;

      if (Instruction *OpI = dyn_cast<Instruction>(Op))
        if (isInstructionTriviallyDead(OpI))
          NowDeadInsts.push_back(OpI);
    }

    DeadInst->eraseFromParent();

  } while (!NowDeadInsts.empty());
}

bool LoopIdiomRecognize::runOnLoop(Loop *L, LPPassManager &LPM) {
  CurLoop = L;

  // The trip count of the loop must be analyzable.
  SE = &getAnalysis<ScalarEvolution>();
  if (!SE->hasLoopInvariantBackedgeTakenCount(L))
    return false;
  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BECount)) return false;

  // If this loop executes exactly one time, then it should be peeled, not
  // optimized by this pass.
  if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
    if (BECst->getValue()->getValue() == 0)
      return false;

  // We require target data for now.
  TD = getAnalysisIfAvailable<TargetData>();
  if (TD == 0) return false;

  DT = &getAnalysis<DominatorTree>();
  LoopInfo &LI = getAnalysis<LoopInfo>();
  TLI = &getAnalysis<TargetLibraryInfo>();

  SmallVector<BasicBlock*, 8> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);

  DEBUG(dbgs() << "loop-idiom Scanning: F["
               << L->getHeader()->getParent()->getName()
               << "] Loop %" << L->getHeader()->getName() << "\n");

  bool MadeChange = false;
  // Scan all the blocks in the loop that are not in subloops.
  for (Loop::block_iterator BI = L->block_begin(), E = L->block_end(); BI != E;
       ++BI) {
    // Ignore blocks in subloops.
    if (LI.getLoopFor(*BI) != CurLoop)
      continue;

    MadeChange |= runOnLoopBlock(*BI, BECount, ExitBlocks);
  }
  return MadeChange;
}

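// Note: ScalarEvolution's backedge-taken count is one less than the trip
// count, so the BECount == 0 check in runOnLoop above rejects loops whose body
// runs exactly once. For example, a loop writing A[0..99] has a trip count of
// 100 and a backedge-taken count of 99; the byte count formed later is
// (BECount + 1) * StoreSize.
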
/// runOnLoopBlock - Process the specified block, which lives in a counted loop
/// with the specified backedge count. This block is known to be in the current
/// loop and not in any subloops.
bool LoopIdiomRecognize::runOnLoopBlock(BasicBlock *BB, const SCEV *BECount,
                               SmallVectorImpl<BasicBlock*> &ExitBlocks) {
  // We can only promote stores in this block if they are unconditionally
  // executed in the loop. For a block to be unconditionally executed, it has
  // to dominate all the exit blocks of the loop. Verify this now.
  for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
    if (!DT->dominates(BB, ExitBlocks[i]))
      return false;

  bool MadeChange = false;
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ) {
    Instruction *Inst = I++;
    // Look for store instructions, which may be optimized to memset/memcpy.
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      WeakVH InstPtr(I);
      if (!processLoopStore(SI, BECount)) continue;
      MadeChange = true;

      // If processing the store invalidated our iterator, start over from the
      // top of the block.
      if (InstPtr == 0)
        I = BB->begin();
      continue;
    }

    // Look for memset instructions, which may be optimized to a larger memset.
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst)) {
      WeakVH InstPtr(I);
      if (!processLoopMemSet(MSI, BECount)) continue;
      MadeChange = true;

      // If processing the memset invalidated our iterator, start over from the
      // top of the block.
      if (InstPtr == 0)
        I = BB->begin();
      continue;
    }
  }

  return MadeChange;
}


/// processLoopStore - See if this store can be promoted to a memset or memcpy.
bool LoopIdiomRecognize::processLoopStore(StoreInst *SI, const SCEV *BECount) {
  if (SI->isVolatile()) return false;

  Value *StoredVal = SI->getValueOperand();
  Value *StorePtr = SI->getPointerOperand();

  // Reject stores whose size isn't a whole number of bytes, or that are so
  // large that they overflow an unsigned.
  uint64_t SizeInBits = TD->getTypeSizeInBits(StoredVal->getType());
  if ((SizeInBits & 7) || (SizeInBits >> 32) != 0)
    return false;

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided store. If we have something else, it's a
  // random store we can't handle.
  const SCEVAddRecExpr *StoreEv =
    dyn_cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
  if (StoreEv == 0 || StoreEv->getLoop() != CurLoop || !StoreEv->isAffine())
    return false;

  // Check to see if the stride matches the size of the store. If so, then we
  // know that every byte is touched in the loop.
  unsigned StoreSize = (unsigned)SizeInBits >> 3;
  const SCEVConstant *Stride = dyn_cast<SCEVConstant>(StoreEv->getOperand(1));

  // TODO: Could also handle negative stride here someday, though that will
  // require updating the validity check in mayLoopAccessLocation.
  if (Stride == 0 || StoreSize != Stride->getValue()->getValue())
    return false;

  // See if we can optimize just this store in isolation.
  if (processLoopStridedStore(StorePtr, StoreSize, SI->getAlignment(),
                              StoredVal, SI, StoreEv, BECount))
    return true;

  // If the stored value is a strided load in the same loop with the same
  // stride, this may be transformable into a memcpy. This kicks in for stuff
  // like
  //   for (i) A[i] = B[i];
  if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
    const SCEVAddRecExpr *LoadEv =
      dyn_cast<SCEVAddRecExpr>(SE->getSCEV(LI->getOperand(0)));
    if (LoadEv && LoadEv->getLoop() == CurLoop && LoadEv->isAffine() &&
        StoreEv->getOperand(1) == LoadEv->getOperand(1) && !LI->isVolatile())
      if (processLoopStoreOfLoopLoad(SI, StoreSize, StoreEv, LoadEv, BECount))
        return true;
  }
  //errs() << "UNHANDLED strided store: " << *StoreEv << " - " << *SI << "\n";

  return false;
}

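// As a concrete illustration of what processLoopStore matches, consider this
// hypothetical IR sketch (names invented for the example):
//
//   loop:
//     %indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
//     %ptr = getelementptr i8* %A, i64 %indvar    ; SCEV: {%A,+,1}<%loop>
//     store i8 0, i8* %ptr
//
// The store pointer is an affine AddRec on the current loop whose stride (1)
// matches the store size (1 byte), so every byte in the range is written and
// the store is a memset candidate.
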
/// processLoopMemSet - See if this memset can be promoted to a large memset.
bool LoopIdiomRecognize::
processLoopMemSet(MemSetInst *MSI, const SCEV *BECount) {
  // We can only handle non-volatile memsets with a constant size.
  if (MSI->isVolatile() || !isa<ConstantInt>(MSI->getLength())) return false;

  // If we're not allowed to hack on memset, we fail.
  if (!TLI->has(LibFunc::memset))
    return false;

  Value *Pointer = MSI->getDest();

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided store. If we have something else, it's a
  // random store we can't handle.
  const SCEVAddRecExpr *Ev = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Pointer));
  if (Ev == 0 || Ev->getLoop() != CurLoop || !Ev->isAffine())
    return false;

  // Reject memsets that are so large that they overflow an unsigned.
  uint64_t SizeInBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
  if ((SizeInBytes >> 32) != 0)
    return false;

  // Check to see if the stride matches the size of the memset. If so, then we
  // know that every byte is touched in the loop.
  const SCEVConstant *Stride = dyn_cast<SCEVConstant>(Ev->getOperand(1));

  // TODO: Could also handle negative stride here someday, though that will
  // require updating the validity check in mayLoopAccessLocation.
  if (Stride == 0 || MSI->getLength() != Stride->getValue())
    return false;

  return processLoopStridedStore(Pointer, (unsigned)SizeInBytes,
                                 MSI->getAlignment(), MSI->getValue(),
                                 MSI, Ev, BECount);
}


/// mayLoopAccessLocation - Return true if the specified loop might access the
/// specified pointer location, which is a loop-strided access. The 'Access'
/// argument specifies what the verboten forms of access are (read or write).
static bool mayLoopAccessLocation(Value *Ptr,AliasAnalysis::ModRefResult Access,
                                  Loop *L, const SCEV *BECount,
                                  unsigned StoreSize, AliasAnalysis &AA,
                                  Instruction *IgnoredStore) {
  // Get the location that may be stored across the loop. Since the access is
  // strided positively through memory, we say that the modified location starts
  // at the pointer and has infinite size.
  uint64_t AccessSize = AliasAnalysis::UnknownSize;

  // If the loop iterates a fixed number of times, we can refine the access size
  // to be exactly the size of the memset, which is (BECount+1)*StoreSize
  if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
    AccessSize = (BECst->getValue()->getZExtValue()+1)*StoreSize;

  // TODO: For this to be really effective, we have to dive into the pointer
  // operand in the store. A store to &A[i] in a loop of 100 iterations will
  // always return MayAlias with a store to &A[100]; we need StoreLoc to be
  // "A" with a size of 100, which will then no-alias a store to &A[100].
  AliasAnalysis::Location StoreLoc(Ptr, AccessSize);

  for (Loop::block_iterator BI = L->block_begin(), E = L->block_end(); BI != E;
       ++BI)
    for (BasicBlock::iterator I = (*BI)->begin(), E = (*BI)->end(); I != E; ++I)
      if (&*I != IgnoredStore &&
          (AA.getModRefInfo(I, StoreLoc) & Access))
        return true;

  return false;
}

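// For instance, with a constant backedge-taken count of 99 and i32 stores
// (StoreSize == 4), AccessSize above refines to (99 + 1) * 4 == 400 bytes, so
// alias queries only consider the 400-byte window starting at Ptr rather than
// an unbounded location.
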
/// getMemSetPatternValue - If a strided store of the specified value is safe to
/// turn into a memset_pattern16, return a ConstantArray of 16 bytes that should
/// be passed in. Otherwise, return null.
///
/// Note that we don't ever attempt to use memset_pattern8 or 4, because these
/// just replicate their input array and then pass on to memset_pattern16.
static Constant *getMemSetPatternValue(Value *V, const TargetData &TD) {
  // If the value isn't a constant, we can't promote it to being in a constant
  // array. We could theoretically do a store to an alloca or something, but
  // that doesn't seem worthwhile.
  Constant *C = dyn_cast<Constant>(V);
  if (C == 0) return 0;

  // Only handle simple values that are a power of two bytes in size.
  uint64_t Size = TD.getTypeSizeInBits(V->getType());
  if (Size == 0 || (Size & 7) || (Size & (Size-1)))
    return 0;

  // Don't care enough about darwin/ppc to implement this.
  if (TD.isBigEndian())
    return 0;

  // Convert to size in bytes.
  Size /= 8;

  // TODO: If CI is larger than 16 bytes, we can try slicing it in half to see
  // if the top and bottom are the same (e.g. for vectors and large integers).
  if (Size > 16) return 0;

  // If the constant is exactly 16 bytes, just use it.
  if (Size == 16) return C;

  // Otherwise, we'll use an array of the constants.
  unsigned ArraySize = 16/Size;
  ArrayType *AT = ArrayType::get(V->getType(), ArraySize);
  return ConstantArray::get(AT, std::vector<Constant*>(ArraySize, C));
}


/// processLoopStridedStore - We see a strided store of some value. If we can
/// transform this into a memset or memset_pattern in the loop preheader, do so.
bool LoopIdiomRecognize::
processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
                        unsigned StoreAlignment, Value *StoredVal,
                        Instruction *TheStore, const SCEVAddRecExpr *Ev,
                        const SCEV *BECount) {

  // If the stored value is a byte-wise value (like i32 -1), then it may be
  // turned into a memset of i8 -1, assuming that all the consecutive bytes
  // are stored. A store of i32 0x01020304 can never be turned into a memset,
  // but it can be turned into memset_pattern if the target supports it.
  Value *SplatValue = isBytewiseValue(StoredVal);
  Constant *PatternValue = 0;

  // If we're allowed to form a memset, and the stored value would be acceptable
  // for memset, use it.
  if (SplatValue && TLI->has(LibFunc::memset) &&
      // Verify that the stored value is loop invariant. If not, we can't
      // promote the memset.
      CurLoop->isLoopInvariant(SplatValue)) {
    // Keep and use SplatValue.
    PatternValue = 0;
  } else if (TLI->has(LibFunc::memset_pattern16) &&
             (PatternValue = getMemSetPatternValue(StoredVal, *TD))) {
    // It looks like we can use PatternValue!
    SplatValue = 0;
  } else {
    // Otherwise, this isn't an idiom we can transform. For example, we can't
    // do anything with a 3-byte store.
    return false;
  }


  // Okay, we have a strided store "p[i]" of a splattable value. We can turn
  // this into a memset in the loop preheader now if we want. However, this
  // would be unsafe to do if there is anything else in the loop that may read
  // or write to the aliased location. Check for an alias.
  if (mayLoopAccessLocation(DestPtr, AliasAnalysis::ModRef,
                            CurLoop, BECount,
                            StoreSize, getAnalysis<AliasAnalysis>(), TheStore))
    return false;

  // Okay, everything looks good, insert the memset.
  BasicBlock *Preheader = CurLoop->getLoopPreheader();

  IRBuilder<> Builder(Preheader->getTerminator());

  // The trip count of the loop and the base pointer of the addrec SCEV are
  // guaranteed to be loop invariant, which means that they should dominate the
  // header. Just insert code for them in the preheader.
  SCEVExpander Expander(*SE);

  unsigned AddrSpace = cast<PointerType>(DestPtr->getType())->getAddressSpace();
  Value *BasePtr =
    Expander.expandCodeFor(Ev->getStart(), Builder.getInt8PtrTy(AddrSpace),
                           Preheader->getTerminator());

  // The # stored bytes is (BECount+1)*Size. Expand the trip count out to
  // pointer size if it isn't already.
  const Type *IntPtr = TD->getIntPtrType(DestPtr->getContext());
  BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);

  const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtr, 1),
                                         true /*no unsigned overflow*/);
  if (StoreSize != 1)
    NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtr, StoreSize),
                               true /*no unsigned overflow*/);

  Value *NumBytes =
    Expander.expandCodeFor(NumBytesS, IntPtr, Preheader->getTerminator());

  Value *NewCall;
  if (SplatValue)
    NewCall = Builder.CreateMemSet(BasePtr, SplatValue,NumBytes,StoreAlignment);
  else {
    Module *M = TheStore->getParent()->getParent()->getParent();
    Value *MSP = M->getOrInsertFunction("memset_pattern16",
                                        Builder.getVoidTy(),
                                        Builder.getInt8PtrTy(),
                                        Builder.getInt8PtrTy(), IntPtr,
                                        (void*)0);

    // Otherwise we should form a memset_pattern16. PatternValue is known to be
    // a constant array of 16 bytes. Plop the value into a mergable global.
    GlobalVariable *GV = new GlobalVariable(*M, PatternValue->getType(), true,
                                            GlobalValue::InternalLinkage,
                                            PatternValue, ".memset_pattern");
    GV->setUnnamedAddr(true); // Ok to merge these.
    GV->setAlignment(16);
    Value *PatternPtr = ConstantExpr::getBitCast(GV, Builder.getInt8PtrTy());
    NewCall = Builder.CreateCall3(MSP, BasePtr, PatternPtr, NumBytes);
  }

  DEBUG(dbgs() << "  Formed memset: " << *NewCall << "\n"
               << "    from store to: " << *Ev << " at: " << *TheStore << "\n");
  (void)NewCall;

  // Okay, the memset has been formed. Zap the original store and anything that
  // feeds into it.
  DeleteDeadInstruction(TheStore, *SE);
  ++NumMemSet;
  return true;
}

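// A note on the memset_pattern16 path above: on targets whose runtime provides
// it (notably Darwin), memset_pattern16(p, pattern, n) fills n bytes at p by
// repeating a 16-byte pattern. A loop storing the non-splattable constant
// i32 0x01020304 thus becomes a 16-byte internal global holding that value
// replicated four times, plus one call. A rough C equivalent of the rewrite
// (a sketch of the effect, not the emitted IR):
//
//   static const uint32_t pattern[4] =
//     {0x01020304, 0x01020304, 0x01020304, 0x01020304};
//   memset_pattern16(p, pattern, (BECount + 1) * 4);
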
/// processLoopStoreOfLoopLoad - We see a strided store whose value is a
/// same-strided load.
bool LoopIdiomRecognize::
processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize,
                           const SCEVAddRecExpr *StoreEv,
                           const SCEVAddRecExpr *LoadEv,
                           const SCEV *BECount) {
  // If we're not allowed to form memcpy, we fail.
  if (!TLI->has(LibFunc::memcpy))
    return false;

  LoadInst *LI = cast<LoadInst>(SI->getValueOperand());

  // Okay, we have a strided store "p[i]" of a loaded value. We can turn
  // this into a memcpy in the loop preheader now if we want. However, this
  // would be unsafe to do if there is anything else in the loop that may read
  // or write to the stored location (including the load feeding the stores).
  // Check for an alias.
  if (mayLoopAccessLocation(SI->getPointerOperand(), AliasAnalysis::ModRef,
                            CurLoop, BECount, StoreSize,
                            getAnalysis<AliasAnalysis>(), SI))
    return false;

  // For a memcpy, we have to make sure that the input array is not being
  // mutated by the loop.
  if (mayLoopAccessLocation(LI->getPointerOperand(), AliasAnalysis::Mod,
                            CurLoop, BECount, StoreSize,
                            getAnalysis<AliasAnalysis>(), SI))
    return false;

  // Okay, everything looks good, insert the memcpy.
  BasicBlock *Preheader = CurLoop->getLoopPreheader();

  IRBuilder<> Builder(Preheader->getTerminator());

  // The trip count of the loop and the base pointer of the addrec SCEV are
  // guaranteed to be loop invariant, which means that they should dominate the
  // header. Just insert code for them in the preheader.
  SCEVExpander Expander(*SE);

  Value *LoadBasePtr =
    Expander.expandCodeFor(LoadEv->getStart(),
                           Builder.getInt8PtrTy(LI->getPointerAddressSpace()),
                           Preheader->getTerminator());
  Value *StoreBasePtr =
    Expander.expandCodeFor(StoreEv->getStart(),
                           Builder.getInt8PtrTy(SI->getPointerAddressSpace()),
                           Preheader->getTerminator());

  // The # stored bytes is (BECount+1)*Size. Expand the trip count out to
  // pointer size if it isn't already.
  const Type *IntPtr = TD->getIntPtrType(SI->getContext());
  BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);

  const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtr, 1),
                                         true /*no unsigned overflow*/);
  if (StoreSize != 1)
    NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtr, StoreSize),
                               true /*no unsigned overflow*/);

  Value *NumBytes =
    Expander.expandCodeFor(NumBytesS, IntPtr, Preheader->getTerminator());

  Value *NewCall =
    Builder.CreateMemCpy(StoreBasePtr, LoadBasePtr, NumBytes,
                         std::min(SI->getAlignment(), LI->getAlignment()));

  DEBUG(dbgs() << "  Formed memcpy: " << *NewCall << "\n"
               << "    from load ptr=" << *LoadEv << " at: " << *LI << "\n"
               << "    from store ptr=" << *StoreEv << " at: " << *SI << "\n");
  (void)NewCall;

  // Okay, the memcpy has been formed. Zap the original store and anything that
  // feeds into it.
  DeleteDeadInstruction(SI, *SE);
  ++NumMemCpy;
  return true;
}