ScalarEvolutionExpander.cpp revision 327952
//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace PatternMatch;

/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
/// reusing an existing cast if a suitable one exists, moving an existing
/// cast if a suitable one exists but isn't in the right place, or
/// creating a new one.
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
                                       Instruction::CastOps Op,
                                       BasicBlock::iterator IP) {
  // This function must be called with the builder having a valid insertion
  // point. It doesn't need to be the actual IP where the uses of the returned
  // cast will be added, but it must dominate such IP.
  // We use this precondition to produce a cast that will dominate all its
  // uses. In particular, this is crucial for the case where the builder's
  // insertion point *is* the point where we were asked to put the cast.
  // Since we don't know the builder's insertion point is actually
  // where the uses will be added (only that it dominates it), we are
  // not allowed to move it.
  BasicBlock::iterator BIP = Builder.GetInsertPoint();

  Instruction *Ret = nullptr;

  // Check to see if there is already a cast!
  for (User *U : V->users())
    if (U->getType() == Ty)
      if (CastInst *CI = dyn_cast<CastInst>(U))
        if (CI->getOpcode() == Op) {
          // If the cast isn't where we want it, create a new cast at IP.
          // Likewise, do not reuse a cast at BIP because it must dominate
          // instructions that might be inserted before BIP.
          if (BasicBlock::iterator(CI) != IP || BIP == IP) {
            // Create a new cast, and leave the old cast in place in case
            // it is being used as an insert point. Clear its operand
            // so that it doesn't hold anything live.
            Ret = CastInst::Create(Op, V, Ty, "", &*IP);
            Ret->takeName(CI);
            CI->replaceAllUsesWith(Ret);
            CI->setOperand(0, UndefValue::get(V->getType()));
            break;
          }
          Ret = CI;
          break;
        }

  // Create a new cast.
  if (!Ret)
    Ret = CastInst::Create(Op, V, Ty, V->getName(), &*IP);

  // We assert at the end of the function since IP might point to an
  // instruction with different dominance properties than a cast
  // (an invoke for example) and not dominate BIP (but the cast does).
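/// Find a place after \p I where an instruction can be inserted while still
/// dominating \p MustDominate: step past I (for an invoke, into its normal
/// destination), then skip past any phis and EH pads. A catchswitch block
/// has no legal insertion point at all, so in that case fall back to
/// MustDominate's first insertion point.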
  assert(SE.DT.dominates(Ret, &*BIP));

  rememberInstruction(Ret);
  return Ret;
}

static BasicBlock::iterator findInsertPointAfter(Instruction *I,
                                                 BasicBlock *MustDominate) {
  BasicBlock::iterator IP = ++I->getIterator();
  if (auto *II = dyn_cast<InvokeInst>(I))
    IP = II->getNormalDest()->begin();

  while (isa<PHINode>(IP))
    ++IP;

  if (isa<FuncletPadInst>(IP) || isa<LandingPadInst>(IP)) {
    ++IP;
  } else if (isa<CatchSwitchInst>(IP)) {
    IP = MustDominate->getFirstInsertionPt();
  } else {
    assert(!IP->isEHPad() && "unexpected eh pad!");
  }

  return IP;
}

/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
  assert((Op == Instruction::BitCast ||
          Op == Instruction::PtrToInt ||
          Op == Instruction::IntToPtr) &&
         "InsertNoopCastOfTo cannot perform non-noop casts!");
  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
         "InsertNoopCastOfTo cannot change sizes!");

  // Short-circuit unnecessary bitcasts.
  if (Op == Instruction::BitCast) {
    if (V->getType() == Ty)
      return V;
    if (CastInst *CI = dyn_cast<CastInst>(V)) {
      if (CI->getOperand(0)->getType() == Ty)
        return CI->getOperand(0);
    }
  }
  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
  if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
    if (CastInst *CI = dyn_cast<CastInst>(V))
      if ((CI->getOpcode() == Instruction::PtrToInt ||
           CI->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CI->getType()) ==
          SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
        return CI->getOperand(0);
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      if ((CE->getOpcode() == Instruction::PtrToInt ||
           CE->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CE->getType()) ==
          SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
        return CE->getOperand(0);
  }

  // Fold a cast of a constant.
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(Op, C, Ty);

  // Cast the argument at the beginning of the entry block, after
  // any bitcasts of other arguments.
  if (Argument *A = dyn_cast<Argument>(V)) {
    BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
    while ((isa<BitCastInst>(IP) &&
            isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
            cast<BitCastInst>(IP)->getOperand(0) != A) ||
           isa<DbgInfoIntrinsic>(IP))
      ++IP;
    return ReuseOrCreateCast(A, Ty, Op, IP);
  }

  // Cast the instruction immediately after the instruction.
  Instruction *I = cast<Instruction>(V);
  BasicBlock::iterator IP = findInsertPointAfter(I, Builder.GetInsertBlock());
  return ReuseOrCreateCast(I, Ty, Op, IP);
}

/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation.
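/// For example, if an identical 'add %a, %b' (carrying no wrap or exact
/// flags) already sits within a few instructions of the insertion point, that
/// instruction is returned instead of emitting a duplicate.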
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
                                 Value *LHS, Value *RHS) {
  // Fold a binop with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      return ConstantExpr::get(Opcode, CLHS, CRHS);

  // Do a quick scan to see if we have this binop nearby. If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      // Don't count dbg.value against the ScanLimit, to avoid perturbing the
      // generated code.
      if (isa<DbgInfoIntrinsic>(IP))
        ScanLimit++;

      // Conservatively, do not use any instruction which has any of wrap/exact
      // flags installed.
      // TODO: Instead of simply disallowing poison-generating instructions, we
      // could be clever here and match SCEV to this instruction.
      auto canGeneratePoison = [](Instruction *I) {
        if (isa<OverflowingBinaryOperator>(I) &&
            (I->hasNoSignedWrap() || I->hasNoUnsignedWrap()))
          return true;
        if (isa<PossiblyExactOperator>(I) && I->isExact())
          return true;
        return false;
      };
      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
          IP->getOperand(1) == RHS && !canGeneratePoison(&*IP))
        return &*IP;
      if (IP == BlockBegin) break;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc();
  SCEVInsertPointGuard Guard(Builder, this);

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader->getTerminator());
  }

  // If we haven't found this binop, insert it.
  Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
  BO->setDebugLoc(Loc);
  rememberInstruction(BO);

  return BO;
}

/// FactorOutConstant - Test if S is divisible by Factor, using signed
/// division. If so, update S with Factor divided out and return true.
/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
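/// For example, factoring 8 out of the addrec {24,+,8} yields {3,+,1} with a
/// zero remainder, while factoring 8 out of the constant 27 yields 3 with a
/// remainder of 3.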
/// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
/// unnecessary; in its place, just signed-divide Ops[i] by the scale and
/// check to see if the divide was folded.
static bool FactorOutConstant(const SCEV *&S, const SCEV *&Remainder,
                              const SCEV *Factor, ScalarEvolution &SE,
                              const DataLayout &DL) {
  // Everything is divisible by one.
  if (Factor->isOne())
    return true;

  // x/x == 1.
  if (S == Factor) {
    S = SE.getConstant(S->getType(), 1);
    return true;
  }

  // For a Constant, check for a multiple of the given factor.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    // 0/x == 0.
    if (C->isZero())
      return true;
    // Check for divisibility.
    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
      ConstantInt *CI =
          ConstantInt::get(SE.getContext(), C->getAPInt().sdiv(FC->getAPInt()));
      // If the quotient is zero and the remainder is non-zero, reject
      // the value at this scale. It will be considered for subsequent
      // smaller scales.
      if (!CI->isZero()) {
        const SCEV *Div = SE.getConstant(CI);
        S = Div;
        Remainder = SE.getAddExpr(
            Remainder, SE.getConstant(C->getAPInt().srem(FC->getAPInt())));
        return true;
      }
    }
  }

  // In a Mul, check if there is a constant operand which is a multiple
  // of the given factor.
  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    // Size is known, check if there is a constant operand which is a multiple
    // of the given factor. If so, we can factor it.
    const SCEVConstant *FC = cast<SCEVConstant>(Factor);
    if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
      if (!C->getAPInt().srem(FC->getAPInt())) {
        SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
        NewMulOps[0] = SE.getConstant(C->getAPInt().sdiv(FC->getAPInt()));
        S = SE.getMulExpr(NewMulOps);
        return true;
      }
  }

  // In an AddRec, check if both start and step are divisible.
  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    const SCEV *Step = A->getStepRecurrence(SE);
    const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
    if (!FactorOutConstant(Step, StepRem, Factor, SE, DL))
      return false;
    if (!StepRem->isZero())
      return false;
    const SCEV *Start = A->getStart();
    if (!FactorOutConstant(Start, Remainder, Factor, SE, DL))
      return false;
    S = SE.getAddRecExpr(Start, Step, A->getLoop(),
                         A->getNoWrapFlags(SCEV::FlagNW));
    return true;
  }

  return false;
}

/// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
/// is the number of SCEVAddRecExprs present, which are kept at the end of
/// the list.
///
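/// Rebuilding the non-addrec part with SE.getAddExpr lets ScalarEvolution
/// fold constants and canonicalize the operands before the addrecs are
/// re-appended unchanged.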
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
                                Type *Ty,
                                ScalarEvolution &SE) {
  unsigned NumAddRecs = 0;
  for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
    ++NumAddRecs;
  // Group Ops into non-addrecs and addrecs.
  SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
  SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
  // Let ScalarEvolution sort and simplify the non-addrecs list.
  const SCEV *Sum = NoAddRecs.empty() ?
                    SE.getConstant(Ty, 0) :
                    SE.getAddExpr(NoAddRecs);
  // If it returned an add, use the operands. Otherwise it simplified
  // the sum into a single value, so just use that.
  Ops.clear();
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
    Ops.append(Add->op_begin(), Add->op_end());
  else if (!Sum->isZero())
    Ops.push_back(Sum);
  // Then append the addrecs.
  Ops.append(AddRecs.begin(), AddRecs.end());
}

/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
/// This helps expose more opportunities for folding parts of the expressions
/// into GEP indices.
///
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
                         Type *Ty,
                         ScalarEvolution &SE) {
  // Find the addrecs.
  SmallVector<const SCEV *, 8> AddRecs;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
      const SCEV *Start = A->getStart();
      if (Start->isZero()) break;
      const SCEV *Zero = SE.getConstant(Ty, 0);
      AddRecs.push_back(SE.getAddRecExpr(Zero,
                                         A->getStepRecurrence(SE),
                                         A->getLoop(),
                                         A->getNoWrapFlags(SCEV::FlagNW)));
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
        Ops[i] = Zero;
        Ops.append(Add->op_begin(), Add->op_end());
        e += Add->getNumOperands();
      } else {
        Ops[i] = Start;
      }
    }
  if (!AddRecs.empty()) {
    // Add the addrecs onto the end of the list.
    Ops.append(AddRecs.begin(), AddRecs.end());
    // Re-sort the operand list, moving any constants to the front.
    SimplifyAddOperands(Ops, Ty, SE);
  }
}

/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
/// for details.
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
///
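/// For example, instead of emitting ptrtoint+add+inttoptr for (%p + 4 * %i)
/// with i32* %p, this produces 'getelementptr i32, i32* %p, %i', which
/// BasicAliasAnalysis can reason about directly.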
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
                                    const SCEV *const *op_end,
                                    PointerType *PTy,
                                    Type *Ty,
                                    Value *V) {
  Type *OriginalElTy = PTy->getElementType();
  Type *ElTy = OriginalElTy;
  SmallVector<Value *, 4> GepIndices;
  SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
  bool AnyNonZeroIndices = false;

  // Split AddRecs up into parts as either of the parts may be usable
  // without the other.
  SplitAddRecs(Ops, Ty, SE);

  Type *IntPtrTy = DL.getIntPtrType(PTy);

  // Descend down the pointer's type and attempt to convert the other
  // operands into GEP indices, at each level. The first index in a GEP
  // indexes into the array implied by the pointer operand; the rest of
  // the indices index into the element or field type selected by the
  // preceding index.
  for (;;) {
    // If the scale size is not 0, attempt to factor out a scale for
    // array indexing.
    SmallVector<const SCEV *, 8> ScaledOps;
    if (ElTy->isSized()) {
      const SCEV *ElSize = SE.getSizeOfExpr(IntPtrTy, ElTy);
      if (!ElSize->isZero()) {
        SmallVector<const SCEV *, 8> NewOps;
        for (const SCEV *Op : Ops) {
          const SCEV *Remainder = SE.getConstant(Ty, 0);
          if (FactorOutConstant(Op, Remainder, ElSize, SE, DL)) {
            // Op now has ElSize factored out.
            ScaledOps.push_back(Op);
            if (!Remainder->isZero())
              NewOps.push_back(Remainder);
            AnyNonZeroIndices = true;
          } else {
            // The operand was not divisible, so add it to the list of operands
            // we'll scan next iteration.
            NewOps.push_back(Op);
          }
        }
        // If we made any changes, update Ops.
        if (!ScaledOps.empty()) {
          Ops = NewOps;
          SimplifyAddOperands(Ops, Ty, SE);
        }
      }
    }

    // Record the scaled array index for this level of the type. If
    // we didn't find any operands that could be factored, tentatively
    // assume that element zero was selected (since the zero offset
    // would obviously be folded away).
    Value *Scaled = ScaledOps.empty() ?
                    Constant::getNullValue(Ty) :
                    expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
    GepIndices.push_back(Scaled);

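    // For example, with ElTy = { i32, [4 x i64] } (assuming the usual 8-byte
    // alignment for i64), a constant offset of 16 selects field 1 at byte
    // offset 8, and the remaining 8 bytes then factor into an index of 1 over
    // the i64 array elements on the next iteration.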
    // Collect struct field index operands.
    while (StructType *STy = dyn_cast<StructType>(ElTy)) {
      bool FoundFieldNo = false;
      // An empty struct has no fields.
      if (STy->getNumElements() == 0) break;
      // Field offsets are known. See if a constant offset falls within any of
      // the struct fields.
      if (Ops.empty())
        break;
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
        if (SE.getTypeSizeInBits(C->getType()) <= 64) {
          const StructLayout &SL = *DL.getStructLayout(STy);
          uint64_t FullOffset = C->getValue()->getZExtValue();
          if (FullOffset < SL.getSizeInBytes()) {
            unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
            GepIndices.push_back(
                ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
            ElTy = STy->getTypeAtIndex(ElIdx);
            Ops[0] =
                SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
            AnyNonZeroIndices = true;
            FoundFieldNo = true;
          }
        }
      // If no struct field offsets were found, tentatively assume that
      // field zero was selected (since the zero offset would obviously
      // be folded away).
      if (!FoundFieldNo) {
        ElTy = STy->getTypeAtIndex(0u);
        GepIndices.push_back(
          Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
      }
    }

    if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
      ElTy = ATy->getElementType();
    else
      break;
  }

  // If none of the operands were convertible to proper GEP indices, cast
  // the base to i8* and do an ugly getelementptr with that. It's still
  // better than ptrtoint+arithmetic+inttoptr at least.
  if (!AnyNonZeroIndices) {
    // Cast the base to i8*.
    V = InsertNoopCastOfTo(V,
       Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));

    assert(!isa<Instruction>(V) ||
           SE.DT.dominates(cast<Instruction>(V), &*Builder.GetInsertPoint()));

    // Expand the operands for a plain byte offset.
    Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);

    // Fold a GEP with constant operands.
    if (Constant *CLHS = dyn_cast<Constant>(V))
      if (Constant *CRHS = dyn_cast<Constant>(Idx))
        return ConstantExpr::getGetElementPtr(Type::getInt8Ty(Ty->getContext()),
                                              CLHS, CRHS);

    // Do a quick scan to see if we have this GEP nearby. If so, reuse it.
    unsigned ScanLimit = 6;
    BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
    // Scanning starts from the last instruction before the insertion point.
    BasicBlock::iterator IP = Builder.GetInsertPoint();
    if (IP != BlockBegin) {
      --IP;
      for (; ScanLimit; --IP, --ScanLimit) {
        // Don't count dbg.value against the ScanLimit, to avoid perturbing the
        // generated code.
        if (isa<DbgInfoIntrinsic>(IP))
          ScanLimit++;
        if (IP->getOpcode() == Instruction::GetElementPtr &&
            IP->getOperand(0) == V && IP->getOperand(1) == Idx)
          return &*IP;
        if (IP == BlockBegin) break;
      }
    }

    // Save the original insertion point so we can restore it when we're done.
    SCEVInsertPointGuard Guard(Builder, this);

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader->getTerminator());
    }

    // Emit a GEP.
    Value *GEP = Builder.CreateGEP(Builder.getInt8Ty(), V, Idx, "uglygep");
    rememberInstruction(GEP);

    return GEP;
  }

  {
    SCEVInsertPointGuard Guard(Builder, this);

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V)) break;

      bool AnyIndexNotLoopInvariant = any_of(
          GepIndices, [L](Value *Op) { return !L->isLoopInvariant(Op); });

      if (AnyIndexNotLoopInvariant)
        break;

      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader->getTerminator());
    }

    // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
    // because ScalarEvolution may have changed the address arithmetic to
    // compute a value which is beyond the end of the allocated object.
    Value *Casted = V;
    if (V->getType() != PTy)
      Casted = InsertNoopCastOfTo(Casted, PTy);
    Value *GEP = Builder.CreateGEP(OriginalElTy, Casted, GepIndices, "scevgep");
    Ops.push_back(SE.getUnknown(GEP));
    rememberInstruction(GEP);
  }

  return expand(SE.getAddExpr(Ops));
}

/// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
/// SCEV expansion. If they are nested, this is the most nested. If they are
/// neighboring, pick the later one.
static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
                                        DominatorTree &DT) {
  if (!A) return B;
  if (!B) return A;
  if (A->contains(B)) return B;
  if (B->contains(A)) return A;
  if (DT.dominates(A->getHeader(), B->getHeader())) return B;
  if (DT.dominates(B->getHeader(), A->getHeader())) return A;
  return A; // Arbitrarily break the tie.
}

/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop.
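/// Results are memoized in the RelevantLoops map, since the same
/// subexpression is typically visited many times while expanding a large
/// expression.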
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
  // Test whether we've already computed the most relevant loop for this SCEV.
  auto Pair = RelevantLoops.insert(std::make_pair(S, nullptr));
  if (!Pair.second)
    return Pair.first->second;

  if (isa<SCEVConstant>(S))
    // A constant has no relevant loops.
    return nullptr;
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
      return Pair.first->second = SE.LI.getLoopFor(I->getParent());
    // A non-instruction has no relevant loops.
    return nullptr;
  }
  if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
    const Loop *L = nullptr;
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      L = AR->getLoop();
    for (const SCEV *Op : N->operands())
      L = PickMostRelevantLoop(L, getRelevantLoop(Op), SE.DT);
    return RelevantLoops[N] = L;
  }
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
    const Loop *Result = getRelevantLoop(C->getOperand());
    return RelevantLoops[C] = Result;
  }
  if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
    const Loop *Result = PickMostRelevantLoop(
        getRelevantLoop(D->getLHS()), getRelevantLoop(D->getRHS()), SE.DT);
    return RelevantLoops[D] = Result;
  }
  llvm_unreachable("Unexpected SCEV type!");
}

namespace {

/// LoopCompare - Compare loops by PickMostRelevantLoop.
class LoopCompare {
  DominatorTree &DT;
public:
  explicit LoopCompare(DominatorTree &dt) : DT(dt) {}

  bool operator()(std::pair<const Loop *, const SCEV *> LHS,
                  std::pair<const Loop *, const SCEV *> RHS) const {
    // Keep pointer operands sorted at the end.
    if (LHS.second->getType()->isPointerTy() !=
        RHS.second->getType()->isPointerTy())
      return LHS.second->getType()->isPointerTy();

    // Compare loops with PickMostRelevantLoop.
    if (LHS.first != RHS.first)
      return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;

    // If one operand is a non-constant negative and the other is not,
    // put the non-constant negative on the right so that a sub can
    // be used instead of a negate and add.
    if (LHS.second->isNonConstantNegative()) {
      if (!RHS.second->isNonConstantNegative())
        return false;
    } else if (RHS.second->isNonConstantNegative())
      return true;

    // Otherwise they are equivalent according to this comparison.
    return false;
  }
};

}

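// Expand an add expression. The operands are emitted grouped by their most
// relevant loop so that loop-invariant partial sums can be hoisted, with
// pointer operands ordered first so the remaining operands can be folded
// into getelementptrs.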
Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the add operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal, and
  // so that pointer operands are inserted first, which the code below relies on
  // to form more involved GEPs.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants and
  // pointer operands precede non-pointer operands.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(SE.DT));

  // Emit instructions to add all the operands. Hoist as much as possible
  // out of loops, and form meaningful getelementptrs where possible.
  Value *Sum = nullptr;
  for (auto I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E;) {
    const Loop *CurLoop = I->first;
    const SCEV *Op = I->second;
    if (!Sum) {
      // This is the first operand. Just expand it.
      Sum = expand(Op);
      ++I;
    } else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
      // The running sum expression is a pointer. Try to form a getelementptr
      // at this level with that as the base.
      SmallVector<const SCEV *, 4> NewOps;
      for (; I != E && I->first == CurLoop; ++I) {
        // If the operand is SCEVUnknown and not an instruction, peek through
        // it, to enable more of it to be folded into the GEP.
        const SCEV *X = I->second;
        if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
          if (!isa<Instruction>(U->getValue()))
            X = SE.getSCEV(U->getValue());
        NewOps.push_back(X);
      }
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
    } else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
      // The running sum is an integer, and there's a pointer at this level.
      // Try to form a getelementptr. If the running sum is an instruction,
      // use a SCEVUnknown to avoid re-analyzing it.
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) :
                                               SE.getSCEV(Sum));
      for (++I; I != E && I->first == CurLoop; ++I)
        NewOps.push_back(I->second);
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
    } else if (Op->isNonConstantNegative()) {
      // Instead of doing a negate and add, just do a subtract.
      Value *W = expandCodeFor(SE.getNegativeSCEV(Op), Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      Sum = InsertBinop(Instruction::Sub, Sum, W);
      ++I;
    } else {
      // A simple add.
      Value *W = expandCodeFor(Op, Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Sum)) std::swap(Sum, W);
      Sum = InsertBinop(Instruction::Add, Sum, W);
      ++I;
    }
  }

  return Sum;
}

Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the mul operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(SE.DT));

  // Emit instructions to mul all the operands. Hoist as much as possible
  // out of loops.
  Value *Prod = nullptr;
  auto I = OpsAndLoops.begin();

  // Expand the calculation of X pow N in the following manner:
  // Let N = P1 + P2 + ... + PK, where all P are powers of 2. Then:
  // X pow N = (X pow P1) * (X pow P2) * ... * (X pow PK).
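  // For example, X pow 11 with 11 = 1 + 2 + 8 becomes X * (X pow 2) *
  // (X pow 8): three squarings plus two extra multiplies instead of ten
  // multiplies.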
  const auto ExpandOpBinPowN = [this, &I, &OpsAndLoops, &Ty]() {
    auto E = I;
    // Calculate how many times the same operand from the same loop is included
    // into this power.
    uint64_t Exponent = 0;
    const uint64_t MaxExponent = UINT64_MAX >> 1;
    // No one sane will ever try to calculate such huge exponents, but if we
    // need this, we stop on UINT64_MAX / 2 because we need to exit the loop
    // below when the power of 2 exceeds our Exponent, and we want it to be
    // 1u << 31 at most to not deal with unsigned overflow.
    while (E != OpsAndLoops.end() && *I == *E && Exponent != MaxExponent) {
      ++Exponent;
      ++E;
    }
    assert(Exponent > 0 && "Trying to calculate a zeroth exponent of operand?");

    // Calculate powers with exponents 1, 2, 4, 8 etc. and include those that
    // are needed in the result.
    Value *P = expandCodeFor(I->second, Ty);
    Value *Result = nullptr;
    if (Exponent & 1)
      Result = P;
    for (uint64_t BinExp = 2; BinExp <= Exponent; BinExp <<= 1) {
      P = InsertBinop(Instruction::Mul, P, P);
      if (Exponent & BinExp)
        Result = Result ? InsertBinop(Instruction::Mul, Result, P) : P;
    }

    I = E;
    assert(Result && "Nothing was expanded?");
    return Result;
  };

  while (I != OpsAndLoops.end()) {
    if (!Prod) {
      // This is the first operand. Just expand it.
      Prod = ExpandOpBinPowN();
    } else if (I->second->isAllOnesValue()) {
      // Instead of doing a multiply by negative one, just do a negate.
      Prod = InsertNoopCastOfTo(Prod, Ty);
      Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod);
      ++I;
    } else {
      // A simple mul.
      Value *W = ExpandOpBinPowN();
      Prod = InsertNoopCastOfTo(Prod, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Prod)) std::swap(Prod, W);
      const APInt *RHS;
      if (match(W, m_Power2(RHS))) {
        // Canonicalize Prod*(1<<C) to Prod<<C.
        assert(!Ty->isVectorTy() && "vector types are not SCEVable");
        Prod = InsertBinop(Instruction::Shl, Prod,
                           ConstantInt::get(Ty, RHS->logBase2()));
      } else {
        Prod = InsertBinop(Instruction::Mul, Prod, W);
      }
    }
  }

  return Prod;
}

Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  Value *LHS = expandCodeFor(S->getLHS(), Ty);
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
    const APInt &RHS = SC->getAPInt();
    if (RHS.isPowerOf2())
      return InsertBinop(Instruction::LShr, LHS,
                         ConstantInt::get(Ty, RHS.logBase2()));
  }

  Value *RHS = expandCodeFor(S->getRHS(), Ty);
  return InsertBinop(Instruction::UDiv, LHS, RHS);
}

/// Move parts of Base into Rest to leave Base with the minimal
/// expression that provides a pointer operand suitable for a
/// GEP expansion.
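/// For example, a Base of {p,+,4} is reduced to just p, and the stripped
/// {0,+,4} recurrence is folded into Rest.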
static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
                              ScalarEvolution &SE) {
  while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
    Base = A->getStart();
    Rest = SE.getAddExpr(Rest,
                         SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
                                          A->getStepRecurrence(SE),
                                          A->getLoop(),
                                          A->getNoWrapFlags(SCEV::FlagNW)));
  }
  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
    Base = A->getOperand(A->getNumOperands()-1);
    SmallVector<const SCEV *, 8> NewAddOps(A->op_begin(), A->op_end());
    NewAddOps.back() = Rest;
    Rest = SE.getAddExpr(NewAddOps);
    ExposePointerBase(Base, Rest, SE);
  }
}

/// Determine if this is a well-behaved chain of instructions leading back to
/// the PHI. If so, it may be reused by expanded expressions.
bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                         const Loop *L) {
  if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
      (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
    return false;
  // If any of the operands don't dominate the insert position, bail.
  // Addrec operands are always loop-invariant, so this can only happen
  // if there are instructions which haven't been hoisted.
  if (L == IVIncInsertLoop) {
    for (User::op_iterator OI = IncV->op_begin()+1,
           OE = IncV->op_end(); OI != OE; ++OI)
      if (Instruction *OInst = dyn_cast<Instruction>(OI))
        if (!SE.DT.dominates(OInst, IVIncInsertPos))
          return false;
  }
  // Advance to the next instruction.
  IncV = dyn_cast<Instruction>(IncV->getOperand(0));
  if (!IncV)
    return false;

  if (IncV->mayHaveSideEffects())
    return false;

  if (IncV == PN)
    return true;

  return isNormalAddRecExprPHI(PN, IncV, L);
}

/// getIVIncOperand returns an induction variable increment's induction
/// variable operand.
///
/// If allowScale is set, any type of GEP is allowed as long as the nonIV
/// operands dominate InsertPos.
///
/// If allowScale is not set, ensure that a GEP increment conforms to one of the
/// simple patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP. If the pattern isn't recognized, return NULL.
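/// For example, given an increment 'IncV = add %iv, %step', this returns the
/// instruction defining %iv (null if it is not an instruction), provided the
/// definition of %step dominates InsertPos.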
Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
                                           Instruction *InsertPos,
                                           bool allowScale) {
  if (IncV == InsertPos)
    return nullptr;

  switch (IncV->getOpcode()) {
  default:
    return nullptr;
  // Check for a simple Add/Sub or GEP of a loop invariant step.
  case Instruction::Add:
  case Instruction::Sub: {
    Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
    if (!OInst || SE.DT.dominates(OInst, InsertPos))
      return dyn_cast<Instruction>(IncV->getOperand(0));
    return nullptr;
  }
  case Instruction::BitCast:
    return dyn_cast<Instruction>(IncV->getOperand(0));
  case Instruction::GetElementPtr:
    for (auto I = IncV->op_begin() + 1, E = IncV->op_end(); I != E; ++I) {
      if (isa<Constant>(*I))
        continue;
      if (Instruction *OInst = dyn_cast<Instruction>(*I)) {
        if (!SE.DT.dominates(OInst, InsertPos))
          return nullptr;
      }
      if (allowScale) {
        // allow any kind of GEP as long as it can be hoisted.
        continue;
      }
      // This must be a pointer addition of constants (pretty), which is already
      // handled, or some number of address-size elements (ugly). Ugly geps
      // have 2 operands. i1* is used by the expander to represent an
      // address-size element.
      if (IncV->getNumOperands() != 2)
        return nullptr;
      unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
      if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
          && IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
        return nullptr;
      break;
    }
    return dyn_cast<Instruction>(IncV->getOperand(0));
  }
}

/// If the insert point of the current builder or any of the builders on the
/// stack of saved builders has 'I' as its insert point, update it to point to
/// the instruction after 'I'. This is intended to be used when the instruction
/// 'I' is being moved. If this fixup is not done and 'I' is moved to a
/// different block, the inconsistent insert point (with a mismatched
/// Instruction and Block) can lead to an instruction being inserted in a block
/// other than its parent.
void SCEVExpander::fixupInsertPoints(Instruction *I) {
  BasicBlock::iterator It(*I);
  BasicBlock::iterator NewInsertPt = std::next(It);
  if (Builder.GetInsertPoint() == It)
    Builder.SetInsertPoint(&*NewInsertPt);
  for (auto *InsertPtGuard : InsertPointGuards)
    if (InsertPtGuard->GetInsertPoint() == It)
      InsertPtGuard->SetInsertPoint(NewInsertPt);
}

/// hoistIVInc - Attempt to hoist a simple IV increment above InsertPos to make
/// it available to other uses in this loop. Recursively hoist any operands,
/// until we reach a value that dominates InsertPos.
bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
  if (SE.DT.dominates(IncV, InsertPos))
    return true;

  // InsertPos must itself dominate IncV so that IncV's new position satisfies
  // its existing users.
  if (isa<PHINode>(InsertPos) ||
      !SE.DT.dominates(InsertPos->getParent(), IncV->getParent()))
    return false;

  if (!SE.LI.movementPreservesLCSSAForm(IncV, InsertPos))
    return false;

  // Check that the chain of IV operands leading back to Phi can be hoisted.
  SmallVector<Instruction*, 4> IVIncs;
  for (;;) {
    Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
    if (!Oper)
      return false;
    // IncV is safe to hoist.
    IVIncs.push_back(IncV);
    IncV = Oper;
    if (SE.DT.dominates(IncV, InsertPos))
      break;
  }
  for (auto I = IVIncs.rbegin(), E = IVIncs.rend(); I != E; ++I) {
    fixupInsertPoints(*I);
    (*I)->moveBefore(InsertPos);
  }
  return true;
}

/// Determine if this cyclic phi is in a form that would have been generated by
/// LSR. We don't care if the phi was actually expanded in this pass, as long
/// as it is in a low-cost form, for example, no implied multiplication. This
/// should match any patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP.
bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                           const Loop *L) {
  for (Instruction *IVOper = IncV;
       (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
                                 /*allowScale=*/false));) {
    if (IVOper == PN)
      return true;
  }
  return false;
}

/// expandIVInc - Expand an IV increment at Builder's current InsertPos.
/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
/// need to materialize IV increments elsewhere to handle difficult situations.
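/// A pointer-typed IV is advanced with a GEP. If the step is not a constant,
/// the GEP is formed over i1*, which the expander uses to represent an
/// address-size element, so that no multiply is needed inside the loop.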
Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
                                 Type *ExpandTy, Type *IntTy,
                                 bool useSubtract) {
  Value *IncV;
  // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
  if (ExpandTy->isPointerTy()) {
    PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
    // If the step isn't constant, don't use an implicitly scaled GEP, because
    // that would require a multiply inside the loop.
    if (!isa<ConstantInt>(StepV))
      GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
                                  GEPPtrTy->getAddressSpace());
    const SCEV *const StepArray[1] = { SE.getSCEV(StepV) };
    IncV = expandAddToGEP(StepArray, StepArray+1, GEPPtrTy, IntTy, PN);
    if (IncV->getType() != PN->getType()) {
      IncV = Builder.CreateBitCast(IncV, PN->getType());
      rememberInstruction(IncV);
    }
  } else {
    IncV = useSubtract ?
      Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
      Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
    rememberInstruction(IncV);
  }
  return IncV;
}

/// \brief Hoist the addrec instruction chain rooted in the loop phi above the
/// position. This routine assumes that this is possible (has been checked).
void SCEVExpander::hoistBeforePos(DominatorTree *DT, Instruction *InstToHoist,
                                  Instruction *Pos, PHINode *LoopPhi) {
  do {
    if (DT->dominates(InstToHoist, Pos))
      break;
    // Make sure the increment is where we want it. But don't move it
    // down past a potential existing post-inc user.
    fixupInsertPoints(InstToHoist);
    InstToHoist->moveBefore(Pos);
    Pos = InstToHoist;
    InstToHoist = cast<Instruction>(InstToHoist->getOperand(0));
  } while (InstToHoist != LoopPhi);
}

/// \brief Check whether we can cheaply express the requested SCEV in terms of
/// the available PHI SCEV by truncation and/or inversion of the step.
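/// For example, a requested {7,+,-1} can be recovered from an available
/// {0,+,1} phi after inversion, since {7,+,-1} == 7 - {0,+,1}.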
static bool canBeCheaplyTransformed(ScalarEvolution &SE,
                                    const SCEVAddRecExpr *Phi,
                                    const SCEVAddRecExpr *Requested,
                                    bool &InvertStep) {
  Type *PhiTy = SE.getEffectiveSCEVType(Phi->getType());
  Type *RequestedTy = SE.getEffectiveSCEVType(Requested->getType());

  if (RequestedTy->getIntegerBitWidth() > PhiTy->getIntegerBitWidth())
    return false;

  // Try truncating it if necessary.
  Phi = dyn_cast<SCEVAddRecExpr>(SE.getTruncateOrNoop(Phi, RequestedTy));
  if (!Phi)
    return false;

  // Check whether truncation will help.
  if (Phi == Requested) {
    InvertStep = false;
    return true;
  }

  // Check whether inverting will help: {R,+,-1} == R - {0,+,1}.
  if (SE.getAddExpr(Requested->getStart(),
                    SE.getNegativeSCEV(Requested)) == Phi) {
    InvertStep = true;
    return true;
  }

  return false;
}

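// IsIncrementNSW and IsIncrementNUW prove that an addrec's increment cannot
// wrap by checking, in a type twice as wide, that sign- or zero-extending the
// operands before the add produces the same value as extending the add's
// result.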
static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
  if (!isa<IntegerType>(AR->getType()))
    return false;

  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getSignExtendExpr(Step, WideTy),
                                            SE.getSignExtendExpr(AR, WideTy));
  const SCEV *ExtendAfterOp =
    SE.getSignExtendExpr(SE.getAddExpr(AR, Step), WideTy);
  return ExtendAfterOp == OpAfterExtend;
}

static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
  if (!isa<IntegerType>(AR->getType()))
    return false;

  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getZeroExtendExpr(Step, WideTy),
                                            SE.getZeroExtendExpr(AR, WideTy));
  const SCEV *ExtendAfterOp =
    SE.getZeroExtendExpr(SE.getAddExpr(AR, Step), WideTy);
  return ExtendAfterOp == OpAfterExtend;
}

/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
                                        const Loop *L,
                                        Type *ExpandTy,
                                        Type *IntTy,
                                        Type *&TruncTy,
                                        bool &InvertStep) {
  assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position");

  // Reuse a previously-inserted PHI, if present.
  BasicBlock *LatchBlock = L->getLoopLatch();
  if (LatchBlock) {
    PHINode *AddRecPhiMatch = nullptr;
    Instruction *IncV = nullptr;
    TruncTy = nullptr;
    InvertStep = false;

    // Only try partially matching scevs that need truncation and/or
    // step-inversion if we know this loop is outside the current loop.
    bool TryNonMatchingSCEV =
        IVIncInsertLoop &&
        SE.DT.properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());

    for (PHINode &PN : L->getHeader()->phis()) {
      if (!SE.isSCEVable(PN.getType()))
        continue;

      const SCEVAddRecExpr *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(&PN));
      if (!PhiSCEV)
        continue;

      bool IsMatchingSCEV = PhiSCEV == Normalized;
      // We only handle truncation and inversion of phi recurrences for the
      // expanded expression if the expanded expression's loop dominates the
      // loop we insert to. Check now, so we can bail out early.
      if (!IsMatchingSCEV && !TryNonMatchingSCEV)
        continue;

      Instruction *TempIncV =
          cast<Instruction>(PN.getIncomingValueForBlock(LatchBlock));

      // Check whether we can reuse this PHI node.
      if (LSRMode) {
        if (!isExpandedAddRecExprPHI(&PN, TempIncV, L))
          continue;
        if (L == IVIncInsertLoop && !hoistIVInc(TempIncV, IVIncInsertPos))
          continue;
      } else {
        if (!isNormalAddRecExprPHI(&PN, TempIncV, L))
          continue;
      }

      // Stop if we have found an exact match SCEV.
      if (IsMatchingSCEV) {
        IncV = TempIncV;
        TruncTy = nullptr;
        InvertStep = false;
        AddRecPhiMatch = &PN;
        break;
      }

      // Try whether the phi can be translated into the requested form
      // (truncated and/or offset by a constant).
      if ((!TruncTy || InvertStep) &&
          canBeCheaplyTransformed(SE, PhiSCEV, Normalized, InvertStep)) {
        // Record the phi node. But don't stop; we might find an exact match
        // later.
        AddRecPhiMatch = &PN;
        IncV = TempIncV;
        TruncTy = SE.getEffectiveSCEVType(Normalized->getType());
      }
    }

    if (AddRecPhiMatch) {
      // Potentially, move the increment. We have made sure in
      // isExpandedAddRecExprPHI or hoistIVInc that this is possible.
      if (L == IVIncInsertLoop)
        hoistBeforePos(&SE.DT, IncV, IVIncInsertPos, AddRecPhiMatch);

      // Ok, the add recurrence looks usable.
      // Remember this PHI, even in post-inc mode.
      InsertedValues.insert(AddRecPhiMatch);
      // Remember the increment.
      rememberInstruction(IncV);
      return AddRecPhiMatch;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  SCEVInsertPointGuard Guard(Builder, this);

  // Another AddRec may need to be recursively expanded below. For example, if
  // this AddRec is quadratic, the StepV may itself be an AddRec in this
  // loop. Remove this loop from the PostIncLoops set before expanding such
  // AddRecs. Otherwise, we cannot find a valid position for the step
  // (i.e. StepV can never dominate its loop header). Ideally, we could do
  // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
  // so it's not worth implementing SmallPtrSet::swap.
  PostIncLoopSet SavedPostIncLoops = PostIncLoops;
  PostIncLoops.clear();

  // Expand code for the start value into the loop preheader.
  assert(L->getLoopPreheader() &&
         "Can't expand add recurrences without a loop preheader!");
  Value *StartV = expandCodeFor(Normalized->getStart(), ExpandTy,
                                L->getLoopPreheader()->getTerminator());

  // StartV must have been inserted into L's preheader to dominate the new
  // phi.
  assert(!isa<Instruction>(StartV) ||
         SE.DT.properlyDominates(cast<Instruction>(StartV)->getParent(),
                                 L->getHeader()));

  // Expand code for the step value. Do this before creating the PHI so that PHI
  // reuse code doesn't see an incomplete PHI.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  // If the stride is negative, insert a sub instead of an add for the increment
  // (unless it's a constant, because subtracts of constants are canonicalized
  // to adds).
  bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
  if (useSubtract)
    Step = SE.getNegativeSCEV(Step);
  // Expand the step somewhere that dominates the loop header.
  Value *StepV = expandCodeFor(Step, IntTy, &L->getHeader()->front());

  // The no-wrap behavior proved by IsIncrement(NUW|NSW) is only applicable if
  // we actually do emit an addition. It does not apply if we emit a
  // subtraction.
  bool IncrementIsNUW = !useSubtract && IsIncrementNUW(SE, Normalized);
  bool IncrementIsNSW = !useSubtract && IsIncrementNSW(SE, Normalized);

  // Create the PHI.
  BasicBlock *Header = L->getHeader();
  Builder.SetInsertPoint(Header, Header->begin());
  pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
  PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
                                  Twine(IVName) + ".iv");
  rememberInstruction(PN);

  // Create the step instructions and populate the PHI.
  for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
    BasicBlock *Pred = *HPI;

    // Add a start value.
    if (!L->contains(Pred)) {
      PN->addIncoming(StartV, Pred);
      continue;
    }

    // Create a step value and add it to the PHI.
    // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the
    // instructions at IVIncInsertPos.
    Instruction *InsertPos = L == IVIncInsertLoop ?
                             IVIncInsertPos : Pred->getTerminator();
    Builder.SetInsertPoint(InsertPos);
    Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);

    if (isa<OverflowingBinaryOperator>(IncV)) {
      if (IncrementIsNUW)
        cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap();
      if (IncrementIsNSW)
        cast<BinaryOperator>(IncV)->setHasNoSignedWrap();
    }
    PN->addIncoming(IncV, Pred);
  }

  // After expanding subexpressions, restore the PostIncLoops set so the caller
  // can ensure that IVIncrement dominates the current uses.
  PostIncLoops = SavedPostIncLoops;

  // Remember this PHI, even in post-inc mode.
  InsertedValues.insert(PN);

  return PN;
}

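// Expand an addrec without relying on a canonical induction variable: strip
// any start or step component that does not dominate the loop into
// PostLoopOffset/PostLoopScale, materialize the remaining addrec as a phi
// and increment via getAddRecExprPHILiterally, and then re-apply the
// stripped scale and offset to the result.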
Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
  Type *STy = S->getType();
  Type *IntTy = SE.getEffectiveSCEVType(STy);
  const Loop *L = S->getLoop();

  // Determine a normalized form of this expression, which is the expression
  // before any post-inc adjustment is made.
  const SCEVAddRecExpr *Normalized = S;
  if (PostIncLoops.count(L)) {
    PostIncLoopSet Loops;
    Loops.insert(L);
    Normalized = cast<SCEVAddRecExpr>(normalizeForPostIncUse(S, Loops, SE));
  }

  // Strip off any non-loop-dominating component from the addrec start.
  const SCEV *Start = Normalized->getStart();
  const SCEV *PostLoopOffset = nullptr;
  if (!SE.properlyDominates(Start, L->getHeader())) {
    PostLoopOffset = Start;
    Start = SE.getConstant(Normalized->getType(), 0);
    Normalized = cast<SCEVAddRecExpr>(
      SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
                       Normalized->getLoop(),
                       Normalized->getNoWrapFlags(SCEV::FlagNW)));
  }

  // Strip off any non-loop-dominating component from the addrec step.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  const SCEV *PostLoopScale = nullptr;
  if (!SE.dominates(Step, L->getHeader())) {
    PostLoopScale = Step;
    Step = SE.getConstant(Normalized->getType(), 1);
    if (!Start->isZero()) {
      // The normalization below assumes that Start is constant zero, so if
      // it isn't re-associate Start to PostLoopOffset.
      assert(!PostLoopOffset && "Start not-null but PostLoopOffset set?");
      PostLoopOffset = Start;
      Start = SE.getConstant(Normalized->getType(), 0);
    }
    Normalized =
      cast<SCEVAddRecExpr>(SE.getAddRecExpr(
        Start, Step, Normalized->getLoop(),
        Normalized->getNoWrapFlags(SCEV::FlagNW)));
  }

  // Expand the core addrec. If we need post-loop scaling, force it to
  // expand to an integer type to avoid the need for additional casting.
  Type *ExpandTy = PostLoopScale ? IntTy : STy;
  // We can't use a pointer type for the addrec if the pointer type is
  // non-integral.
  Type *AddRecPHIExpandTy =
      DL.isNonIntegralPointerType(STy) ? Normalized->getType() : ExpandTy;

  // In some cases, we decide to reuse an existing phi node but need to truncate
  // it and/or invert the step.
  Type *TruncTy = nullptr;
  bool InvertStep = false;
  PHINode *PN = getAddRecExprPHILiterally(Normalized, L, AddRecPHIExpandTy,
                                          IntTy, TruncTy, InvertStep);

  // Accommodate post-inc mode, if necessary.
  Value *Result;
  if (!PostIncLoops.count(L))
    Result = PN;
  else {
    // In PostInc mode, use the post-incremented value.
    BasicBlock *LatchBlock = L->getLoopLatch();
    assert(LatchBlock && "PostInc mode requires a unique loop latch!");
    Result = PN->getIncomingValueForBlock(LatchBlock);

    // For an expansion to use the postinc form, the client must call
    // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
    // or dominated by IVIncInsertPos.
    if (isa<Instruction>(Result) &&
        !SE.DT.dominates(cast<Instruction>(Result),
                         &*Builder.GetInsertPoint())) {
      // The induction variable's postinc expansion does not dominate this use.
      // IVUsers tries to prevent this case, so it is rare. However, it can
      // happen when an IVUser outside the loop is not dominated by the latch
      // block. Adjusting IVIncInsertPos before expansion begins cannot handle
      // all cases. Consider a phi outside the loop whose operand is replaced
      // during expansion with the value of the postinc user. Without
      // fundamentally changing the way postinc users are tracked, the only
      // remedy is inserting an extra IV increment. StepV might fold into
      // PostLoopOffset, but hopefully expandCodeFor handles that.
      bool useSubtract =
          !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
      if (useSubtract)
        Step = SE.getNegativeSCEV(Step);
      Value *StepV;
      {
        // Expand the step somewhere that dominates the loop header.
        SCEVInsertPointGuard Guard(Builder, this);
        StepV = expandCodeFor(Step, IntTy, &L->getHeader()->front());
      }
      Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
    }
  }

  // We have decided to reuse an induction variable of a dominating loop. Apply
  // truncation and/or inversion of the step.
  if (TruncTy) {
    Type *ResTy = Result->getType();
    // Normalize the result type.
    if (ResTy != SE.getEffectiveSCEVType(ResTy))
      Result = InsertNoopCastOfTo(Result, SE.getEffectiveSCEVType(ResTy));
    // Truncate the result.
    if (TruncTy != Result->getType()) {
      Result = Builder.CreateTrunc(Result, TruncTy);
      rememberInstruction(Result);
    }
    // Invert the result.
    if (InvertStep) {
      Result = Builder.CreateSub(expandCodeFor(Normalized->getStart(), TruncTy),
                                 Result);
      rememberInstruction(Result);
    }
  }

  // Re-apply any non-loop-dominating scale.
  if (PostLoopScale) {
    assert(S->isAffine() && "Can't linearly scale non-affine recurrences.");
    Result = InsertNoopCastOfTo(Result, IntTy);
    Result = Builder.CreateMul(Result,
                               expandCodeFor(PostLoopScale, IntTy));
    rememberInstruction(Result);
  }

  // Re-apply any non-loop-dominating offset.
  if (PostLoopOffset) {
    if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
      if (Result->getType()->isIntegerTy()) {
        Value *Base = expandCodeFor(PostLoopOffset, ExpandTy);
        const SCEV *const OffsetArray[1] = {SE.getUnknown(Result)};
        Result = expandAddToGEP(OffsetArray, OffsetArray + 1, PTy, IntTy, Base);
      } else {
        const SCEV *const OffsetArray[1] = {PostLoopOffset};
        Result =
            expandAddToGEP(OffsetArray, OffsetArray + 1, PTy, IntTy, Result);
      }
    } else {
      Result = InsertNoopCastOfTo(Result, IntTy);
      Result = Builder.CreateAdd(Result,
                                 expandCodeFor(PostLoopOffset, IntTy));
      rememberInstruction(Result);
    }
  }

  return Result;
}

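// In canonical mode, addrecs are expanded in terms of a canonical {0,+,1}
// induction variable: {X,+,F} is rewritten as X + {0,+,F}, an affine {0,+,F}
// becomes IV * F, and higher-degree recurrences are evaluated in closed form
// via evaluateAtIteration.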
Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
  if (!CanonicalMode) return expandAddRecExprLiterally(S);

  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  const Loop *L = S->getLoop();

  // First check for an existing canonical IV in a suitable type.
  PHINode *CanonicalIV = nullptr;
  if (PHINode *PN = L->getCanonicalInductionVariable())
    if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
      CanonicalIV = PN;

  // Rewrite an AddRec in terms of the canonical induction variable, if
  // its type is narrower.
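  // The operands are any-extended to the canonical IV's width here; the
  // result is truncated back down to Ty once the wider addrec has been
  // expanded.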
1475 if (CanonicalIV &&
1476 SE.getTypeSizeInBits(CanonicalIV->getType()) >
1477 SE.getTypeSizeInBits(Ty)) {
1478 SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
1479 for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
1480 NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
1481 Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
1482 S->getNoWrapFlags(SCEV::FlagNW)));
1483 BasicBlock::iterator NewInsertPt =
1484 findInsertPointAfter(cast<Instruction>(V), Builder.GetInsertBlock());
1485 V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), nullptr,
1486 &*NewInsertPt);
1487 return V;
1488 }
1489
1490 // {X,+,F} --> X + {0,+,F}
1491 if (!S->getStart()->isZero()) {
1492 SmallVector<const SCEV *, 4> NewOps(S->op_begin(), S->op_end());
1493 NewOps[0] = SE.getConstant(Ty, 0);
1494 const SCEV *Rest = SE.getAddRecExpr(NewOps, L,
1495 S->getNoWrapFlags(SCEV::FlagNW));
1496
1497 // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
1498 // comments on expandAddToGEP for details.
1499 const SCEV *Base = S->getStart();
1500 const SCEV *RestArray[1] = { Rest };
1501 // Dig into the expression to find the pointer base for a GEP.
1502 ExposePointerBase(Base, RestArray[0], SE);
1503 // If we found a pointer, expand the AddRec with a GEP.
1504 if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
1505 // Make sure the Base isn't something exotic, such as a multiplied
1506 // or divided pointer value. In those cases, the result type isn't
1507 // actually a pointer type.
1508 if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
1509 Value *StartV = expand(Base);
1510 assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
1511 return expandAddToGEP(RestArray, RestArray+1, PTy, Ty, StartV);
1512 }
1513 }
1514
1515 // Just do a normal add. Pre-expand the operands to suppress folding.
1516 //
1517 // The LHS and RHS values are factored out of the expand call to make the
1518 // output independent of the argument evaluation order.
1519 const SCEV *AddExprLHS = SE.getUnknown(expand(S->getStart()));
1520 const SCEV *AddExprRHS = SE.getUnknown(expand(Rest));
1521 return expand(SE.getAddExpr(AddExprLHS, AddExprRHS));
1522 }
1523
1524 // If we don't yet have a canonical IV, create one.
1525 if (!CanonicalIV) {
1526 // Create and insert the PHI node for the induction variable in the
1527 // specified loop.
1528 BasicBlock *Header = L->getHeader();
1529 pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
1530 CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
1531 &Header->front());
1532 rememberInstruction(CanonicalIV);
1533
1534 SmallSet<BasicBlock *, 4> PredSeen;
1535 Constant *One = ConstantInt::get(Ty, 1);
1536 for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
1537 BasicBlock *HP = *HPI;
1538 if (!PredSeen.insert(HP).second) {
1539 // There must be an incoming value for each predecessor, even the
1540 // duplicates!
1541 CanonicalIV->addIncoming(CanonicalIV->getIncomingValueForBlock(HP), HP);
1542 continue;
1543 }
1544
1545 if (L->contains(HP)) {
1546 // Insert a unit add instruction right before the terminator
1547 // corresponding to the back-edge.
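// A sketch of the IR this typically produces (block and value names are
// illustrative, not emitted verbatim):
//   header:
//     %indvar = phi i64 [ 0, %preheader ], [ %indvar.next, %latch ]
//     ...
//   latch:
//     %indvar.next = add i64 %indvar, 1
//     br label %header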
1548 Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
1549 "indvar.next",
1550 HP->getTerminator());
1551 Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
1552 rememberInstruction(Add);
1553 CanonicalIV->addIncoming(Add, HP);
1554 } else {
1555 CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
1556 }
1557 }
1558 }
1559
1560 // {0,+,1} --> Insert a canonical induction variable into the loop!
1561 if (S->isAffine() && S->getOperand(1)->isOne()) {
1562 assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
1563 "IVs with types different from the canonical IV should "
1564 "already have been handled!");
1565 return CanonicalIV;
1566 }
1567
1568 // {0,+,F} --> {0,+,1} * F
1569
1570 // If this is a simple linear addrec, emit it now as a special case.
1571 if (S->isAffine()) // {0,+,F} --> i*F
1572 return
1573 expand(SE.getTruncateOrNoop(
1574 SE.getMulExpr(SE.getUnknown(CanonicalIV),
1575 SE.getNoopOrAnyExtend(S->getOperand(1),
1576 CanonicalIV->getType())),
1577 Ty));
1578
1579 // If this is a chain of recurrences, turn it into a closed form, using the
1580 // folders, then expandCodeFor the closed form. This allows the folders to
1581 // simplify the expression without having to build a bunch of special code
1582 // into this folder.
1583 const SCEV *IH = SE.getUnknown(CanonicalIV); // Get I as a "symbolic" SCEV.
1584
1585 // Promote S up to the canonical IV type, if the cast is foldable.
1586 const SCEV *NewS = S;
1587 const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
1588 if (isa<SCEVAddRecExpr>(Ext))
1589 NewS = Ext;
1590
1591 const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
1592 //cerr << "Evaluated: " << *this << "\n to: " << *V << "\n";
1593
1594 // Truncate the result down to the original type, if needed.
1595 const SCEV *T = SE.getTruncateOrNoop(V, Ty);
1596 return expand(T);
1597}
1598
1599Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
1600 Type *Ty = SE.getEffectiveSCEVType(S->getType());
1601 Value *V = expandCodeFor(S->getOperand(),
1602 SE.getEffectiveSCEVType(S->getOperand()->getType()));
1603 Value *I = Builder.CreateTrunc(V, Ty);
1604 rememberInstruction(I);
1605 return I;
1606}
1607
1608Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
1609 Type *Ty = SE.getEffectiveSCEVType(S->getType());
1610 Value *V = expandCodeFor(S->getOperand(),
1611 SE.getEffectiveSCEVType(S->getOperand()->getType()));
1612 Value *I = Builder.CreateZExt(V, Ty);
1613 rememberInstruction(I);
1614 return I;
1615}
1616
1617Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
1618 Type *Ty = SE.getEffectiveSCEVType(S->getType());
1619 Value *V = expandCodeFor(S->getOperand(),
1620 SE.getEffectiveSCEVType(S->getOperand()->getType()));
1621 Value *I = Builder.CreateSExt(V, Ty);
1622 rememberInstruction(I);
1623 return I;
1624}
1625
1626Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
1627 Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
1628 Type *Ty = LHS->getType();
1629 for (int i = S->getNumOperands()-2; i >= 0; --i) {
1630 // In the case of mixed integer and pointer types, do the
1631 // rest of the comparisons as integers.
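// For example, smax(%a, %b) is materialized roughly as (illustrative):
//   %cmp  = icmp sgt i64 %a, %b
//   %smax = select i1 %cmp, i64 %a, i64 %b
// and additional operands fold into further compare+select pairs.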
1632 if (S->getOperand(i)->getType() != Ty) {
1633 Ty = SE.getEffectiveSCEVType(Ty);
1634 LHS = InsertNoopCastOfTo(LHS, Ty);
1635 }
1636 Value *RHS = expandCodeFor(S->getOperand(i), Ty);
1637 Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
1638 rememberInstruction(ICmp);
1639 Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
1640 rememberInstruction(Sel);
1641 LHS = Sel;
1642 }
1643 // In the case of mixed integer and pointer types, cast the
1644 // final result back to the pointer type.
1645 if (LHS->getType() != S->getType())
1646 LHS = InsertNoopCastOfTo(LHS, S->getType());
1647 return LHS;
1648}
1649
1650Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
1651 Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
1652 Type *Ty = LHS->getType();
1653 for (int i = S->getNumOperands()-2; i >= 0; --i) {
1654 // In the case of mixed integer and pointer types, do the
1655 // rest of the comparisons as integers.
1656 if (S->getOperand(i)->getType() != Ty) {
1657 Ty = SE.getEffectiveSCEVType(Ty);
1658 LHS = InsertNoopCastOfTo(LHS, Ty);
1659 }
1660 Value *RHS = expandCodeFor(S->getOperand(i), Ty);
1661 Value *ICmp = Builder.CreateICmpUGT(LHS, RHS);
1662 rememberInstruction(ICmp);
1663 Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
1664 rememberInstruction(Sel);
1665 LHS = Sel;
1666 }
1667 // In the case of mixed integer and pointer types, cast the
1668 // final result back to the pointer type.
1669 if (LHS->getType() != S->getType())
1670 LHS = InsertNoopCastOfTo(LHS, S->getType());
1671 return LHS;
1672}
1673
1674Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
1675 Instruction *IP) {
1676 setInsertPoint(IP);
1677 return expandCodeFor(SH, Ty);
1678}
1679
1680Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
1681 // Expand the code for this SCEV.
1682 Value *V = expand(SH);
1683 if (Ty) {
1684 assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
1685 "non-trivial casts should be done with the SCEVs directly!");
1686 V = InsertNoopCastOfTo(V, Ty);
1687 }
1688 return V;
1689}
1690
1691ScalarEvolution::ValueOffsetPair
1692SCEVExpander::FindValueInExprValueMap(const SCEV *S,
1693 const Instruction *InsertPt) {
1694 SetVector<ScalarEvolution::ValueOffsetPair> *Set = SE.getSCEVValues(S);
1695 // If the expansion is not in CanonicalMode and the SCEV contains any
1696 // scAddRecExpr sub-expression, the SCEV must be expanded literally.
1697 if (CanonicalMode || !SE.containsAddRecurrence(S)) {
1698 // If S is scConstant, it may be worse to reuse an existing Value.
1699 if (S->getSCEVType() != scConstant && Set) {
1700 // Choose a Value from the set that dominates the insertPt.
1701 // insertPt should be inside the Value's parent loop so as not to break
1702 // the LCSSA form.
1703 for (auto const &VOPair : *Set) {
1704 Value *V = VOPair.first;
1705 ConstantInt *Offset = VOPair.second;
1706 Instruction *EntInst = nullptr;
1707 if (V && isa<Instruction>(V) && (EntInst = cast<Instruction>(V)) &&
1708 S->getType() == V->getType() &&
1709 EntInst->getFunction() == InsertPt->getFunction() &&
1710 SE.DT.dominates(EntInst, InsertPt) &&
1711 (SE.LI.getLoopFor(EntInst->getParent()) == nullptr ||
1712 SE.LI.getLoopFor(EntInst->getParent())->contains(InsertPt)))
1713 return {V, Offset};
1714 }
1715 }
1716 }
1717 return {nullptr, nullptr};
1718}
1719
1720// The expansion of SCEV will either reuse a previous Value in ExprValueMap,
1721// or expand the SCEV literally. Specifically, if the expansion is in LSRMode,
1722// and the SCEV contains any scAddRecExpr sub-expression, it will be expanded
1723// literally, to prevent LSR's transformed SCEV from being reverted. Otherwise,
1724// the expansion will try to reuse a Value from ExprValueMap, and only when
1725// that fails, expand the SCEV literally.
1726Value *SCEVExpander::expand(const SCEV *S) {
1727 // Compute an insertion point for this SCEV object. Hoist the instructions
1728 // as far out in the loop nest as possible.
1729 Instruction *InsertPt = &*Builder.GetInsertPoint();
1730 for (Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock());;
1731 L = L->getParentLoop())
1732 if (SE.isLoopInvariant(S, L)) {
1733 if (!L) break;
1734 if (BasicBlock *Preheader = L->getLoopPreheader())
1735 InsertPt = Preheader->getTerminator();
1736 else {
1737 // LSR sets the insertion point for AddRec start/step values to the
1738 // block start to simplify value reuse, even though it's an invalid
1739 // position. SCEVExpander must correct for this in all cases.
1740 InsertPt = &*L->getHeader()->getFirstInsertionPt();
1741 }
1742 } else {
1743 // We can move the insertion point only if there are no div or rem operations;
1744 // otherwise we risk moving it across a check for a zero denominator.
1745 auto SafeToHoist = [](const SCEV *S) {
1746 return !SCEVExprContains(S, [](const SCEV *S) {
1747 if (const auto *D = dyn_cast<SCEVUDivExpr>(S)) {
1748 if (const auto *SC = dyn_cast<SCEVConstant>(D->getRHS()))
1749 // Division by non-zero constants can be hoisted.
1750 return SC->getValue()->isZero();
1751 // All other divisions should not be moved as they may be
1752 // divisions by zero and should be kept within the
1753 // conditions of the surrounding loops that guard their
1754 // execution (see PR35406).
1755 return true;
1756 }
1757 return false;
1758 });
1759 };
1760 // If the SCEV is computable at this level, insert it into the header
1761 // after the PHIs (and after any other instructions that we've inserted
1762 // there) so that it is guaranteed to dominate any user inside the loop.
1763 if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L) &&
1764 SafeToHoist(S))
1765 InsertPt = &*L->getHeader()->getFirstInsertionPt();
1766 while (InsertPt->getIterator() != Builder.GetInsertPoint() &&
1767 (isInsertedInstruction(InsertPt) ||
1768 isa<DbgInfoIntrinsic>(InsertPt))) {
1769 InsertPt = &*std::next(InsertPt->getIterator());
1770 }
1771 break;
1772 }
1773
1774 // Check to see if we already expanded this here.
1775 auto I = InsertedExpressions.find(std::make_pair(S, InsertPt));
1776 if (I != InsertedExpressions.end())
1777 return I->second;
1778
1779 SCEVInsertPointGuard Guard(Builder, this);
1780 Builder.SetInsertPoint(InsertPt);
1781
1782 // Expand the expression into instructions.
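// First look for an existing expansion recorded in ExprValueMap. If the
// cached value differs from S by a constant offset, the code below
// compensates for it: with a GEP for pointer-typed values, or with a
// subtract for integer values.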
1783 ScalarEvolution::ValueOffsetPair VO = FindValueInExprValueMap(S, InsertPt);
1784 Value *V = VO.first;
1785
1786 if (!V)
1787 V = visit(S);
1788 else if (VO.second) {
1789 if (PointerType *Vty = dyn_cast<PointerType>(V->getType())) {
1790 Type *Ety = Vty->getPointerElementType();
1791 int64_t Offset = VO.second->getSExtValue();
1792 int64_t ESize = SE.getTypeSizeInBits(Ety);
1793 if ((Offset * 8) % ESize == 0) {
1794 ConstantInt *Idx =
1795 ConstantInt::getSigned(VO.second->getType(), -(Offset * 8) / ESize);
1796 V = Builder.CreateGEP(Ety, V, Idx, "scevgep");
1797 } else {
1798 ConstantInt *Idx =
1799 ConstantInt::getSigned(VO.second->getType(), -Offset);
1800 unsigned AS = Vty->getAddressSpace();
1801 V = Builder.CreateBitCast(V, Type::getInt8PtrTy(SE.getContext(), AS));
1802 V = Builder.CreateGEP(Type::getInt8Ty(SE.getContext()), V, Idx,
1803 "uglygep");
1804 V = Builder.CreateBitCast(V, Vty);
1805 }
1806 } else {
1807 V = Builder.CreateSub(V, VO.second);
1808 }
1809 }
1810 // Remember the expanded value for this SCEV at this location.
1811 //
1812 // This is independent of PostIncLoops. The mapped value simply materializes
1813 // the expression at this insertion point. If the mapped value happened to be
1814 // a postinc expansion, it could be reused by a non-postinc user, but only if
1815 // its insertion point was already at the head of the loop.
1816 InsertedExpressions[std::make_pair(S, InsertPt)] = V;
1817 return V;
1818}
1819
1820void SCEVExpander::rememberInstruction(Value *I) {
1821 if (!PostIncLoops.empty())
1822 InsertedPostIncValues.insert(I);
1823 else
1824 InsertedValues.insert(I);
1825}
1826
1827/// getOrInsertCanonicalInductionVariable - This method returns the
1828/// canonical induction variable of the specified type for the specified
1829/// loop (inserting one if there is none). A canonical induction variable
1830/// starts at zero and steps by one on each iteration.
1831PHINode *
1832SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
1833 Type *Ty) {
1834 assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");
1835
1836 // Build a SCEV for {0,+,1}<L>.
1837 // Conservatively use FlagAnyWrap for now.
1838 const SCEV *H = SE.getAddRecExpr(SE.getConstant(Ty, 0),
1839 SE.getConstant(Ty, 1), L, SCEV::FlagAnyWrap);
1840
1841 // Emit code for it.
1842 SCEVInsertPointGuard Guard(Builder, this);
1843 PHINode *V =
1844 cast<PHINode>(expandCodeFor(H, nullptr, &L->getHeader()->front()));
1845
1846 return V;
1847}
1848
1849/// replaceCongruentIVs - Check for congruent phis in this loop header and
1850/// replace them with their most canonical representative. Return the number of
1851/// phis eliminated.
1852///
1853/// This does not depend on any SCEVExpander state but should be used in
1854/// the same context in which SCEVExpander is used.
1855unsigned
1856SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
1857 SmallVectorImpl<WeakTrackingVH> &DeadInsts,
1858 const TargetTransformInfo *TTI) {
1859 // Find integer phis in order of decreasing width.
1860 SmallVector<PHINode*, 8> Phis;
1861 for (PHINode &PN : L->getHeader()->phis())
1862 Phis.push_back(&PN);
1863
1864 if (TTI)
1865 std::sort(Phis.begin(), Phis.end(), [](Value *LHS, Value *RHS) {
1866 // Put pointers at the back and make sure pointer < pointer = false.
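// For example (illustrative), phis of types i64, i8* and i32 sort to
// [i64, i32, i8*]: integers in order of decreasing width, pointers last.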
1867 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
1868 return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();
1869 return RHS->getType()->getPrimitiveSizeInBits() <
1870 LHS->getType()->getPrimitiveSizeInBits();
1871 });
1872
1873 unsigned NumElim = 0;
1874 DenseMap<const SCEV *, PHINode *> ExprToIVMap;
1875 // Process phis from wide to narrow. Map wide phis to their truncation
1876 // so narrow phis can reuse them.
1877 for (PHINode *Phi : Phis) {
1878 auto SimplifyPHINode = [&](PHINode *PN) -> Value * {
1879 if (Value *V = SimplifyInstruction(PN, {DL, &SE.TLI, &SE.DT, &SE.AC}))
1880 return V;
1881 if (!SE.isSCEVable(PN->getType()))
1882 return nullptr;
1883 auto *Const = dyn_cast<SCEVConstant>(SE.getSCEV(PN));
1884 if (!Const)
1885 return nullptr;
1886 return Const->getValue();
1887 };
1888
1889 // Fold constant phis. They may be congruent to other constant phis and
1890 // would confuse the logic below that expects proper IVs.
1891 if (Value *V = SimplifyPHINode(Phi)) {
1892 if (V->getType() != Phi->getType())
1893 continue;
1894 Phi->replaceAllUsesWith(V);
1895 DeadInsts.emplace_back(Phi);
1896 ++NumElim;
1897 DEBUG_WITH_TYPE(DebugType, dbgs()
1898 << "INDVARS: Eliminated constant iv: " << *Phi << '\n');
1899 continue;
1900 }
1901
1902 if (!SE.isSCEVable(Phi->getType()))
1903 continue;
1904
1905 PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
1906 if (!OrigPhiRef) {
1907 OrigPhiRef = Phi;
1908 if (Phi->getType()->isIntegerTy() && TTI &&
1909 TTI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
1910 // This phi can be freely truncated to the narrowest phi type. Map the
1911 // truncated expression to it so it will be reused for narrow types.
1912 const SCEV *TruncExpr =
1913 SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType());
1914 ExprToIVMap[TruncExpr] = Phi;
1915 }
1916 continue;
1917 }
1918
1919 // Replacing a pointer phi with an integer phi or vice-versa doesn't make
1920 // sense.
1921 if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
1922 continue;
1923
1924 if (BasicBlock *LatchBlock = L->getLoopLatch()) {
1925 Instruction *OrigInc = dyn_cast<Instruction>(
1926 OrigPhiRef->getIncomingValueForBlock(LatchBlock));
1927 Instruction *IsomorphicInc =
1928 dyn_cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));
1929
1930 if (OrigInc && IsomorphicInc) {
1931 // If this phi has the same width but is more canonical, replace the
1932 // original with it. As part of the "more canonical" determination,
1933 // respect a prior decision to use an IV chain.
1934 if (OrigPhiRef->getType() == Phi->getType() &&
1935 !(ChainedPhis.count(Phi) ||
1936 isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L)) &&
1937 (ChainedPhis.count(Phi) ||
1938 isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
1939 std::swap(OrigPhiRef, Phi);
1940 std::swap(OrigInc, IsomorphicInc);
1941 }
1942 // Replacing the congruent phi is sufficient because acyclic
1943 // redundancy elimination, CSE/GVN, should handle the
1944 // rest. However, once SCEV proves that a phi is congruent,
1945 // it's often the head of an IV user cycle that is isomorphic
1946 // with the original phi. It's worth eagerly cleaning up the
1947 // common case of a single IV increment so that DeleteDeadPHIs
1948 // can remove cycles that had postinc uses.
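// Hypothetical example: with OrigInc "%inc64 = add i64 %iv64, 1" and a
// congruent increment "%inc32 = add i32 %iv32, 1", the i64 increment is
// hoisted and a trunc to i32 replaces all uses of %inc32.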
1949 const SCEV *TruncExpr =
1950 SE.getTruncateOrNoop(SE.getSCEV(OrigInc), IsomorphicInc->getType());
1951 if (OrigInc != IsomorphicInc &&
1952 TruncExpr == SE.getSCEV(IsomorphicInc) &&
1953 SE.LI.replacementPreservesLCSSAForm(IsomorphicInc, OrigInc) &&
1954 hoistIVInc(OrigInc, IsomorphicInc)) {
1955 DEBUG_WITH_TYPE(DebugType,
1956 dbgs() << "INDVARS: Eliminated congruent iv.inc: "
1957 << *IsomorphicInc << '\n');
1958 Value *NewInc = OrigInc;
1959 if (OrigInc->getType() != IsomorphicInc->getType()) {
1960 Instruction *IP = nullptr;
1961 if (PHINode *PN = dyn_cast<PHINode>(OrigInc))
1962 IP = &*PN->getParent()->getFirstInsertionPt();
1963 else
1964 IP = OrigInc->getNextNode();
1965
1966 IRBuilder<> Builder(IP);
1967 Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
1968 NewInc = Builder.CreateTruncOrBitCast(
1969 OrigInc, IsomorphicInc->getType(), IVName);
1970 }
1971 IsomorphicInc->replaceAllUsesWith(NewInc);
1972 DeadInsts.emplace_back(IsomorphicInc);
1973 }
1974 }
1975 }
1976 DEBUG_WITH_TYPE(DebugType, dbgs() << "INDVARS: Eliminated congruent iv: "
1977 << *Phi << '\n');
1978 ++NumElim;
1979 Value *NewIV = OrigPhiRef;
1980 if (OrigPhiRef->getType() != Phi->getType()) {
1981 IRBuilder<> Builder(&*L->getHeader()->getFirstInsertionPt());
1982 Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
1983 NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
1984 }
1985 Phi->replaceAllUsesWith(NewIV);
1986 DeadInsts.emplace_back(Phi);
1987 }
1988 return NumElim;
1989}
1990
1991Value *SCEVExpander::getExactExistingExpansion(const SCEV *S,
1992 const Instruction *At, Loop *L) {
1993 Optional<ScalarEvolution::ValueOffsetPair> VO =
1994 getRelatedExistingExpansion(S, At, L);
1995 if (VO && VO.getValue().second == nullptr)
1996 return VO.getValue().first;
1997 return nullptr;
1998}
1999
2000Optional<ScalarEvolution::ValueOffsetPair>
2001SCEVExpander::getRelatedExistingExpansion(const SCEV *S, const Instruction *At,
2002 Loop *L) {
2003 using namespace llvm::PatternMatch;
2004
2005 SmallVector<BasicBlock *, 4> ExitingBlocks;
2006 L->getExitingBlocks(ExitingBlocks);
2007
2008 // Look for a suitable value in simple conditions at the loop exits.
2009 for (BasicBlock *BB : ExitingBlocks) {
2010 ICmpInst::Predicate Pred;
2011 Instruction *LHS, *RHS;
2012 BasicBlock *TrueBB, *FalseBB;
2013
2014 if (!match(BB->getTerminator(),
2015 m_Br(m_ICmp(Pred, m_Instruction(LHS), m_Instruction(RHS)),
2016 TrueBB, FalseBB)))
2017 continue;
2018
2019 if (SE.getSCEV(LHS) == S && SE.DT.dominates(LHS, At))
2020 return ScalarEvolution::ValueOffsetPair(LHS, nullptr);
2021
2022 if (SE.getSCEV(RHS) == S && SE.DT.dominates(RHS, At))
2023 return ScalarEvolution::ValueOffsetPair(RHS, nullptr);
2024 }
2025
2026 // Use expand's logic for reusing a previous Value in
2027 // ExprValueMap.
2028 ScalarEvolution::ValueOffsetPair VO = FindValueInExprValueMap(S, At);
2029 if (VO.first)
2030 return VO;
2031
2032 // There is potential to make this significantly smarter, but this simple
2033 // heuristic already gets some interesting cases.
2034
2035 // Cannot find a suitable value.
2036 return None;
2037}
2038
2039bool SCEVExpander::isHighCostExpansionHelper(
2040 const SCEV *S, Loop *L, const Instruction *At,
2041 SmallPtrSetImpl<const SCEV *> &Processed) {
2042
2043 // If we can find an existing value for this SCEV available at the point "At",
2044 // then consider the expression cheap.
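// For instance (illustrative), if an exiting block already branches on
// "icmp eq i64 %iv.next, %n" and S is the SCEV of %n, the existing %n is
// reused and the expansion costs nothing.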
2045 if (At && getRelatedExistingExpansion(S, At, L))
2046 return false;
2047
2048 // Zero- and one-operand expressions.
2049 switch (S->getSCEVType()) {
2050 case scUnknown:
2051 case scConstant:
2052 return false;
2053 case scTruncate:
2054 return isHighCostExpansionHelper(cast<SCEVTruncateExpr>(S)->getOperand(),
2055 L, At, Processed);
2056 case scZeroExtend:
2057 return isHighCostExpansionHelper(cast<SCEVZeroExtendExpr>(S)->getOperand(),
2058 L, At, Processed);
2059 case scSignExtend:
2060 return isHighCostExpansionHelper(cast<SCEVSignExtendExpr>(S)->getOperand(),
2061 L, At, Processed);
2062 }
2063
2064 if (!Processed.insert(S).second)
2065 return false;
2066
2067 if (auto *UDivExpr = dyn_cast<SCEVUDivExpr>(S)) {
2068 // If the divisor is a power of two and the SCEV type fits in a native
2069 // integer, consider the division cheap irrespective of whether it occurs in
2070 // the user code since it can be lowered into a right shift.
2071 if (auto *SC = dyn_cast<SCEVConstant>(UDivExpr->getRHS()))
2072 if (SC->getAPInt().isPowerOf2()) {
2073 const DataLayout &DL =
2074 L->getHeader()->getParent()->getParent()->getDataLayout();
2075 unsigned Width = cast<IntegerType>(UDivExpr->getType())->getBitWidth();
2076 return DL.isIllegalInteger(Width);
2077 }
2078
2079 // UDivExpr is very likely a UDiv that ScalarEvolution's HowFarToZero or
2080 // HowManyLessThans produced to compute a precise expression, rather than a
2081 // UDiv from the user's code. If we can't find a UDiv in the code with some
2082 // simple searching, assume the former and consider UDivExpr expensive to
2083 // compute.
2084 BasicBlock *ExitingBB = L->getExitingBlock();
2085 if (!ExitingBB)
2086 return true;
2087
2088 // At the beginning of this function we already tried to find an existing
2089 // value for plain 'S'. Now try to look up 'S + 1', since it is a common
2090 // pattern involving division. This is just a simple search heuristic.
2091 if (!At)
2092 At = &ExitingBB->back();
2093 if (!getRelatedExistingExpansion(
2094 SE.getAddExpr(S, SE.getConstant(S->getType(), 1)), At, L))
2095 return true;
2096 }
2097
2098 // HowManyLessThans uses a Max expression whenever the loop is not guarded by
2099 // the exit condition.
2100 if (isa<SCEVSMaxExpr>(S) || isa<SCEVUMaxExpr>(S))
2101 return true;
2102
2103 // Recurse past nary expressions, which commonly occur in the
2104 // BackedgeTakenCount. They may already exist in program code, and if not,
2105 // they are not too expensive to rematerialize.
2106 if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(S)) {
2107 for (auto *Op : NAry->operands())
2108 if (isHighCostExpansionHelper(Op, L, At, Processed))
2109 return true;
2110 }
2111
2112 // If we haven't recognized an expensive SCEV pattern, assume it's an
2113 // expression produced by program code.
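// (Illustrative: a udiv by the constant 8 at a legal integer width lowers
// to a single right shift, and add/mul chains over values the program
// already computes are usually as cheap as existing address arithmetic.)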
2114 return false;
2115}
2116
2117Value *SCEVExpander::expandCodeForPredicate(const SCEVPredicate *Pred,
2118 Instruction *IP) {
2119 assert(IP);
2120 switch (Pred->getKind()) {
2121 case SCEVPredicate::P_Union:
2122 return expandUnionPredicate(cast<SCEVUnionPredicate>(Pred), IP);
2123 case SCEVPredicate::P_Equal:
2124 return expandEqualPredicate(cast<SCEVEqualPredicate>(Pred), IP);
2125 case SCEVPredicate::P_Wrap: {
2126 auto *AddRecPred = cast<SCEVWrapPredicate>(Pred);
2127 return expandWrapPredicate(AddRecPred, IP);
2128 }
2129 }
2130 llvm_unreachable("Unknown SCEV predicate type");
2131}
2132
2133Value *SCEVExpander::expandEqualPredicate(const SCEVEqualPredicate *Pred,
2134 Instruction *IP) {
2135 Value *Expr0 = expandCodeFor(Pred->getLHS(), Pred->getLHS()->getType(), IP);
2136 Value *Expr1 = expandCodeFor(Pred->getRHS(), Pred->getRHS()->getType(), IP);
2137
2138 Builder.SetInsertPoint(IP);
2139 auto *I = Builder.CreateICmpNE(Expr0, Expr1, "ident.check");
2140 return I;
2141}
2142
2143Value *SCEVExpander::generateOverflowCheck(const SCEVAddRecExpr *AR,
2144 Instruction *Loc, bool Signed) {
2145 assert(AR->isAffine() && "Cannot generate RT check for "
2146 "non-affine expression");
2147
2148 SCEVUnionPredicate Pred;
2149 const SCEV *ExitCount =
2150 SE.getPredicatedBackedgeTakenCount(AR->getLoop(), Pred);
2151
2152 assert(ExitCount != SE.getCouldNotCompute() && "Invalid loop count");
2153
2154 const SCEV *Step = AR->getStepRecurrence(SE);
2155 const SCEV *Start = AR->getStart();
2156
2157 unsigned SrcBits = SE.getTypeSizeInBits(ExitCount->getType());
2158 unsigned DstBits = SE.getTypeSizeInBits(AR->getType());
2159
2160 // The expression {Start,+,Step} has nusw/nssw if
2161 // Step < 0, Start - |Step| * Backedge <= Start
2162 // Step >= 0, Start + |Step| * Backedge >= Start
2163 // and |Step| * Backedge doesn't unsigned overflow.
2164
2165 IntegerType *CountTy = IntegerType::get(Loc->getContext(), SrcBits);
2166 Builder.SetInsertPoint(Loc);
2167 Value *TripCountVal = expandCodeFor(ExitCount, CountTy, Loc);
2168
2169 IntegerType *Ty =
2170 IntegerType::get(Loc->getContext(), SE.getTypeSizeInBits(AR->getType()));
2171
2172 Value *StepValue = expandCodeFor(Step, Ty, Loc);
2173 Value *NegStepValue = expandCodeFor(SE.getNegativeSCEV(Step), Ty, Loc);
2174 Value *StartValue = expandCodeFor(Start, Ty, Loc);
2175
2176 ConstantInt *Zero =
2177 ConstantInt::get(Loc->getContext(), APInt::getNullValue(DstBits));
2178
2179 Builder.SetInsertPoint(Loc);
2180 // Compute |Step|
2181 Value *StepCompare = Builder.CreateICmp(ICmpInst::ICMP_SLT, StepValue, Zero);
2182 Value *AbsStep = Builder.CreateSelect(StepCompare, NegStepValue, StepValue);
2183
2184 // Get the backedge-taken count and truncate or zero-extend it to the AR type.
2185 Value *TruncTripCount = Builder.CreateZExtOrTrunc(TripCountVal, Ty);
2186 auto *MulF = Intrinsic::getDeclaration(Loc->getModule(),
2187 Intrinsic::umul_with_overflow, Ty);
2188
2189 // Compute |Step| * Backedge
2190 CallInst *Mul = Builder.CreateCall(MulF, {AbsStep, TruncTripCount}, "mul");
2191 Value *MulV = Builder.CreateExtractValue(Mul, 0, "mul.result");
2192 Value *OfMul = Builder.CreateExtractValue(Mul, 1, "mul.overflow");
2193
2194 // Compute:
2195 // Start + |Step| * Backedge < Start
2196 // Start - |Step| * Backedge > Start
2197 Value *Add = Builder.CreateAdd(StartValue, MulV);
2198 Value *Sub = Builder.CreateSub(StartValue, MulV);
2199
2200 Value *EndCompareGT = Builder.CreateICmp(
2201 Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT, Sub, StartValue);
2202
2203 Value *EndCompareLT = Builder.CreateICmp(
2204 Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT, Add, StartValue);
2205
2206 // Select the answer based on the sign of Step.
2207 Value *EndCheck =
2208 Builder.CreateSelect(StepCompare, EndCompareGT, EndCompareLT);
2209
2210 // If the backedge taken count type is larger than the AR type,
2211 // check that we don't drop any bits by truncating it. If we are
2212 // dropping bits, then we have overflow (unless the step is zero).
2213 if (SE.getTypeSizeInBits(CountTy) > SE.getTypeSizeInBits(Ty)) {
2214 auto MaxVal = APInt::getMaxValue(DstBits).zext(SrcBits);
2215 auto *BackedgeCheck =
2216 Builder.CreateICmp(ICmpInst::ICMP_UGT, TripCountVal,
2217 ConstantInt::get(Loc->getContext(), MaxVal));
2218 BackedgeCheck = Builder.CreateAnd(
2219 BackedgeCheck, Builder.CreateICmp(ICmpInst::ICMP_NE, StepValue, Zero));
2220
2221 EndCheck = Builder.CreateOr(EndCheck, BackedgeCheck);
2222 }
2223
2224 EndCheck = Builder.CreateOr(EndCheck, OfMul);
2225 return EndCheck;
2226}
2227
2228Value *SCEVExpander::expandWrapPredicate(const SCEVWrapPredicate *Pred,
2229 Instruction *IP) {
2230 const auto *A = cast<SCEVAddRecExpr>(Pred->getExpr());
2231 Value *NSSWCheck = nullptr, *NUSWCheck = nullptr;
2232
2233 // Add a check for NUSW
2234 if (Pred->getFlags() & SCEVWrapPredicate::IncrementNUSW)
2235 NUSWCheck = generateOverflowCheck(A, IP, false);
2236
2237 // Add a check for NSSW
2238 if (Pred->getFlags() & SCEVWrapPredicate::IncrementNSSW)
2239 NSSWCheck = generateOverflowCheck(A, IP, true);
2240
2241 if (NUSWCheck && NSSWCheck)
2242 return Builder.CreateOr(NUSWCheck, NSSWCheck);
2243
2244 if (NUSWCheck)
2245 return NUSWCheck;
2246
2247 if (NSSWCheck)
2248 return NSSWCheck;
2249
2250 return ConstantInt::getFalse(IP->getContext());
2251}
2252
2253Value *SCEVExpander::expandUnionPredicate(const SCEVUnionPredicate *Union,
2254 Instruction *IP) {
2255 auto *BoolType = IntegerType::get(IP->getContext(), 1);
2256 Value *Check = ConstantInt::getNullValue(BoolType);
2257
2258 // Loop over all checks in this set.
2259 for (auto Pred : Union->getPredicates()) {
2260 auto *NextCheck = expandCodeForPredicate(Pred, IP);
2261 Builder.SetInsertPoint(IP);
2262 Check = Builder.CreateOr(Check, NextCheck);
2263 }
2264
2265 return Check;
2266}
2267
2268namespace {
2269// Search for a SCEV subexpression that is not safe to expand. Any expression
2270// that may expand to a !isSafeToSpeculativelyExecute value is unsafe, namely
2271// UDiv expressions. We don't know if the UDiv is derived from an IR divide
2272// instruction, but the important thing is that we prove the denominator is
2273// nonzero before expansion.
2274//
2275// IVUsers already checks that IV-derived expressions are safe. So this check is
2276// only needed when the expression includes some subexpression that is not IV
2277// derived.
2278//
2279// Currently, we only allow division by a nonzero constant here. If this is
2280// inadequate, we could easily allow division by SCEVUnknown by using
2281// ValueTracking to check isKnownNonZero().
2282//
2283// We cannot generally expand recurrences unless the step dominates the loop
2284// header. The expander handles the special case of affine recurrences by
2285// scaling the recurrence outside the loop, but this technique isn't generally
2286// applicable. Expanding a nested recurrence outside a loop requires computing
2287// binomial coefficients. This could be done, but the recurrence has to be in a
2288// perfectly reduced form, which can't be guaranteed.
2289struct SCEVFindUnsafe {
2290 ScalarEvolution &SE;
2291 bool IsUnsafe;
2292
2293 SCEVFindUnsafe(ScalarEvolution &se): SE(se), IsUnsafe(false) {}
2294
2295 bool follow(const SCEV *S) {
2296 if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
2297 const SCEVConstant *SC = dyn_cast<SCEVConstant>(D->getRHS());
2298 if (!SC || SC->getValue()->isZero()) {
2299 IsUnsafe = true;
2300 return false;
2301 }
2302 }
2303 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
2304 const SCEV *Step = AR->getStepRecurrence(SE);
2305 if (!AR->isAffine() && !SE.dominates(Step, AR->getLoop()->getHeader())) {
2306 IsUnsafe = true;
2307 return false;
2308 }
2309 }
2310 return true;
2311 }
2312 bool isDone() const { return IsUnsafe; }
2313};
2314}
2315
2316namespace llvm {
2317bool isSafeToExpand(const SCEV *S, ScalarEvolution &SE) {
2318 SCEVFindUnsafe Search(SE);
2319 visitAll(S, Search);
2320 return !Search.IsUnsafe;
2321}
2322
2323bool isSafeToExpandAt(const SCEV *S, const Instruction *InsertionPoint,
2324 ScalarEvolution &SE) {
2325 return isSafeToExpand(S, SE) && SE.dominates(S, InsertionPoint->getParent());
2326}
2327}
2328