InstCombineInternal.h revision 344779
//===- InstCombineInternal.h - InstCombine pass internals -------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// This file provides internal interfaces used to implement the InstCombine
/// pass.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H
#define LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>
#include <cstdint>

#define DEBUG_TYPE "instcombine"

using namespace llvm::PatternMatch;

namespace llvm {

class APInt;
class AssumptionCache;
class CallSite;
class DataLayout;
class DominatorTree;
class GEPOperator;
class GlobalVariable;
class LoopInfo;
class OptimizationRemarkEmitter;
class TargetLibraryInfo;
class User;

/// Assign a complexity or rank value to LLVM Values. This is used to reduce
/// the amount of pattern matching needed for compares and commutative
/// instructions. For example, if we have:
///   icmp ugt X, Constant
/// or
///   xor (add X, Constant), cast Z
///
/// We do not have to consider the commuted variants of these patterns because
/// canonicalization based on complexity guarantees the above ordering.
///
/// This routine maps IR values to various complexity ranks:
///   0 -> undef
///   1 -> Constants
///   2 -> Other non-instructions
///   3 -> Arguments
///   4 -> Cast and (f)neg/not instructions
///   5 -> Other instructions
static inline unsigned getComplexity(Value *V) {
  if (isa<Instruction>(V)) {
    if (isa<CastInst>(V) || match(V, m_Neg(m_Value())) ||
        match(V, m_Not(m_Value())) || match(V, m_FNeg(m_Value())))
      return 4;
    return 5;
  }
  if (isa<Argument>(V))
    return 3;
  return isa<Constant>(V) ? (isa<UndefValue>(V) ? 0 : 1) : 2;
}
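
// For illustration: a commutative user of a cast (rank 4) and another
// instruction (rank 5) is canonicalized with the higher-ranked value as the
// first operand, so transforms only need to match one operand order:
//   %z = sext i16 %s to i32    ; rank 4
//   %a = add i32 %x, 42        ; rank 5
//   %r = xor i32 %a, %z        ; canonical: rank 5 before rank 4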

/// Predicate canonicalization reduces the number of patterns that need to be
/// matched by other transforms. For example, we may swap the operands of a
/// conditional branch or select to create a compare with a canonical
/// (inverted) predicate which is then more likely to be matched with other
/// values.
static inline bool isCanonicalPredicate(CmpInst::Predicate Pred) {
  switch (Pred) {
  case CmpInst::ICMP_NE:
  case CmpInst::ICMP_ULE:
  case CmpInst::ICMP_SLE:
  case CmpInst::ICMP_UGE:
  case CmpInst::ICMP_SGE:
  // TODO: There are 16 FCMP predicates. Should others be (not) canonical?
  case CmpInst::FCMP_ONE:
  case CmpInst::FCMP_OLE:
  case CmpInst::FCMP_OGE:
    return false;
  default:
    return true;
  }
}

/// Return the source operand of a potentially bitcasted value while
/// optionally checking if it has one use. If there is no bitcast or the
/// one-use check is not met, return the input value itself.
static inline Value *peekThroughBitcast(Value *V, bool OneUseOnly = false) {
  if (auto *BitCast = dyn_cast<BitCastInst>(V))
    if (!OneUseOnly || BitCast->hasOneUse())
      return BitCast->getOperand(0);

  // V is not a bitcast or V has more than one use and OneUseOnly is true.
  return V;
}

/// Add one to a Constant.
static inline Constant *AddOne(Constant *C) {
  return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1));
}

/// Subtract one from a Constant.
static inline Constant *SubOne(Constant *C) {
  return ConstantExpr::getSub(C, ConstantInt::get(C->getType(), 1));
}

/// Return true if the specified value is free to invert (apply ~ to).
/// This happens in cases where the ~ can be eliminated. If WillInvertAllUses
/// is true, work under the assumption that the caller intends to remove all
/// uses of V and only keep uses of ~V.
static inline bool IsFreeToInvert(Value *V, bool WillInvertAllUses) {
  // ~(~(X)) -> X.
  if (match(V, m_Not(m_Value())))
    return true;

  // Constants can be considered to be not'ed values.
  if (isa<ConstantInt>(V))
    return true;

  // A vector of constant integers can be inverted easily.
  if (V->getType()->isVectorTy() && isa<Constant>(V)) {
    unsigned NumElts = V->getType()->getVectorNumElements();
    for (unsigned i = 0; i != NumElts; ++i) {
      Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
      if (!Elt)
        return false;

      if (isa<UndefValue>(Elt))
        continue;

      if (!isa<ConstantInt>(Elt))
        return false;
    }
    return true;
  }

  // Compares can be inverted if all of their uses are being modified to
  // use ~V.
  if (isa<CmpInst>(V))
    return WillInvertAllUses;

  // If `V` is of the form `A + Constant` then `-1 - V` can be folded into
  // `(-1 - Constant) - A` if we are willing to invert all of the uses.
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(V))
    if (BO->getOpcode() == Instruction::Add ||
        BO->getOpcode() == Instruction::Sub)
      if (isa<Constant>(BO->getOperand(0)) || isa<Constant>(BO->getOperand(1)))
        return WillInvertAllUses;

  // Selects with invertible operands are freely invertible.
  if (match(V, m_Select(m_Value(), m_Not(m_Value()), m_Not(m_Value()))))
    return WillInvertAllUses;

  return false;
}

/// Specific patterns of overflow check idioms that we match.
enum OverflowCheckFlavor {
  OCF_UNSIGNED_ADD,
  OCF_SIGNED_ADD,
  OCF_UNSIGNED_SUB,
  OCF_SIGNED_SUB,
  OCF_UNSIGNED_MUL,
  OCF_SIGNED_MUL,

  OCF_INVALID
};
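
// For illustration: one idiom matched against these flavors is the classic
// unsigned-add wraparound test, which corresponds to OCF_UNSIGNED_ADD:
//   %s  = add i32 %x, %y
//   %ov = icmp ult i32 %s, %x    ; true iff the addition wrapped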

/// Returns the OverflowCheckFlavor corresponding to an overflow_with_op
/// intrinsic.
static inline OverflowCheckFlavor
IntrinsicIDToOverflowCheckFlavor(unsigned ID) {
  switch (ID) {
  default:
    return OCF_INVALID;
  case Intrinsic::uadd_with_overflow:
    return OCF_UNSIGNED_ADD;
  case Intrinsic::sadd_with_overflow:
    return OCF_SIGNED_ADD;
  case Intrinsic::usub_with_overflow:
    return OCF_UNSIGNED_SUB;
  case Intrinsic::ssub_with_overflow:
    return OCF_SIGNED_SUB;
  case Intrinsic::umul_with_overflow:
    return OCF_UNSIGNED_MUL;
  case Intrinsic::smul_with_overflow:
    return OCF_SIGNED_MUL;
  }
}

/// Some binary operators require special handling to avoid poison and
/// undefined behavior. If a constant vector has undef elements, replace those
/// undefs with identity constants if possible because those are always safe
/// to execute. If no identity constant exists, replace undef with some other
/// safe constant.
static inline Constant *getSafeVectorConstantForBinop(
    BinaryOperator::BinaryOps Opcode, Constant *In, bool IsRHSConstant) {
  assert(In->getType()->isVectorTy() && "Not expecting scalars here");

  Type *EltTy = In->getType()->getVectorElementType();
  auto *SafeC = ConstantExpr::getBinOpIdentity(Opcode, EltTy, IsRHSConstant);
  if (!SafeC) {
    // TODO: Should this be available as a constant utility function? It is
    // similar to getBinOpAbsorber().
    if (IsRHSConstant) {
      switch (Opcode) {
      case Instruction::SRem: // X % 1 = 0
      case Instruction::URem: // X %u 1 = 0
        SafeC = ConstantInt::get(EltTy, 1);
        break;
      case Instruction::FRem: // X % 1.0 (doesn't simplify, but it is safe)
        SafeC = ConstantFP::get(EltTy, 1.0);
        break;
      default:
        llvm_unreachable("Only rem opcodes have no identity constant for RHS");
      }
    } else {
      switch (Opcode) {
      case Instruction::Shl:  // 0 << X = 0
      case Instruction::LShr: // 0 >>u X = 0
      case Instruction::AShr: // 0 >> X = 0
      case Instruction::SDiv: // 0 / X = 0
      case Instruction::UDiv: // 0 /u X = 0
      case Instruction::SRem: // 0 % X = 0
      case Instruction::URem: // 0 %u X = 0
      case Instruction::Sub:  // 0 - X (doesn't simplify, but it is safe)
      case Instruction::FSub: // 0.0 - X (doesn't simplify, but it is safe)
      case Instruction::FDiv: // 0.0 / X (doesn't simplify, but it is safe)
      case Instruction::FRem: // 0.0 % X = 0
        SafeC = Constant::getNullValue(EltTy);
        break;
      default:
        llvm_unreachable("Expected to find identity constant for opcode");
      }
    }
  }
  assert(SafeC && "Must have safe constant for binop");
  unsigned NumElts = In->getType()->getVectorNumElements();
  SmallVector<Constant *, 16> Out(NumElts);
  for (unsigned i = 0; i != NumElts; ++i) {
    Constant *C = In->getAggregateElement(i);
    Out[i] = isa<UndefValue>(C) ? SafeC : C;
  }
  return ConstantVector::get(Out);
}
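
// For illustration: if the divisor of a udiv has an undef lane, e.g.
//   udiv <2 x i32> %x, <i32 undef, i32 8>
// the undef lane is replaced with the identity constant 1 (X /u 1 = X),
// yielding the safe divisor <i32 1, i32 8>.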

/// The core instruction combiner logic.
///
/// This class provides the logic to recursively visit instructions and
/// combine them.
class LLVM_LIBRARY_VISIBILITY InstCombiner
    : public InstVisitor<InstCombiner, Instruction *> {
  // FIXME: These members shouldn't be public.
public:
  /// A worklist of the instructions that need to be simplified.
  InstCombineWorklist &Worklist;

  /// An IRBuilder that automatically inserts new instructions into the
  /// worklist.
  using BuilderTy = IRBuilder<TargetFolder, IRBuilderCallbackInserter>;
  BuilderTy &Builder;

private:
  // Mode in which we are running the combiner.
  const bool MinimizeSize;

  /// Enable combines that trigger rarely but are costly in compile time.
  const bool ExpensiveCombines;

  AliasAnalysis *AA;

  // Required analyses.
  AssumptionCache &AC;
  TargetLibraryInfo &TLI;
  DominatorTree &DT;
  const DataLayout &DL;
  const SimplifyQuery SQ;
  OptimizationRemarkEmitter &ORE;

  // Optional analyses. When non-null, these can both be used to do better
  // combining and will be updated to reflect any changes.
  LoopInfo *LI;

  bool MadeIRChange = false;

public:
  InstCombiner(InstCombineWorklist &Worklist, BuilderTy &Builder,
               bool MinimizeSize, bool ExpensiveCombines, AliasAnalysis *AA,
               AssumptionCache &AC, TargetLibraryInfo &TLI, DominatorTree &DT,
               OptimizationRemarkEmitter &ORE, const DataLayout &DL,
               LoopInfo *LI)
      : Worklist(Worklist), Builder(Builder), MinimizeSize(MinimizeSize),
        ExpensiveCombines(ExpensiveCombines), AA(AA), AC(AC), TLI(TLI), DT(DT),
        DL(DL), SQ(DL, &TLI, &DT, &AC), ORE(ORE), LI(LI) {}

  /// Run the combiner over the entire worklist until it is empty.
  ///
  /// \returns true if the IR is changed.
  bool run();

  AssumptionCache &getAssumptionCache() const { return AC; }

  const DataLayout &getDataLayout() const { return DL; }

  DominatorTree &getDominatorTree() const { return DT; }

  LoopInfo *getLoopInfo() const { return LI; }

  TargetLibraryInfo &getTargetLibraryInfo() const { return TLI; }

  // Visitation implementation - Implement instruction combining for different
  // instruction types. The semantics are as follows:
  //   Return Value:
  //     null      - No change was made.
  //     I         - Change was made, I is still valid, though it may be dead.
  //     otherwise - Change was made, replace I with the returned instruction.
  Instruction *visitAdd(BinaryOperator &I);
  Instruction *visitFAdd(BinaryOperator &I);
  Value *OptimizePointerDifference(Value *LHS, Value *RHS, Type *Ty);
  Instruction *visitSub(BinaryOperator &I);
  Instruction *visitFSub(BinaryOperator &I);
  Instruction *visitMul(BinaryOperator &I);
  Instruction *visitFMul(BinaryOperator &I);
  Instruction *visitURem(BinaryOperator &I);
  Instruction *visitSRem(BinaryOperator &I);
  Instruction *visitFRem(BinaryOperator &I);
  bool simplifyDivRemOfSelectWithZeroOp(BinaryOperator &I);
  Instruction *commonRemTransforms(BinaryOperator &I);
  Instruction *commonIRemTransforms(BinaryOperator &I);
  Instruction *commonDivTransforms(BinaryOperator &I);
  Instruction *commonIDivTransforms(BinaryOperator &I);
  Instruction *visitUDiv(BinaryOperator &I);
  Instruction *visitSDiv(BinaryOperator &I);
  Instruction *visitFDiv(BinaryOperator &I);
  Value *simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1, bool Inverted);
  Instruction *visitAnd(BinaryOperator &I);
  Instruction *visitOr(BinaryOperator &I);
  Instruction *visitXor(BinaryOperator &I);
  Instruction *visitShl(BinaryOperator &I);
  Instruction *visitAShr(BinaryOperator &I);
  Instruction *visitLShr(BinaryOperator &I);
  Instruction *commonShiftTransforms(BinaryOperator &I);
  Instruction *visitFCmpInst(FCmpInst &I);
  Instruction *visitICmpInst(ICmpInst &I);
  Instruction *FoldShiftByConstant(Value *Op0, Constant *Op1,
                                   BinaryOperator &I);
  Instruction *commonCastTransforms(CastInst &CI);
  Instruction *commonPointerCastTransforms(CastInst &CI);
  Instruction *visitTrunc(TruncInst &CI);
  Instruction *visitZExt(ZExtInst &CI);
  Instruction *visitSExt(SExtInst &CI);
  Instruction *visitFPTrunc(FPTruncInst &CI);
  Instruction *visitFPExt(CastInst &CI);
  Instruction *visitFPToUI(FPToUIInst &FI);
  Instruction *visitFPToSI(FPToSIInst &FI);
  Instruction *visitUIToFP(CastInst &CI);
  Instruction *visitSIToFP(CastInst &CI);
  Instruction *visitPtrToInt(PtrToIntInst &CI);
  Instruction *visitIntToPtr(IntToPtrInst &CI);
  Instruction *visitBitCast(BitCastInst &CI);
  Instruction *visitAddrSpaceCast(AddrSpaceCastInst &CI);
  Instruction *FoldItoFPtoI(Instruction &FI);
  Instruction *visitSelectInst(SelectInst &SI);
  Instruction *visitCallInst(CallInst &CI);
  Instruction *visitInvokeInst(InvokeInst &II);

  Instruction *SliceUpIllegalIntegerPHI(PHINode &PN);
  Instruction *visitPHINode(PHINode &PN);
  Instruction *visitGetElementPtrInst(GetElementPtrInst &GEP);
  Instruction *visitAllocaInst(AllocaInst &AI);
  Instruction *visitAllocSite(Instruction &FI);
  Instruction *visitFree(CallInst &FI);
  Instruction *visitLoadInst(LoadInst &LI);
  Instruction *visitStoreInst(StoreInst &SI);
  Instruction *visitBranchInst(BranchInst &BI);
  Instruction *visitFenceInst(FenceInst &FI);
  Instruction *visitSwitchInst(SwitchInst &SI);
  Instruction *visitReturnInst(ReturnInst &RI);
  Instruction *visitInsertValueInst(InsertValueInst &IV);
  Instruction *visitInsertElementInst(InsertElementInst &IE);
  Instruction *visitExtractElementInst(ExtractElementInst &EI);
  Instruction *visitShuffleVectorInst(ShuffleVectorInst &SVI);
  Instruction *visitExtractValueInst(ExtractValueInst &EV);
  Instruction *visitLandingPadInst(LandingPadInst &LI);
  Instruction *visitVAStartInst(VAStartInst &I);
  Instruction *visitVACopyInst(VACopyInst &I);

  /// Specify what to return for unhandled instructions.
  Instruction *visitInstruction(Instruction &I) { return nullptr; }
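
  // A minimal sketch of the contract above (hypothetical helpers, for
  // illustration only):
  //   Instruction *visitFoo(BinaryOperator &I) {
  //     if (Value *V = simplifyFooToExistingValue(I)) // folded away
  //       return replaceInstUsesWith(I, V);           // I was replaced
  //     if (canonicalizeFooInPlace(I))                // I modified in place
  //       return &I;                                  // I is still valid
  //     return nullptr;                               // no change was made
  //   }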

  /// True when DB dominates all uses of DI except UI.
  /// UI must be in the same block as DI.
  /// The routine checks that the DI parent and DB are different.
  bool dominatesAllUses(const Instruction *DI, const Instruction *UI,
                        const BasicBlock *DB) const;

  /// Try to replace select with select operand SIOpd in SI-ICmp sequence.
  bool replacedSelectWithOperand(SelectInst *SI, const ICmpInst *Icmp,
                                 const unsigned SIOpd);

  /// Try to replace instruction \p I with value \p V which are pointers in
  /// different address spaces.
  /// \return true if successful.
  bool replacePointer(Instruction &I, Value *V);

private:
  bool shouldChangeType(unsigned FromBitWidth, unsigned ToBitWidth) const;
  bool shouldChangeType(Type *From, Type *To) const;
  Value *dyn_castNegVal(Value *V) const;
  Type *FindElementAtOffset(PointerType *PtrTy, int64_t Offset,
                            SmallVectorImpl<Value *> &NewIndices);

  /// Classify whether a cast is worth optimizing.
  ///
  /// This is a helper to decide whether the simplification of
  /// logic(cast(A), cast(B)) to cast(logic(A, B)) should be performed.
  ///
  /// \param CI The cast we are interested in.
  ///
  /// \return true if this cast actually results in any code being generated
  /// and if it cannot already be eliminated by some other transformation.
  bool shouldOptimizeCast(CastInst *CI);
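
  // For illustration, the reassociation this gates:
  //   %a32 = zext i8 %a to i32
  //   %b32 = zext i8 %b to i32
  //   %r   = and i32 %a32, %b32
  // can be narrowed to
  //   %ab  = and i8 %a, %b
  //   %r   = zext i8 %ab to i32
  // but only when the casts would otherwise survive.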

  /// Try to optimize a sequence of instructions checking if an operation
  /// on LHS and RHS overflows.
  ///
  /// If this overflow check is done via one of the overflow check intrinsics,
  /// then CtxI has to be the call instruction calling that intrinsic. If this
  /// overflow check is done by arithmetic followed by a compare, then CtxI has
  /// to be the arithmetic instruction.
  ///
  /// If a simplification is possible, stores the simplified result of the
  /// operation in OperationResult and the result of the overflow check in
  /// OverflowResult, and returns true. If no simplification is possible,
  /// returns false.
  bool OptimizeOverflowCheck(OverflowCheckFlavor OCF, Value *LHS, Value *RHS,
                             Instruction &CtxI, Value *&OperationResult,
                             Constant *&OverflowResult);

  Instruction *visitCallSite(CallSite CS);
  Instruction *tryOptimizeCall(CallInst *CI);
  bool transformConstExprCastCall(CallSite CS);
  Instruction *transformCallThroughTrampoline(CallSite CS,
                                              IntrinsicInst *Tramp);

  /// Transform (zext icmp) to bitwise / integer operations in order to
  /// eliminate it.
  ///
  /// \param ICI The icmp of the (zext icmp) pair we are interested in.
  /// \param CI The zext of the (zext icmp) pair we are interested in.
  /// \param DoTransform Pass false to just test whether the given (zext icmp)
  /// would be transformed. Pass true to actually perform the transformation.
  ///
  /// \return null if the transformation cannot be performed. If the
  /// transformation can be performed the new instruction that replaces the
  /// (zext icmp) pair will be returned (if \p DoTransform is false the
  /// unmodified \p ICI will be returned in this case).
  Instruction *transformZExtICmp(ICmpInst *ICI, ZExtInst &CI,
                                 bool DoTransform = true);
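
  // For illustration, one such elimination turns a sign test into a shift:
  //   %c = icmp slt i32 %x, 0
  //   %r = zext i1 %c to i32
  // becomes
  //   %r = lshr i32 %x, 31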

  Instruction *transformSExtICmp(ICmpInst *ICI, Instruction &CI);

  bool willNotOverflowSignedAdd(const Value *LHS, const Value *RHS,
                                const Instruction &CxtI) const {
    return computeOverflowForSignedAdd(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowUnsignedAdd(const Value *LHS, const Value *RHS,
                                  const Instruction &CxtI) const {
    return computeOverflowForUnsignedAdd(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowAdd(const Value *LHS, const Value *RHS,
                          const Instruction &CxtI, bool IsSigned) const {
    return IsSigned ? willNotOverflowSignedAdd(LHS, RHS, CxtI)
                    : willNotOverflowUnsignedAdd(LHS, RHS, CxtI);
  }

  bool willNotOverflowSignedSub(const Value *LHS, const Value *RHS,
                                const Instruction &CxtI) const {
    return computeOverflowForSignedSub(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowUnsignedSub(const Value *LHS, const Value *RHS,
                                  const Instruction &CxtI) const {
    return computeOverflowForUnsignedSub(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowSub(const Value *LHS, const Value *RHS,
                          const Instruction &CxtI, bool IsSigned) const {
    return IsSigned ? willNotOverflowSignedSub(LHS, RHS, CxtI)
                    : willNotOverflowUnsignedSub(LHS, RHS, CxtI);
  }

  bool willNotOverflowSignedMul(const Value *LHS, const Value *RHS,
                                const Instruction &CxtI) const {
    return computeOverflowForSignedMul(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowUnsignedMul(const Value *LHS, const Value *RHS,
                                  const Instruction &CxtI) const {
    return computeOverflowForUnsignedMul(LHS, RHS, &CxtI) ==
           OverflowResult::NeverOverflows;
  }

  bool willNotOverflowMul(const Value *LHS, const Value *RHS,
                          const Instruction &CxtI, bool IsSigned) const {
    return IsSigned ? willNotOverflowSignedMul(LHS, RHS, CxtI)
                    : willNotOverflowUnsignedMul(LHS, RHS, CxtI);
  }

  bool willNotOverflow(BinaryOperator::BinaryOps Opcode, const Value *LHS,
                       const Value *RHS, const Instruction &CxtI,
                       bool IsSigned) const {
    switch (Opcode) {
    case Instruction::Add: return willNotOverflowAdd(LHS, RHS, CxtI, IsSigned);
    case Instruction::Sub: return willNotOverflowSub(LHS, RHS, CxtI, IsSigned);
    case Instruction::Mul: return willNotOverflowMul(LHS, RHS, CxtI, IsSigned);
    default: llvm_unreachable("Unexpected opcode for overflow query");
    }
  }
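
  // A minimal sketch of how these queries are used (illustrative only; I is
  // a BinaryOperator being visited):
  //   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  //   if (!I.hasNoSignedWrap() &&
  //       willNotOverflow(I.getOpcode(), LHS, RHS, I, /*IsSigned=*/true)) {
  //     I.setHasNoSignedWrap(true); // strengthen to "add nsw" etc.
  //     return &I;
  //   }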

  Value *EmitGEPOffset(User *GEP);
  Instruction *scalarizePHI(ExtractElementInst &EI, PHINode *PN);
  Instruction *foldCastedBitwiseLogic(BinaryOperator &I);
  Instruction *narrowBinOp(TruncInst &Trunc);
  Instruction *narrowMaskedBinOp(BinaryOperator &And);
  Instruction *narrowMathIfNoOverflow(BinaryOperator &I);
  Instruction *narrowRotate(TruncInst &Trunc);
  Instruction *optimizeBitCastFromPhi(CastInst &CI, PHINode *PN);

  /// Determine if a pair of casts can be replaced by a single cast.
  ///
  /// \param CI1 The first of a pair of casts.
  /// \param CI2 The second of a pair of casts.
  ///
  /// \return 0 if the cast pair cannot be eliminated, otherwise returns an
  /// Instruction::CastOps value for a cast that can replace the pair, casting
  /// CI1->getSrcTy() to CI2->getDstTy().
  ///
  /// \see CastInst::isEliminableCastPair
  Instruction::CastOps isEliminableCastPair(const CastInst *CI1,
                                            const CastInst *CI2);

  Value *foldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS, Instruction &CxtI);
  Value *foldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS, Instruction &CxtI);
  Value *foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS);

  /// Optimize (fcmp)&(fcmp) or (fcmp)|(fcmp).
  /// NOTE: Unlike most of instcombine, this returns a Value which should
  /// already be inserted into the function.
  Value *foldLogicOfFCmps(FCmpInst *LHS, FCmpInst *RHS, bool IsAnd);

  Value *foldAndOrOfICmpsOfAndWithPow2(ICmpInst *LHS, ICmpInst *RHS,
                                       bool JoinedByAnd, Instruction &CxtI);
  Value *matchSelectFromAndOr(Value *A, Value *B, Value *C, Value *D);
  Value *getSelectCondition(Value *A, Value *B);

public:
  /// Inserts an instruction \p New before instruction \p Old.
  ///
  /// Also adds the new instruction to the worklist and returns \p New so that
  /// it is suitable for use as the return from the visitation patterns.
  Instruction *InsertNewInstBefore(Instruction *New, Instruction &Old) {
    assert(New && !New->getParent() &&
           "New instruction already inserted into a basic block!");
    BasicBlock *BB = Old.getParent();
    BB->getInstList().insert(Old.getIterator(), New); // Insert inst
    Worklist.Add(New);
    return New;
  }

  /// Same as InsertNewInstBefore, but also sets the debug loc.
  Instruction *InsertNewInstWith(Instruction *New, Instruction &Old) {
    New->setDebugLoc(Old.getDebugLoc());
    return InsertNewInstBefore(New, Old);
  }

  /// A combiner-aware RAUW-like routine.
  ///
  /// This method is to be used when an instruction is found to be dead,
  /// replaceable with another preexisting expression. Here we add all uses of
  /// I to the worklist, replace all uses of I with the new value, then return
  /// I, so that the inst combiner will know that I was modified.
  Instruction *replaceInstUsesWith(Instruction &I, Value *V) {
    // If there are no uses to replace, then we return nullptr to indicate
    // that no changes were made to the program.
    if (I.use_empty()) return nullptr;

    Worklist.AddUsersToWorkList(I); // Add all modified instrs to worklist.

    // If we are replacing the instruction with itself, this must be in a
    // segment of unreachable code, so just clobber the instruction.
    if (&I == V)
      V = UndefValue::get(I.getType());

    LLVM_DEBUG(dbgs() << "IC: Replacing " << I << "\n"
                      << "    with " << *V << '\n');

    I.replaceAllUsesWith(V);
    return &I;
  }

  /// Creates a result tuple for an overflow intrinsic \p II with a given
  /// \p Result and a constant \p Overflow value.
  Instruction *CreateOverflowTuple(IntrinsicInst *II, Value *Result,
                                   Constant *Overflow) {
    Constant *V[] = {UndefValue::get(Result->getType()), Overflow};
    StructType *ST = cast<StructType>(II->getType());
    Constant *Struct = ConstantStruct::get(ST, V);
    return InsertValueInst::Create(Struct, Result, 0);
  }
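
  // A minimal sketch (illustrative): an unsigned multiply by 1 never
  // overflows, so
  //   %t = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 1)
  // may be replaced through
  //   CreateOverflowTuple(II, II->getArgOperand(0), Builder.getFalse());
  // which yields the tuple { %x, false }.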

  /// Combiner aware instruction erasure.
  ///
  /// When dealing with an instruction that has side effects or produces a
  /// void value, we can't rely on DCE to delete the instruction. Instead,
  /// visit methods should return the value returned by this function.
  Instruction *eraseInstFromFunction(Instruction &I) {
    LLVM_DEBUG(dbgs() << "IC: ERASE " << I << '\n');
    assert(I.use_empty() && "Cannot erase instruction that is used!");
    salvageDebugInfo(I);

    // Make sure that we reprocess all operands now that we reduced their
    // use counts.
    if (I.getNumOperands() < 8) {
      for (Use &Operand : I.operands())
        if (auto *Inst = dyn_cast<Instruction>(Operand))
          Worklist.Add(Inst);
    }
    Worklist.Remove(&I);
    I.eraseFromParent();
    MadeIRChange = true;
    return nullptr; // Don't do anything else with I.
  }

  void computeKnownBits(const Value *V, KnownBits &Known,
                        unsigned Depth, const Instruction *CxtI) const {
    llvm::computeKnownBits(V, Known, DL, Depth, &AC, CxtI, &DT);
  }

  KnownBits computeKnownBits(const Value *V, unsigned Depth,
                             const Instruction *CxtI) const {
    return llvm::computeKnownBits(V, DL, Depth, &AC, CxtI, &DT);
  }

  bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero = false,
                              unsigned Depth = 0,
                              const Instruction *CxtI = nullptr) {
    return llvm::isKnownToBeAPowerOfTwo(V, DL, OrZero, Depth, &AC, CxtI, &DT);
  }

  bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth = 0,
                         const Instruction *CxtI = nullptr) const {
    return llvm::MaskedValueIsZero(V, Mask, DL, Depth, &AC, CxtI, &DT);
  }

  unsigned ComputeNumSignBits(const Value *Op, unsigned Depth = 0,
                              const Instruction *CxtI = nullptr) const {
    return llvm::ComputeNumSignBits(Op, DL, Depth, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForUnsignedMul(const Value *LHS,
                                               const Value *RHS,
                                               const Instruction *CxtI) const {
    return llvm::computeOverflowForUnsignedMul(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForSignedMul(const Value *LHS,
                                             const Value *RHS,
                                             const Instruction *CxtI) const {
    return llvm::computeOverflowForSignedMul(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForUnsignedAdd(const Value *LHS,
                                               const Value *RHS,
                                               const Instruction *CxtI) const {
    return llvm::computeOverflowForUnsignedAdd(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForSignedAdd(const Value *LHS,
                                             const Value *RHS,
                                             const Instruction *CxtI) const {
    return llvm::computeOverflowForSignedAdd(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForUnsignedSub(const Value *LHS,
                                               const Value *RHS,
                                               const Instruction *CxtI) const {
    return llvm::computeOverflowForUnsignedSub(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForSignedSub(const Value *LHS,
                                             const Value *RHS,
                                             const Instruction *CxtI) const {
    return llvm::computeOverflowForSignedSub(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  /// Maximum size of array considered when transforming.
  uint64_t MaxArraySizeForCombine;

private:
  /// Performs a few simplifications for operators which are associative
  /// or commutative.
  bool SimplifyAssociativeOrCommutative(BinaryOperator &I);

  /// Tries to simplify binary operations which some other binary
  /// operation distributes over.
  ///
  /// It does this either by factorizing out common terms (e.g. "(A*B)+(A*C)"
  /// -> "A*(B+C)") or by expanding out if this results in simplifications
  /// (e.g. "A & (B | C) -> (A&B) | (A&C)" if this is a win). Returns the
  /// simplified value, or null if it didn't simplify.
  Value *SimplifyUsingDistributiveLaws(BinaryOperator &I);
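
  // For illustration, the factorization direction:
  //   %m1 = mul i32 %a, %b
  //   %m2 = mul i32 %a, %c
  //   %r  = add i32 %m1, %m2
  // becomes
  //   %bc = add i32 %b, %c
  //   %r  = mul i32 %a, %bc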

  /// Tries to simplify add operations using the definition of remainder.
  ///
  /// The definition of remainder is X % C = X - (X / C) * C. The add
  /// expression X % C0 + ((X / C0) % C1) * C0 can be simplified to
  /// X % (C0 * C1).
  Value *SimplifyAddWithRemainder(BinaryOperator &I);

  // Binary Op helper for select operations where the expression can be
  // efficiently reorganized.
  Value *SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS,
                                        Value *RHS);

  /// This tries to simplify binary operations by factorizing out common terms
  /// (e.g. "(A*B)+(A*C)" -> "A*(B+C)").
  Value *tryFactorization(BinaryOperator &, Instruction::BinaryOps, Value *,
                          Value *, Value *, Value *);

  /// Match a select chain which produces one of three values based on whether
  /// the LHS is less than, equal to, or greater than RHS respectively.
  /// Return true if we matched a three-way compare idiom. The LHS, RHS, Less,
  /// Equal and Greater values are saved in the matching process and returned
  /// to the caller.
  bool matchThreeWayIntCompare(SelectInst *SI, Value *&LHS, Value *&RHS,
                               ConstantInt *&Less, ConstantInt *&Equal,
                               ConstantInt *&Greater);

  /// Attempts to replace V with a simpler value based on the demanded
  /// bits.
  Value *SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
                                 KnownBits &Known,
                                 unsigned Depth, Instruction *CxtI);
  bool SimplifyDemandedBits(Instruction *I, unsigned Op,
                            const APInt &DemandedMask, KnownBits &Known,
                            unsigned Depth = 0);

  /// Helper routine of SimplifyDemandedUseBits. It computes KnownZero/KnownOne
  /// bits. It also tries to handle simplifications that can be done based on
  /// DemandedMask, but without modifying the Instruction.
  Value *SimplifyMultipleUseDemandedBits(Instruction *I,
                                         const APInt &DemandedMask,
                                         KnownBits &Known,
                                         unsigned Depth, Instruction *CxtI);

  /// Helper routine of SimplifyDemandedUseBits. It tries to simplify demanded
  /// bits for the "r1 = shr x, c1; r2 = shl r1, c2" instruction sequence.
  Value *simplifyShrShlDemandedBits(
      Instruction *Shr, const APInt &ShrOp1, Instruction *Shl,
      const APInt &ShlOp1, const APInt &DemandedMask, KnownBits &Known);

  /// Tries to simplify operands to an integer instruction based on its
  /// demanded bits.
  bool SimplifyDemandedInstructionBits(Instruction &Inst);

  Value *simplifyAMDGCNMemoryIntrinsicDemanded(IntrinsicInst *II,
                                               APInt DemandedElts,
                                               int DmaskIdx = -1,
                                               int TFCIdx = -1);

  Value *SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
                                    APInt &UndefElts, unsigned Depth = 0);
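
  // For illustration, a demanded-bits simplification:
  //   %o = or i32 %x, 65280    ; affects only bits 8..15
  //   %r = and i32 %o, 15      ; demands only bits 0..3
  // The or cannot change any demanded bit, so it is bypassed:
  //   %r = and i32 %x, 15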

  /// Canonicalize the position of binops relative to shufflevector.
  Instruction *foldVectorBinop(BinaryOperator &Inst);

  /// Given a binary operator, cast instruction, or select which has a PHI
  /// node as operand #0, see if we can fold the instruction into the PHI
  /// (which is only possible if all operands to the PHI are constants).
  Instruction *foldOpIntoPhi(Instruction &I, PHINode *PN);

  /// Given an instruction with a select as one operand and a constant as the
  /// other operand, try to fold the binary operator into the select
  /// arguments. This also works for Cast instructions, which obviously do
  /// not have a second operand.
  Instruction *FoldOpIntoSelect(Instruction &Op, SelectInst *SI);

  /// This is a convenience wrapper function for the above two functions.
  Instruction *foldBinOpIntoSelectOrPhi(BinaryOperator &I);

  Instruction *foldAddWithConstant(BinaryOperator &Add);

  /// Try to rotate an operation below a PHI node, using PHI nodes for
  /// its operands.
  Instruction *FoldPHIArgOpIntoPHI(PHINode &PN);
  Instruction *FoldPHIArgBinOpIntoPHI(PHINode &PN);
  Instruction *FoldPHIArgGEPIntoPHI(PHINode &PN);
  Instruction *FoldPHIArgLoadIntoPHI(PHINode &PN);
  Instruction *FoldPHIArgZextsIntoPHI(PHINode &PN);

  /// If an integer typed PHI has only one use which is an IntToPtr operation,
  /// replace the PHI with an existing pointer typed PHI if it exists.
  /// Otherwise insert a new pointer typed PHI and replace the original one.
  Instruction *FoldIntegerTypedPHI(PHINode &PN);

  /// Helper function for FoldPHIArgXIntoPHI() to set debug location for the
  /// folded operation.
  void PHIArgMergedDebugLoc(Instruction *Inst, PHINode &PN);

  Instruction *foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
                           ICmpInst::Predicate Cond, Instruction &I);
  Instruction *foldAllocaCmp(ICmpInst &ICI, const AllocaInst *Alloca,
                             const Value *Other);
  Instruction *foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
                                            GlobalVariable *GV, CmpInst &ICI,
                                            ConstantInt *AndCst = nullptr);
  Instruction *foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
                                    Constant *RHSC);
  Instruction *foldICmpAddOpConst(Value *X, const APInt &C,
                                  ICmpInst::Predicate Pred);
  Instruction *foldICmpWithCastAndCast(ICmpInst &ICI);

  Instruction *foldICmpUsingKnownBits(ICmpInst &Cmp);
  Instruction *foldICmpWithDominatingICmp(ICmpInst &Cmp);
  Instruction *foldICmpWithConstant(ICmpInst &Cmp);
  Instruction *foldICmpInstWithConstant(ICmpInst &Cmp);
  Instruction *foldICmpInstWithConstantNotInt(ICmpInst &Cmp);
  Instruction *foldICmpBinOp(ICmpInst &Cmp);
  Instruction *foldICmpEquality(ICmpInst &Cmp);
  Instruction *foldICmpWithZero(ICmpInst &Cmp);

  Instruction *foldICmpSelectConstant(ICmpInst &Cmp, SelectInst *Select,
                                      ConstantInt *C);
  Instruction *foldICmpBitCastConstant(ICmpInst &Cmp, BitCastInst *Bitcast,
                                       const APInt &C);
  Instruction *foldICmpTruncConstant(ICmpInst &Cmp, TruncInst *Trunc,
                                     const APInt &C);
  Instruction *foldICmpAndConstant(ICmpInst &Cmp, BinaryOperator *And,
                                   const APInt &C);
  Instruction *foldICmpXorConstant(ICmpInst &Cmp, BinaryOperator *Xor,
                                   const APInt &C);
  Instruction *foldICmpOrConstant(ICmpInst &Cmp, BinaryOperator *Or,
                                  const APInt &C);
  Instruction *foldICmpMulConstant(ICmpInst &Cmp, BinaryOperator *Mul,
                                   const APInt &C);
  Instruction *foldICmpShlConstant(ICmpInst &Cmp, BinaryOperator *Shl,
                                   const APInt &C);
  Instruction *foldICmpShrConstant(ICmpInst &Cmp, BinaryOperator *Shr,
                                   const APInt &C);
  Instruction *foldICmpUDivConstant(ICmpInst &Cmp, BinaryOperator *UDiv,
                                    const APInt &C);
  Instruction *foldICmpDivConstant(ICmpInst &Cmp, BinaryOperator *Div,
                                   const APInt &C);
  Instruction *foldICmpSubConstant(ICmpInst &Cmp, BinaryOperator *Sub,
                                   const APInt &C);
  Instruction *foldICmpAddConstant(ICmpInst &Cmp, BinaryOperator *Add,
                                   const APInt &C);
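
  // For illustration, one add/constant compare fold (equality is
  // wrap-agnostic, so the add can be removed):
  //   %a = add i32 %x, 5
  //   %c = icmp eq i32 %a, 7
  // becomes
  //   %c = icmp eq i32 %x, 2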

  Instruction *foldICmpAndConstConst(ICmpInst &Cmp, BinaryOperator *And,
                                     const APInt &C1);
  Instruction *foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And,
                                const APInt &C1, const APInt &C2);
  Instruction *foldICmpShrConstConst(ICmpInst &I, Value *ShAmt,
                                     const APInt &C1, const APInt &C2);
  Instruction *foldICmpShlConstConst(ICmpInst &I, Value *ShAmt,
                                     const APInt &C1, const APInt &C2);

  Instruction *foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp,
                                                 BinaryOperator *BO,
                                                 const APInt &C);
  Instruction *foldICmpIntrinsicWithConstant(ICmpInst &ICI, const APInt &C);

  // Helpers of visitSelectInst().
  Instruction *foldSelectExtConst(SelectInst &Sel);
  Instruction *foldSelectOpOp(SelectInst &SI, Instruction *TI,
                              Instruction *FI);
  Instruction *foldSelectIntoOp(SelectInst &SI, Value *, Value *);
  Instruction *foldSPFofSPF(Instruction *Inner, SelectPatternFlavor SPF1,
                            Value *A, Value *B, Instruction &Outer,
                            SelectPatternFlavor SPF2, Value *C);
  Instruction *foldSelectInstWithICmp(SelectInst &SI, ICmpInst *ICI);

  Instruction *OptAndOp(BinaryOperator *Op, ConstantInt *OpRHS,
                        ConstantInt *AndRHS, BinaryOperator &TheAnd);

  Value *insertRangeTest(Value *V, const APInt &Lo, const APInt &Hi,
                         bool isSigned, bool Inside);
  Instruction *PromoteCastOfAllocation(BitCastInst &CI, AllocaInst &AI);
  bool mergeStoreIntoSuccessor(StoreInst &SI);

  /// Given an 'or' instruction, check to see if it is part of a bswap idiom.
  /// If so, return the equivalent bswap intrinsic.
  Instruction *matchBSwap(BinaryOperator &Or);

  Instruction *SimplifyAnyMemTransfer(AnyMemTransferInst *MI);
  Instruction *SimplifyAnyMemSet(AnyMemSetInst *MI);

  Value *EvaluateInDifferentType(Value *V, Type *Ty, bool isSigned);

  /// Returns a value X such that Val = X * Scale, or null if none.
  ///
  /// If the multiplication is known not to overflow then NoSignedWrap is set.
  Value *Descale(Value *Val, APInt Scale, bool &NoSignedWrap);
};

} // end namespace llvm

#undef DEBUG_TYPE

#endif // LLVM_LIB_TRANSFORMS_INSTCOMBINE_INSTCOMBINEINTERNAL_H