// InstCombineAddSub.cpp — LLVM revision 204642
1//===- InstCombineAddSub.cpp ----------------------------------------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file implements the visit functions for add, fadd, sub, and fsub. 11// 12//===----------------------------------------------------------------------===// 13 14#include "InstCombine.h" 15#include "llvm/Analysis/InstructionSimplify.h" 16#include "llvm/Target/TargetData.h" 17#include "llvm/Support/GetElementPtrTypeIterator.h" 18#include "llvm/Support/PatternMatch.h" 19using namespace llvm; 20using namespace PatternMatch; 21 22/// AddOne - Add one to a ConstantInt. 23static Constant *AddOne(Constant *C) { 24 return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1)); 25} 26/// SubOne - Subtract one from a ConstantInt. 27static Constant *SubOne(ConstantInt *C) { 28 return ConstantInt::get(C->getContext(), C->getValue()-1); 29} 30 31 32// dyn_castFoldableMul - If this value is a multiply that can be folded into 33// other computations (because it has a constant operand), return the 34// non-constant operand of the multiply, and set CST to point to the multiplier. 35// Otherwise, return null. 36// 37static inline Value *dyn_castFoldableMul(Value *V, ConstantInt *&CST) { 38 if (!V->hasOneUse() || !V->getType()->isIntegerTy()) 39 return 0; 40 41 Instruction *I = dyn_cast<Instruction>(V); 42 if (I == 0) return 0; 43 44 if (I->getOpcode() == Instruction::Mul) 45 if ((CST = dyn_cast<ConstantInt>(I->getOperand(1)))) 46 return I->getOperand(0); 47 if (I->getOpcode() == Instruction::Shl) 48 if ((CST = dyn_cast<ConstantInt>(I->getOperand(1)))) { 49 // The multiplier is really 1 << CST. 
50 uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth(); 51 uint32_t CSTVal = CST->getLimitedValue(BitWidth); 52 CST = ConstantInt::get(V->getType()->getContext(), 53 APInt(BitWidth, 1).shl(CSTVal)); 54 return I->getOperand(0); 55 } 56 return 0; 57} 58 59 60/// WillNotOverflowSignedAdd - Return true if we can prove that: 61/// (sext (add LHS, RHS)) === (add (sext LHS), (sext RHS)) 62/// This basically requires proving that the add in the original type would not 63/// overflow to change the sign bit or have a carry out. 64bool InstCombiner::WillNotOverflowSignedAdd(Value *LHS, Value *RHS) { 65 // There are different heuristics we can use for this. Here are some simple 66 // ones. 67 68 // Add has the property that adding any two 2's complement numbers can only 69 // have one carry bit which can change a sign. As such, if LHS and RHS each 70 // have at least two sign bits, we know that the addition of the two values 71 // will sign extend fine. 72 if (ComputeNumSignBits(LHS) > 1 && ComputeNumSignBits(RHS) > 1) 73 return true; 74 75 76 // If one of the operands only has one non-zero bit, and if the other operand 77 // has a known-zero bit in a more significant place than it (not including the 78 // sign bit) the ripple may go up to and fill the zero, but won't change the 79 // sign. For example, (X & ~4) + 1. 80 81 // TODO: Implement. 
82 83 return false; 84} 85 86Instruction *InstCombiner::visitAdd(BinaryOperator &I) { 87 bool Changed = SimplifyCommutative(I); 88 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1); 89 90 if (Value *V = SimplifyAddInst(LHS, RHS, I.hasNoSignedWrap(), 91 I.hasNoUnsignedWrap(), TD)) 92 return ReplaceInstUsesWith(I, V); 93 94 95 if (Constant *RHSC = dyn_cast<Constant>(RHS)) { 96 if (ConstantInt *CI = dyn_cast<ConstantInt>(RHSC)) { 97 // X + (signbit) --> X ^ signbit 98 const APInt& Val = CI->getValue(); 99 uint32_t BitWidth = Val.getBitWidth(); 100 if (Val == APInt::getSignBit(BitWidth)) 101 return BinaryOperator::CreateXor(LHS, RHS); 102 103 // See if SimplifyDemandedBits can simplify this. This handles stuff like 104 // (X & 254)+1 -> (X&254)|1 105 if (SimplifyDemandedInstructionBits(I)) 106 return &I; 107 108 // zext(bool) + C -> bool ? C + 1 : C 109 if (ZExtInst *ZI = dyn_cast<ZExtInst>(LHS)) 110 if (ZI->getSrcTy() == Type::getInt1Ty(I.getContext())) 111 return SelectInst::Create(ZI->getOperand(0), AddOne(CI), CI); 112 } 113 114 if (isa<PHINode>(LHS)) 115 if (Instruction *NV = FoldOpIntoPhi(I)) 116 return NV; 117 118 ConstantInt *XorRHS = 0; 119 Value *XorLHS = 0; 120 if (isa<ConstantInt>(RHSC) && 121 match(LHS, m_Xor(m_Value(XorLHS), m_ConstantInt(XorRHS)))) { 122 uint32_t TySizeBits = I.getType()->getScalarSizeInBits(); 123 const APInt& RHSVal = cast<ConstantInt>(RHSC)->getValue(); 124 unsigned ExtendAmt = 0; 125 // If we have ADD(XOR(AND(X, 0xFF), 0x80), 0xF..F80), it's a sext. 126 // If we have ADD(XOR(AND(X, 0xFF), 0xF..F80), 0x80), it's a sext. 
127 if (XorRHS->getValue() == -RHSVal) { 128 if (RHSVal.isPowerOf2()) 129 ExtendAmt = TySizeBits - RHSVal.logBase2() - 1; 130 else if (XorRHS->getValue().isPowerOf2()) 131 ExtendAmt = TySizeBits - XorRHS->getValue().logBase2() - 1; 132 } 133 134 if (ExtendAmt) { 135 APInt Mask = APInt::getHighBitsSet(TySizeBits, ExtendAmt); 136 if (!MaskedValueIsZero(XorLHS, Mask)) 137 ExtendAmt = 0; 138 } 139 140 if (ExtendAmt) { 141 Constant *ShAmt = ConstantInt::get(I.getType(), ExtendAmt); 142 Value *NewShl = Builder->CreateShl(XorLHS, ShAmt, "sext"); 143 return BinaryOperator::CreateAShr(NewShl, ShAmt); 144 } 145 } 146 } 147 148 if (I.getType()->isIntegerTy(1)) 149 return BinaryOperator::CreateXor(LHS, RHS); 150 151 if (I.getType()->isIntegerTy()) { 152 // X + X --> X << 1 153 if (LHS == RHS) 154 return BinaryOperator::CreateShl(LHS, ConstantInt::get(I.getType(), 1)); 155 156 if (Instruction *RHSI = dyn_cast<Instruction>(RHS)) { 157 if (RHSI->getOpcode() == Instruction::Sub) 158 if (LHS == RHSI->getOperand(1)) // A + (B - A) --> B 159 return ReplaceInstUsesWith(I, RHSI->getOperand(0)); 160 } 161 if (Instruction *LHSI = dyn_cast<Instruction>(LHS)) { 162 if (LHSI->getOpcode() == Instruction::Sub) 163 if (RHS == LHSI->getOperand(1)) // (B - A) + A --> B 164 return ReplaceInstUsesWith(I, LHSI->getOperand(0)); 165 } 166 } 167 168 // -A + B --> B - A 169 // -A + -B --> -(A + B) 170 if (Value *LHSV = dyn_castNegVal(LHS)) { 171 if (LHS->getType()->isIntOrIntVectorTy()) { 172 if (Value *RHSV = dyn_castNegVal(RHS)) { 173 Value *NewAdd = Builder->CreateAdd(LHSV, RHSV, "sum"); 174 return BinaryOperator::CreateNeg(NewAdd); 175 } 176 } 177 178 return BinaryOperator::CreateSub(RHS, LHSV); 179 } 180 181 // A + -B --> A - B 182 if (!isa<Constant>(RHS)) 183 if (Value *V = dyn_castNegVal(RHS)) 184 return BinaryOperator::CreateSub(LHS, V); 185 186 187 ConstantInt *C2; 188 if (Value *X = dyn_castFoldableMul(LHS, C2)) { 189 if (X == RHS) // X*C + X --> X * (C+1) 190 return 
BinaryOperator::CreateMul(RHS, AddOne(C2)); 191 192 // X*C1 + X*C2 --> X * (C1+C2) 193 ConstantInt *C1; 194 if (X == dyn_castFoldableMul(RHS, C1)) 195 return BinaryOperator::CreateMul(X, ConstantExpr::getAdd(C1, C2)); 196 } 197 198 // X + X*C --> X * (C+1) 199 if (dyn_castFoldableMul(RHS, C2) == LHS) 200 return BinaryOperator::CreateMul(LHS, AddOne(C2)); 201 202 // X + ~X --> -1 since ~X = -X-1 203 if (match(LHS, m_Not(m_Specific(RHS))) || 204 match(RHS, m_Not(m_Specific(LHS)))) 205 return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType())); 206 207 // A+B --> A|B iff A and B have no bits set in common. 208 if (const IntegerType *IT = dyn_cast<IntegerType>(I.getType())) { 209 APInt Mask = APInt::getAllOnesValue(IT->getBitWidth()); 210 APInt LHSKnownOne(IT->getBitWidth(), 0); 211 APInt LHSKnownZero(IT->getBitWidth(), 0); 212 ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne); 213 if (LHSKnownZero != 0) { 214 APInt RHSKnownOne(IT->getBitWidth(), 0); 215 APInt RHSKnownZero(IT->getBitWidth(), 0); 216 ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne); 217 218 // No bits in common -> bitwise or. 
219 if ((LHSKnownZero|RHSKnownZero).isAllOnesValue()) 220 return BinaryOperator::CreateOr(LHS, RHS); 221 } 222 } 223 224 // W*X + Y*Z --> W * (X+Z) iff W == Y 225 if (I.getType()->isIntOrIntVectorTy()) { 226 Value *W, *X, *Y, *Z; 227 if (match(LHS, m_Mul(m_Value(W), m_Value(X))) && 228 match(RHS, m_Mul(m_Value(Y), m_Value(Z)))) { 229 if (W != Y) { 230 if (W == Z) { 231 std::swap(Y, Z); 232 } else if (Y == X) { 233 std::swap(W, X); 234 } else if (X == Z) { 235 std::swap(Y, Z); 236 std::swap(W, X); 237 } 238 } 239 240 if (W == Y) { 241 Value *NewAdd = Builder->CreateAdd(X, Z, LHS->getName()); 242 return BinaryOperator::CreateMul(W, NewAdd); 243 } 244 } 245 } 246 247 if (ConstantInt *CRHS = dyn_cast<ConstantInt>(RHS)) { 248 Value *X = 0; 249 if (match(LHS, m_Not(m_Value(X)))) // ~X + C --> (C-1) - X 250 return BinaryOperator::CreateSub(SubOne(CRHS), X); 251 252 // (X & FF00) + xx00 -> (X+xx00) & FF00 253 if (LHS->hasOneUse() && 254 match(LHS, m_And(m_Value(X), m_ConstantInt(C2)))) { 255 Constant *Anded = ConstantExpr::getAnd(CRHS, C2); 256 if (Anded == CRHS) { 257 // See if all bits from the first bit set in the Add RHS up are included 258 // in the mask. First, get the rightmost bit. 259 const APInt &AddRHSV = CRHS->getValue(); 260 261 // Form a mask of all bits from the lowest bit added through the top. 262 APInt AddRHSHighBits(~((AddRHSV & -AddRHSV)-1)); 263 264 // See if the and mask includes all of these bits. 265 APInt AddRHSHighBitsAnd(AddRHSHighBits & C2->getValue()); 266 267 if (AddRHSHighBits == AddRHSHighBitsAnd) { 268 // Okay, the xform is safe. Insert the new add pronto. 269 Value *NewAdd = Builder->CreateAdd(X, CRHS, LHS->getName()); 270 return BinaryOperator::CreateAnd(NewAdd, C2); 271 } 272 } 273 } 274 275 // Try to fold constant add into select arguments. 
276 if (SelectInst *SI = dyn_cast<SelectInst>(LHS)) 277 if (Instruction *R = FoldOpIntoSelect(I, SI)) 278 return R; 279 } 280 281 // add (select X 0 (sub n A)) A --> select X A n 282 { 283 SelectInst *SI = dyn_cast<SelectInst>(LHS); 284 Value *A = RHS; 285 if (!SI) { 286 SI = dyn_cast<SelectInst>(RHS); 287 A = LHS; 288 } 289 if (SI && SI->hasOneUse()) { 290 Value *TV = SI->getTrueValue(); 291 Value *FV = SI->getFalseValue(); 292 Value *N; 293 294 // Can we fold the add into the argument of the select? 295 // We check both true and false select arguments for a matching subtract. 296 if (match(FV, m_Zero()) && 297 match(TV, m_Sub(m_Value(N), m_Specific(A)))) 298 // Fold the add into the true select value. 299 return SelectInst::Create(SI->getCondition(), N, A); 300 if (match(TV, m_Zero()) && 301 match(FV, m_Sub(m_Value(N), m_Specific(A)))) 302 // Fold the add into the false select value. 303 return SelectInst::Create(SI->getCondition(), A, N); 304 } 305 } 306 307 // Check for (add (sext x), y), see if we can merge this into an 308 // integer add followed by a sext. 309 if (SExtInst *LHSConv = dyn_cast<SExtInst>(LHS)) { 310 // (add (sext x), cst) --> (sext (add x, cst')) 311 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS)) { 312 Constant *CI = 313 ConstantExpr::getTrunc(RHSC, LHSConv->getOperand(0)->getType()); 314 if (LHSConv->hasOneUse() && 315 ConstantExpr::getSExt(CI, I.getType()) == RHSC && 316 WillNotOverflowSignedAdd(LHSConv->getOperand(0), CI)) { 317 // Insert the new, smaller add. 318 Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0), 319 CI, "addconv"); 320 return new SExtInst(NewAdd, I.getType()); 321 } 322 } 323 324 // (add (sext x), (sext y)) --> (sext (add int x, y)) 325 if (SExtInst *RHSConv = dyn_cast<SExtInst>(RHS)) { 326 // Only do this if x/y have the same type, if at last one of them has a 327 // single use (so we don't increase the number of sexts), and if the 328 // integer add will not overflow. 
329 if (LHSConv->getOperand(0)->getType()==RHSConv->getOperand(0)->getType()&& 330 (LHSConv->hasOneUse() || RHSConv->hasOneUse()) && 331 WillNotOverflowSignedAdd(LHSConv->getOperand(0), 332 RHSConv->getOperand(0))) { 333 // Insert the new integer add. 334 Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0), 335 RHSConv->getOperand(0), "addconv"); 336 return new SExtInst(NewAdd, I.getType()); 337 } 338 } 339 } 340 341 return Changed ? &I : 0; 342} 343 344Instruction *InstCombiner::visitFAdd(BinaryOperator &I) { 345 bool Changed = SimplifyCommutative(I); 346 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1); 347 348 if (Constant *RHSC = dyn_cast<Constant>(RHS)) { 349 // X + 0 --> X 350 if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHSC)) { 351 if (CFP->isExactlyValue(ConstantFP::getNegativeZero 352 (I.getType())->getValueAPF())) 353 return ReplaceInstUsesWith(I, LHS); 354 } 355 356 if (isa<PHINode>(LHS)) 357 if (Instruction *NV = FoldOpIntoPhi(I)) 358 return NV; 359 } 360 361 // -A + B --> B - A 362 // -A + -B --> -(A + B) 363 if (Value *LHSV = dyn_castFNegVal(LHS)) 364 return BinaryOperator::CreateFSub(RHS, LHSV); 365 366 // A + -B --> A - B 367 if (!isa<Constant>(RHS)) 368 if (Value *V = dyn_castFNegVal(RHS)) 369 return BinaryOperator::CreateFSub(LHS, V); 370 371 // Check for X+0.0. Simplify it to X if we know X is not -0.0. 372 if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHS)) 373 if (CFP->getValueAPF().isPosZero() && CannotBeNegativeZero(LHS)) 374 return ReplaceInstUsesWith(I, LHS); 375 376 // Check for (fadd double (sitofp x), y), see if we can merge this into an 377 // integer add followed by a promotion. 378 if (SIToFPInst *LHSConv = dyn_cast<SIToFPInst>(LHS)) { 379 // (fadd double (sitofp x), fpcst) --> (sitofp (add int x, intcst)) 380 // ... if the constant fits in the integer value. 
This is useful for things 381 // like (double)(x & 1234) + 4.0 -> (double)((X & 1234)+4) which no longer 382 // requires a constant pool load, and generally allows the add to be better 383 // instcombined. 384 if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHS)) { 385 Constant *CI = 386 ConstantExpr::getFPToSI(CFP, LHSConv->getOperand(0)->getType()); 387 if (LHSConv->hasOneUse() && 388 ConstantExpr::getSIToFP(CI, I.getType()) == CFP && 389 WillNotOverflowSignedAdd(LHSConv->getOperand(0), CI)) { 390 // Insert the new integer add. 391 Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0), 392 CI, "addconv"); 393 return new SIToFPInst(NewAdd, I.getType()); 394 } 395 } 396 397 // (fadd double (sitofp x), (sitofp y)) --> (sitofp (add int x, y)) 398 if (SIToFPInst *RHSConv = dyn_cast<SIToFPInst>(RHS)) { 399 // Only do this if x/y have the same type, if at last one of them has a 400 // single use (so we don't increase the number of int->fp conversions), 401 // and if the integer add will not overflow. 402 if (LHSConv->getOperand(0)->getType()==RHSConv->getOperand(0)->getType()&& 403 (LHSConv->hasOneUse() || RHSConv->hasOneUse()) && 404 WillNotOverflowSignedAdd(LHSConv->getOperand(0), 405 RHSConv->getOperand(0))) { 406 // Insert the new integer add. 407 Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0), 408 RHSConv->getOperand(0),"addconv"); 409 return new SIToFPInst(NewAdd, I.getType()); 410 } 411 } 412 } 413 414 return Changed ? &I : 0; 415} 416 417 418/// EmitGEPOffset - Given a getelementptr instruction/constantexpr, emit the 419/// code necessary to compute the offset from the base pointer (without adding 420/// in the base pointer). Return the result as a signed integer of intptr size. 
421Value *InstCombiner::EmitGEPOffset(User *GEP) { 422 TargetData &TD = *getTargetData(); 423 gep_type_iterator GTI = gep_type_begin(GEP); 424 const Type *IntPtrTy = TD.getIntPtrType(GEP->getContext()); 425 Value *Result = Constant::getNullValue(IntPtrTy); 426 427 // Build a mask for high order bits. 428 unsigned IntPtrWidth = TD.getPointerSizeInBits(); 429 uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth); 430 431 for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e; 432 ++i, ++GTI) { 433 Value *Op = *i; 434 uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask; 435 if (ConstantInt *OpC = dyn_cast<ConstantInt>(Op)) { 436 if (OpC->isZero()) continue; 437 438 // Handle a struct index, which adds its field offset to the pointer. 439 if (const StructType *STy = dyn_cast<StructType>(*GTI)) { 440 Size = TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue()); 441 442 Result = Builder->CreateAdd(Result, 443 ConstantInt::get(IntPtrTy, Size), 444 GEP->getName()+".offs"); 445 continue; 446 } 447 448 Constant *Scale = ConstantInt::get(IntPtrTy, Size); 449 Constant *OC = 450 ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/); 451 Scale = ConstantExpr::getMul(OC, Scale); 452 // Emit an add instruction. 453 Result = Builder->CreateAdd(Result, Scale, GEP->getName()+".offs"); 454 continue; 455 } 456 // Convert to correct type. 457 if (Op->getType() != IntPtrTy) 458 Op = Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c"); 459 if (Size != 1) { 460 Constant *Scale = ConstantInt::get(IntPtrTy, Size); 461 // We'll let instcombine(mul) convert this to a shl if possible. 462 Op = Builder->CreateMul(Op, Scale, GEP->getName()+".idx"); 463 } 464 465 // Emit an add instruction. 466 Result = Builder->CreateAdd(Op, Result, GEP->getName()+".offs"); 467 } 468 return Result; 469} 470 471 472 473 474/// Optimize pointer differences into the same array into a size. Consider: 475/// &A[10] - &A[0]: we should compile this to "10". 
LHS/RHS are the pointer 476/// operands to the ptrtoint instructions for the LHS/RHS of the subtract. 477/// 478Value *InstCombiner::OptimizePointerDifference(Value *LHS, Value *RHS, 479 const Type *Ty) { 480 assert(TD && "Must have target data info for this"); 481 482 // If LHS is a gep based on RHS or RHS is a gep based on LHS, we can optimize 483 // this. 484 bool Swapped = false; 485 GetElementPtrInst *GEP = 0; 486 ConstantExpr *CstGEP = 0; 487 488 // TODO: Could also optimize &A[i] - &A[j] -> "i-j", and "&A.foo[i] - &A.foo". 489 // For now we require one side to be the base pointer "A" or a constant 490 // expression derived from it. 491 if (GetElementPtrInst *LHSGEP = dyn_cast<GetElementPtrInst>(LHS)) { 492 // (gep X, ...) - X 493 if (LHSGEP->getOperand(0) == RHS) { 494 GEP = LHSGEP; 495 Swapped = false; 496 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(RHS)) { 497 // (gep X, ...) - (ce_gep X, ...) 498 if (CE->getOpcode() == Instruction::GetElementPtr && 499 LHSGEP->getOperand(0) == CE->getOperand(0)) { 500 CstGEP = CE; 501 GEP = LHSGEP; 502 Swapped = false; 503 } 504 } 505 } 506 507 if (GetElementPtrInst *RHSGEP = dyn_cast<GetElementPtrInst>(RHS)) { 508 // X - (gep X, ...) 509 if (RHSGEP->getOperand(0) == LHS) { 510 GEP = RHSGEP; 511 Swapped = true; 512 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(LHS)) { 513 // (ce_gep X, ...) - (gep X, ...) 514 if (CE->getOpcode() == Instruction::GetElementPtr && 515 RHSGEP->getOperand(0) == CE->getOperand(0)) { 516 CstGEP = CE; 517 GEP = RHSGEP; 518 Swapped = true; 519 } 520 } 521 } 522 523 if (GEP == 0) 524 return 0; 525 526 // Emit the offset of the GEP and an intptr_t. 527 Value *Result = EmitGEPOffset(GEP); 528 529 // If we had a constant expression GEP on the other side offsetting the 530 // pointer, subtract it from the offset we have. 531 if (CstGEP) { 532 Value *CstOffset = EmitGEPOffset(CstGEP); 533 Result = Builder->CreateSub(Result, CstOffset); 534 } 535 536 537 // If we have p - gep(p, ...) 
then we have to negate the result. 538 if (Swapped) 539 Result = Builder->CreateNeg(Result, "diff.neg"); 540 541 return Builder->CreateIntCast(Result, Ty, true); 542} 543 544 545Instruction *InstCombiner::visitSub(BinaryOperator &I) { 546 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 547 548 if (Op0 == Op1) // sub X, X -> 0 549 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType())); 550 551 // If this is a 'B = x-(-A)', change to B = x+A. This preserves NSW/NUW. 552 if (Value *V = dyn_castNegVal(Op1)) { 553 BinaryOperator *Res = BinaryOperator::CreateAdd(Op0, V); 554 Res->setHasNoSignedWrap(I.hasNoSignedWrap()); 555 Res->setHasNoUnsignedWrap(I.hasNoUnsignedWrap()); 556 return Res; 557 } 558 559 if (isa<UndefValue>(Op0)) 560 return ReplaceInstUsesWith(I, Op0); // undef - X -> undef 561 if (isa<UndefValue>(Op1)) 562 return ReplaceInstUsesWith(I, Op1); // X - undef -> undef 563 if (I.getType()->isIntegerTy(1)) 564 return BinaryOperator::CreateXor(Op0, Op1); 565 566 if (ConstantInt *C = dyn_cast<ConstantInt>(Op0)) { 567 // Replace (-1 - A) with (~A). 568 if (C->isAllOnesValue()) 569 return BinaryOperator::CreateNot(Op1); 570 571 // C - ~X == X + (1+C) 572 Value *X = 0; 573 if (match(Op1, m_Not(m_Value(X)))) 574 return BinaryOperator::CreateAdd(X, AddOne(C)); 575 576 // -(X >>u 31) -> (X >>s 31) 577 // -(X >>s 31) -> (X >>u 31) 578 if (C->isZero()) { 579 if (BinaryOperator *SI = dyn_cast<BinaryOperator>(Op1)) { 580 if (SI->getOpcode() == Instruction::LShr) { 581 if (ConstantInt *CU = dyn_cast<ConstantInt>(SI->getOperand(1))) { 582 // Check to see if we are shifting out everything but the sign bit. 583 if (CU->getLimitedValue(SI->getType()->getPrimitiveSizeInBits()) == 584 SI->getType()->getPrimitiveSizeInBits()-1) { 585 // Ok, the transformation is safe. Insert AShr. 
586 return BinaryOperator::Create(Instruction::AShr, 587 SI->getOperand(0), CU, SI->getName()); 588 } 589 } 590 } else if (SI->getOpcode() == Instruction::AShr) { 591 if (ConstantInt *CU = dyn_cast<ConstantInt>(SI->getOperand(1))) { 592 // Check to see if we are shifting out everything but the sign bit. 593 if (CU->getLimitedValue(SI->getType()->getPrimitiveSizeInBits()) == 594 SI->getType()->getPrimitiveSizeInBits()-1) { 595 // Ok, the transformation is safe. Insert LShr. 596 return BinaryOperator::CreateLShr( 597 SI->getOperand(0), CU, SI->getName()); 598 } 599 } 600 } 601 } 602 } 603 604 // Try to fold constant sub into select arguments. 605 if (SelectInst *SI = dyn_cast<SelectInst>(Op1)) 606 if (Instruction *R = FoldOpIntoSelect(I, SI)) 607 return R; 608 609 // C - zext(bool) -> bool ? C - 1 : C 610 if (ZExtInst *ZI = dyn_cast<ZExtInst>(Op1)) 611 if (ZI->getSrcTy() == Type::getInt1Ty(I.getContext())) 612 return SelectInst::Create(ZI->getOperand(0), SubOne(C), C); 613 } 614 615 if (BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1)) { 616 if (Op1I->getOpcode() == Instruction::Add) { 617 if (Op1I->getOperand(0) == Op0) // X-(X+Y) == -Y 618 return BinaryOperator::CreateNeg(Op1I->getOperand(1), 619 I.getName()); 620 else if (Op1I->getOperand(1) == Op0) // X-(Y+X) == -Y 621 return BinaryOperator::CreateNeg(Op1I->getOperand(0), 622 I.getName()); 623 else if (ConstantInt *CI1 = dyn_cast<ConstantInt>(I.getOperand(0))) { 624 if (ConstantInt *CI2 = dyn_cast<ConstantInt>(Op1I->getOperand(1))) 625 // C1-(X+C2) --> (C1-C2)-X 626 return BinaryOperator::CreateSub( 627 ConstantExpr::getSub(CI1, CI2), Op1I->getOperand(0)); 628 } 629 } 630 631 if (Op1I->hasOneUse()) { 632 // Replace (x - (y - z)) with (x + (z - y)) if the (y - z) subexpression 633 // is not used by anyone else... 634 // 635 if (Op1I->getOpcode() == Instruction::Sub) { 636 // Swap the two operands of the subexpr... 
637 Value *IIOp0 = Op1I->getOperand(0), *IIOp1 = Op1I->getOperand(1); 638 Op1I->setOperand(0, IIOp1); 639 Op1I->setOperand(1, IIOp0); 640 641 // Create the new top level add instruction... 642 return BinaryOperator::CreateAdd(Op0, Op1); 643 } 644 645 // Replace (A - (A & B)) with (A & ~B) if this is the only use of (A&B)... 646 // 647 if (Op1I->getOpcode() == Instruction::And && 648 (Op1I->getOperand(0) == Op0 || Op1I->getOperand(1) == Op0)) { 649 Value *OtherOp = Op1I->getOperand(Op1I->getOperand(0) == Op0); 650 651 Value *NewNot = Builder->CreateNot(OtherOp, "B.not"); 652 return BinaryOperator::CreateAnd(Op0, NewNot); 653 } 654 655 // 0 - (X sdiv C) -> (X sdiv -C) 656 if (Op1I->getOpcode() == Instruction::SDiv) 657 if (ConstantInt *CSI = dyn_cast<ConstantInt>(Op0)) 658 if (CSI->isZero()) 659 if (Constant *DivRHS = dyn_cast<Constant>(Op1I->getOperand(1))) 660 return BinaryOperator::CreateSDiv(Op1I->getOperand(0), 661 ConstantExpr::getNeg(DivRHS)); 662 663 // 0 - (C << X) -> (-C << X) 664 if (Op1I->getOpcode() == Instruction::Shl) 665 if (ConstantInt *CSI = dyn_cast<ConstantInt>(Op0)) 666 if (CSI->isZero()) 667 if (Value *ShlLHSNeg = dyn_castNegVal(Op1I->getOperand(0))) 668 return BinaryOperator::CreateShl(ShlLHSNeg, Op1I->getOperand(1)); 669 670 // X - X*C --> X * (1-C) 671 ConstantInt *C2 = 0; 672 if (dyn_castFoldableMul(Op1I, C2) == Op0) { 673 Constant *CP1 = 674 ConstantExpr::getSub(ConstantInt::get(I.getType(), 1), 675 C2); 676 return BinaryOperator::CreateMul(Op0, CP1); 677 } 678 } 679 } 680 681 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) { 682 if (Op0I->getOpcode() == Instruction::Add) { 683 if (Op0I->getOperand(0) == Op1) // (Y+X)-Y == X 684 return ReplaceInstUsesWith(I, Op0I->getOperand(1)); 685 else if (Op0I->getOperand(1) == Op1) // (X+Y)-Y == X 686 return ReplaceInstUsesWith(I, Op0I->getOperand(0)); 687 } else if (Op0I->getOpcode() == Instruction::Sub) { 688 if (Op0I->getOperand(0) == Op1) // (X-Y)-X == -Y 689 return 
BinaryOperator::CreateNeg(Op0I->getOperand(1), 690 I.getName()); 691 } 692 } 693 694 ConstantInt *C1; 695 if (Value *X = dyn_castFoldableMul(Op0, C1)) { 696 if (X == Op1) // X*C - X --> X * (C-1) 697 return BinaryOperator::CreateMul(Op1, SubOne(C1)); 698 699 ConstantInt *C2; // X*C1 - X*C2 -> X * (C1-C2) 700 if (X == dyn_castFoldableMul(Op1, C2)) 701 return BinaryOperator::CreateMul(X, ConstantExpr::getSub(C1, C2)); 702 } 703 704 // Optimize pointer differences into the same array into a size. Consider: 705 // &A[10] - &A[0]: we should compile this to "10". 706 if (TD) { 707 Value *LHSOp, *RHSOp; 708 if (match(Op0, m_PtrToInt(m_Value(LHSOp))) && 709 match(Op1, m_PtrToInt(m_Value(RHSOp)))) 710 if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType())) 711 return ReplaceInstUsesWith(I, Res); 712 713 // trunc(p)-trunc(q) -> trunc(p-q) 714 if (match(Op0, m_Trunc(m_PtrToInt(m_Value(LHSOp)))) && 715 match(Op1, m_Trunc(m_PtrToInt(m_Value(RHSOp))))) 716 if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType())) 717 return ReplaceInstUsesWith(I, Res); 718 } 719 720 return 0; 721} 722 723Instruction *InstCombiner::visitFSub(BinaryOperator &I) { 724 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 725 726 // If this is a 'B = x-(-A)', change to B = x+A... 727 if (Value *V = dyn_castFNegVal(Op1)) 728 return BinaryOperator::CreateFAdd(Op0, V); 729 730 return 0; 731} 732