ValueTracking.cpp revision 263508
//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/PatternMatch.h"
#include <cstring>
using namespace llvm;
using namespace llvm::PatternMatch;

const unsigned MaxDepth = 6;

/// getBitWidth - Returns the bitwidth of the given scalar or pointer type (if
/// unknown returns 0). For vector types, returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const DataLayout *TD) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return TD ? TD->getPointerTypeSizeInBits(Ty) : 0;
}

static void ComputeMaskedBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
                                    APInt &KnownZero, APInt &KnownOne,
                                    APInt &KnownZero2, APInt &KnownOne2,
                                    const DataLayout *TD, unsigned Depth) {
  if (!Add) {
    if (ConstantInt *CLHS = dyn_cast<ConstantInt>(Op0)) {
      // We know that the top bits of C-X are clear if X contains fewer bits
      // than C (i.e. no wrap-around can happen).  For example, 20-X is
      // positive if we can prove that X is >= 0 and < 16.
      if (!CLHS->getValue().isNegative()) {
        unsigned BitWidth = KnownZero.getBitWidth();
        unsigned NLZ = (CLHS->getValue()+1).countLeadingZeros();
        // NLZ can't be BitWidth with no sign bit
        APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
        llvm::ComputeMaskedBits(Op1, KnownZero2, KnownOne2, TD, Depth+1);

        // If all of the MaskV bits are known to be zero, then we know the
        // output top bits are zero, because we now know that the output is
        // from [0-C].
        if ((KnownZero2 & MaskV) == MaskV) {
          unsigned NLZ2 = CLHS->getValue().countLeadingZeros();
          // Top bits known zero.
          KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
        }
      }
    }
  }

  unsigned BitWidth = KnownZero.getBitWidth();

  // If one of the operands has trailing zeros, then the bits that the
  // other operand has in those bit positions will be preserved in the
  // result. For an add, this works with either operand. For a subtract,
  // this only works if the known zeros are in the right operand.
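  // For example (illustrative): if Op1 = shl i32 %x, 3 (low three bits known
  // zero), then for an add the low three bits of the result are exactly the
  // low three bits of Op0; for a subtract the same holds because Op1 is the
  // right operand.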
  APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
  llvm::ComputeMaskedBits(Op0, LHSKnownZero, LHSKnownOne, TD, Depth+1);
  assert((LHSKnownZero & LHSKnownOne) == 0 &&
         "Bits known to be one AND zero?");
  unsigned LHSKnownZeroOut = LHSKnownZero.countTrailingOnes();

  llvm::ComputeMaskedBits(Op1, KnownZero2, KnownOne2, TD, Depth+1);
  assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
  unsigned RHSKnownZeroOut = KnownZero2.countTrailingOnes();

  // Determine which operand has more trailing zeros, and use that
  // many bits from the other operand.
  if (LHSKnownZeroOut > RHSKnownZeroOut) {
    if (Add) {
      APInt Mask = APInt::getLowBitsSet(BitWidth, LHSKnownZeroOut);
      KnownZero |= KnownZero2 & Mask;
      KnownOne  |= KnownOne2 & Mask;
    } else {
      // If the known zeros are in the left operand for a subtract,
      // fall back to the minimum known zeros in both operands.
      KnownZero |= APInt::getLowBitsSet(BitWidth,
                                        std::min(LHSKnownZeroOut,
                                                 RHSKnownZeroOut));
    }
  } else if (RHSKnownZeroOut >= LHSKnownZeroOut) {
    APInt Mask = APInt::getLowBitsSet(BitWidth, RHSKnownZeroOut);
    KnownZero |= LHSKnownZero & Mask;
    KnownOne  |= LHSKnownOne & Mask;
  }

  // Are we still trying to solve for the sign bit?
  if (!KnownZero.isNegative() && !KnownOne.isNegative()) {
    if (NSW) {
      if (Add) {
        // Adding two positive numbers can't wrap into negative
        if (LHSKnownZero.isNegative() && KnownZero2.isNegative())
          KnownZero |= APInt::getSignBit(BitWidth);
        // and adding two negative numbers can't wrap into positive.
        else if (LHSKnownOne.isNegative() && KnownOne2.isNegative())
          KnownOne |= APInt::getSignBit(BitWidth);
      } else {
        // Subtracting a negative number from a positive one can't wrap
        if (LHSKnownZero.isNegative() && KnownOne2.isNegative())
          KnownZero |= APInt::getSignBit(BitWidth);
        // neither can subtracting a positive number from a negative one.
        else if (LHSKnownOne.isNegative() && KnownZero2.isNegative())
          KnownOne |= APInt::getSignBit(BitWidth);
      }
    }
  }
}

static void ComputeMaskedBitsMul(Value *Op0, Value *Op1, bool NSW,
                                 APInt &KnownZero, APInt &KnownOne,
                                 APInt &KnownZero2, APInt &KnownOne2,
                                 const DataLayout *TD, unsigned Depth) {
  unsigned BitWidth = KnownZero.getBitWidth();
  ComputeMaskedBits(Op1, KnownZero, KnownOne, TD, Depth+1);
  ComputeMaskedBits(Op0, KnownZero2, KnownOne2, TD, Depth+1);
  assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
  assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = KnownZero.isNegative();
      bool isKnownNonNegativeOp0 = KnownZero2.isNegative();
      bool isKnownNegativeOp1 = KnownOne.isNegative();
      bool isKnownNegativeOp0 = KnownOne2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
        (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
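      // For example (illustrative): with i8 operands, the product of a value
      // known negative and a value known non-negative is strictly negative
      // only if the non-negative operand is also known non-zero; -3 * 0 shows
      // why the isKnownNonZero checks below are required.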
      if (!isKnownNonNegative)
        isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
                           isKnownNonZero(Op0, TD, Depth)) ||
                          (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
                           isKnownNonZero(Op1, TD, Depth));
    }
  }

  // If low bits are zero in either operand, output low known-0 bits.
  // Also compute a conservative estimate for high known-0 bits.
  // More trickiness is possible, but this is sufficient for the
  // interesting case of alignment computation.
  KnownOne.clearAllBits();
  unsigned TrailZ = KnownZero.countTrailingOnes() +
                    KnownZero2.countTrailingOnes();
  unsigned LeadZ = std::max(KnownZero.countLeadingOnes() +
                            KnownZero2.countLeadingOnes(),
                            BitWidth) - BitWidth;

  TrailZ = std::min(TrailZ, BitWidth);
  LeadZ = std::min(LeadZ, BitWidth);
  KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
              APInt::getHighBitsSet(BitWidth, LeadZ);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly.  This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !KnownOne.isNegative())
    KnownZero.setBit(BitWidth - 1);
  else if (isKnownNegative && !KnownZero.isNegative())
    KnownOne.setBit(BitWidth - 1);
}

void llvm::computeMaskedBitsLoad(const MDNode &Ranges, APInt &KnownZero) {
  unsigned BitWidth = KnownZero.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  // Use the high end of the ranges to find leading zeros.
  unsigned MinLeadingZeros = BitWidth;
  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower = cast<ConstantInt>(Ranges.getOperand(2*i + 0));
    ConstantInt *Upper = cast<ConstantInt>(Ranges.getOperand(2*i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());
    if (Range.isWrappedSet())
      MinLeadingZeros = 0; // -1 has no zeros
    unsigned LeadingZeros = (Upper->getValue() - 1).countLeadingZeros();
    MinLeadingZeros = std::min(LeadingZeros, MinLeadingZeros);
  }

  KnownZero = APInt::getHighBitsSet(BitWidth, MinLeadingZeros);
}

/// ComputeMaskedBits - Determine which of the bits are known to be either zero
/// or one and return them in the KnownZero/KnownOne bit sets.
///
/// NOTE: we cannot consider 'undef' to be "IsZero" here.  The problem is that
/// we cannot optimize based on the assumption that it is zero without changing
/// it to be an explicit zero.  If we don't change it to zero, other code could
/// be optimized based on the contradictory assumption that it is non-zero.
/// Because instcombine aggressively folds operations with undef args anyway,
/// this won't lose us code quality.
///
/// This function is defined on values with integer type, values with pointer
/// type (but only if TD is non-null), and vectors of integers.  In the case
/// where V is a vector, the known zero and known one values are the same
/// width as the vector element, and the bit is set only if it is true for
/// all of the elements in the vector.
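///
/// For example (illustrative): querying %v = and i8 %x, 12 yields
/// KnownZero = 0xF3 and KnownOne = 0x00, i.e. only bits 2 and 3 of %v can
/// possibly be set.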
void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
                             const DataLayout *TD, unsigned Depth) {
  assert(V && "No Value?");
  assert(Depth <= MaxDepth && "Limit Search Depth");
  unsigned BitWidth = KnownZero.getBitWidth();

  assert((V->getType()->isIntOrIntVectorTy() ||
          V->getType()->getScalarType()->isPointerTy()) &&
         "Not integer or pointer type!");
  assert((!TD ||
          TD->getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) &&
         (!V->getType()->isIntOrIntVectorTy() ||
          V->getType()->getScalarSizeInBits() == BitWidth) &&
         KnownZero.getBitWidth() == BitWidth &&
         KnownOne.getBitWidth() == BitWidth &&
         "V, Mask, KnownOne and KnownZero should have same BitWidth");

  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    // We know all of the bits for a constant!
    KnownOne = CI->getValue();
    KnownZero = ~KnownOne;
    return;
  }
  // Null and aggregate-zero are all-zeros.
  if (isa<ConstantPointerNull>(V) ||
      isa<ConstantAggregateZero>(V)) {
    KnownOne.clearAllBits();
    KnownZero = APInt::getAllOnesValue(BitWidth);
    return;
  }
  // Handle a constant vector by taking the intersection of the known bits of
  // each element.  There is no real need to handle ConstantVector here,
  // because we don't handle undef in any particularly useful way.
  if (ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
    // We know that CDS must be a vector of integers. Take the intersection of
    // each element.
    KnownZero.setAllBits(); KnownOne.setAllBits();
    APInt Elt(KnownZero.getBitWidth(), 0);
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      Elt = CDS->getElementAsInteger(i);
      KnownZero &= ~Elt;
      KnownOne &= Elt;
    }
    return;
  }

  // The address of an aligned GlobalValue has trailing zeros.
  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    unsigned Align = GV->getAlignment();
    if (Align == 0 && TD) {
      if (GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV)) {
        Type *ObjectType = GVar->getType()->getElementType();
        if (ObjectType->isSized()) {
          // If the object is defined in the current Module, we'll be giving
          // it the preferred alignment. Otherwise, we have to assume that it
          // may only have the minimum ABI alignment.
          if (!GVar->isDeclaration() && !GVar->isWeakForLinker())
            Align = TD->getPreferredAlignment(GVar);
          else
            Align = TD->getABITypeAlignment(ObjectType);
        }
      }
    }
    if (Align > 0)
      KnownZero = APInt::getLowBitsSet(BitWidth,
                                       countTrailingZeros(Align));
    else
      KnownZero.clearAllBits();
    KnownOne.clearAllBits();
    return;
  }
  // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
  // the bits of its aliasee.
  if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
    if (GA->mayBeOverridden()) {
      KnownZero.clearAllBits(); KnownOne.clearAllBits();
    } else {
      ComputeMaskedBits(GA->getAliasee(), KnownZero, KnownOne, TD, Depth+1);
    }
    return;
  }

  if (Argument *A = dyn_cast<Argument>(V)) {
    unsigned Align = 0;

    if (A->hasByValAttr()) {
      // Get alignment information off byval arguments if specified in the IR.
      Align = A->getParamAlignment();
    } else if (TD && A->hasStructRetAttr()) {
      // An sret parameter has at least the ABI alignment of the return type.
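      // (Illustrative) For an sret pointer to a struct with 8-byte ABI
      // alignment, the three low bits of the argument are known zero.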
      Type *EltTy = cast<PointerType>(A->getType())->getElementType();
      if (EltTy->isSized())
        Align = TD->getABITypeAlignment(EltTy);
    }

    if (Align)
      KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
    return;
  }

  // Start out not knowing anything.
  KnownZero.clearAllBits(); KnownOne.clearAllBits();

  if (Depth == MaxDepth)
    return;  // Limit search depth.

  Operator *I = dyn_cast<Operator>(V);
  if (!I) return;

  APInt KnownZero2(KnownZero), KnownOne2(KnownOne);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
      computeMaskedBitsLoad(*MD, KnownZero);
    return;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    ComputeMaskedBits(I->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
    ComputeMaskedBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-1 bits are only known if set in both the LHS & RHS.
    KnownOne &= KnownOne2;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    KnownZero |= KnownZero2;
    return;
  }
  case Instruction::Or: {
    ComputeMaskedBits(I->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
    ComputeMaskedBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    KnownZero &= KnownZero2;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    KnownOne |= KnownOne2;
    return;
  }
  case Instruction::Xor: {
    ComputeMaskedBits(I->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
    ComputeMaskedBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
    KnownZero = KnownZeroOut;
    return;
  }
  case Instruction::Mul: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    ComputeMaskedBitsMul(I->getOperand(0), I->getOperand(1), NSW,
                         KnownZero, KnownOne, KnownZero2, KnownOne2, TD,
                         Depth);
    break;
  }
  case Instruction::UDiv: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
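    // For example (illustrative): an i8 numerator known to be < 64 (two
    // leading zero bits) divided by a denominator known to be >= 16 gives a
    // quotient of at most 3, i.e. at least six leading zero bits.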
    ComputeMaskedBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
    unsigned LeadZ = KnownZero2.countLeadingOnes();

    KnownOne2.clearAllBits();
    KnownZero2.clearAllBits();
    ComputeMaskedBits(I->getOperand(1), KnownZero2, KnownOne2, TD, Depth+1);
    unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
    if (RHSUnknownLeadingOnes != BitWidth)
      LeadZ = std::min(BitWidth,
                       LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);

    KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
    return;
  }
  case Instruction::Select:
    ComputeMaskedBits(I->getOperand(2), KnownZero, KnownOne, TD, Depth+1);
    ComputeMaskedBits(I->getOperand(1), KnownZero2, KnownOne2, TD,
                      Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    return;
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    return; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // We can't handle these if we don't know the pointer size.
    if (!TD) return;
    // FALL THROUGH and handle them the same as zext/trunc.
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    if (TD) {
      SrcBitWidth = TD->getTypeSizeInBits(SrcTy->getScalarType());
    } else {
      SrcBitWidth = SrcTy->getScalarSizeInBits();
      if (!SrcBitWidth) return;
    }

    assert(SrcBitWidth && "SrcBitWidth can't be zero");
    KnownZero = KnownZero.zextOrTrunc(SrcBitWidth);
    KnownOne = KnownOne.zextOrTrunc(SrcBitWidth);
    ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
    KnownZero = KnownZero.zextOrTrunc(BitWidth);
    KnownOne = KnownOne.zextOrTrunc(BitWidth);
    // Any top bits are known to be zero.
    if (BitWidth > SrcBitWidth)
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    return;
  }
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
      return;
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    KnownZero = KnownZero.trunc(SrcBitWidth);
    KnownOne = KnownOne.trunc(SrcBitWidth);
    ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
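    // For example (illustrative): sign-extending an i8 whose bit 7 is known
    // zero to i32 makes bits 8..31 known zero; if bit 7 were known one, they
    // would all be known one instead.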
    if (KnownZero[SrcBitWidth-1])             // Input sign bit known zero
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    else if (KnownOne[SrcBitWidth-1])           // Input sign bit known set
      KnownOne |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    return;
  }
  case Instruction::Shl:
    // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
      ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero <<= ShiftAmt;
      KnownOne  <<= ShiftAmt;
      KnownZero |= APInt::getLowBitsSet(BitWidth, ShiftAmt); // low bits known 0
      return;
    }
    break;
  case Instruction::LShr:
    // (ushr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      // Compute the new bits that are at the top now.
      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);

      // Unsigned shift right.
      ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
      KnownOne  = APIntOps::lshr(KnownOne, ShiftAmt);
      // high bits known zero.
      KnownZero |= APInt::getHighBitsSet(BitWidth, ShiftAmt);
      return;
    }
    break;
  case Instruction::AShr:
    // (ashr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      // Compute the new bits that are at the top now.
      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);

      // Signed shift right.
      ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
      KnownOne  = APIntOps::lshr(KnownOne, ShiftAmt);

      APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
      if (KnownZero[BitWidth-ShiftAmt-1])    // New bits are known zero.
        KnownZero |= HighBits;
      else if (KnownOne[BitWidth-ShiftAmt-1])  // New bits are known one.
        KnownOne |= HighBits;
      return;
    }
    break;
  case Instruction::Sub: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    ComputeMaskedBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
                            KnownZero, KnownOne, KnownZero2, KnownOne2, TD,
                            Depth);
    break;
  }
  case Instruction::Add: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    ComputeMaskedBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
                            KnownZero, KnownOne, KnownZero2, KnownOne2, TD,
                            Depth);
    break;
  }
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue().abs();
      if (RA.isPowerOf2()) {
        APInt LowBits = RA - 1;
        ComputeMaskedBits(I->getOperand(0), KnownZero2, KnownOne2, TD,
                          Depth+1);

        // The low bits of the first operand are unchanged by the srem.
        KnownZero = KnownZero2 & LowBits;
        KnownOne = KnownOne2 & LowBits;

        // If the first operand is non-negative or has all low bits zero, then
        // the upper bits are all zero.
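        // For example (illustrative): srem i32 %x, 8 with %x known
        // non-negative yields a result in [0, 7], so bits 3 and up are known
        // zero.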
        if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
          KnownZero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero,
        // then the upper bits are all one.
        if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
          KnownOne |= ~LowBits;

        assert((KnownZero & KnownOne) == 0 &&
               "Bits known to be one AND zero?");
      }
    }

    // The sign bit is the LHS's sign bit, except when the result of the
    // remainder is zero.
    if (KnownZero.isNonNegative()) {
      APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
      ComputeMaskedBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, TD,
                        Depth+1);
      // If it's known zero, our sign bit is also zero.
      if (LHSKnownZero.isNegative())
        KnownZero.setBit(BitWidth - 1);
    }

    break;
  case Instruction::URem: {
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD,
                          Depth+1);
        assert((KnownZero & KnownOne) == 0 &&
               "Bits known to be one AND zero?");
        KnownZero |= ~LowBits;
        KnownOne &= LowBits;
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
    ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
    ComputeMaskedBits(I->getOperand(1), KnownZero2, KnownOne2, TD, Depth+1);

    unsigned Leaders = std::max(KnownZero.countLeadingOnes(),
                                KnownZero2.countLeadingOnes());
    KnownOne.clearAllBits();
    KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
    break;
  }

  case Instruction::Alloca: {
    AllocaInst *AI = cast<AllocaInst>(V);
    unsigned Align = AI->getAlignment();
    if (Align == 0 && TD)
      Align = TD->getABITypeAlignment(AI->getType()->getElementType());

    if (Align > 0)
      KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
    break;
  }
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    APInt LocalKnownZero(BitWidth, 0), LocalKnownOne(BitWidth, 0);
    ComputeMaskedBits(I->getOperand(0), LocalKnownZero, LocalKnownOne, TD,
                      Depth+1);
    unsigned TrailZ = LocalKnownZero.countTrailingOnes();

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      Value *Index = I->getOperand(i);
      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
        // Handle struct member offset arithmetic.
        if (!TD)
          return;

        // Handle case when index is vector zeroinitializer
        Constant *CIndex = cast<Constant>(Index);
        if (CIndex->isZeroValue())
          continue;

        if (CIndex->getType()->isVectorTy())
          Index = CIndex->getSplatValue();

        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        const StructLayout *SL = TD->getStructLayout(STy);
        uint64_t Offset = SL->getElementOffset(Idx);
        TrailZ = std::min<unsigned>(TrailZ,
                                    countTrailingZeros(Offset));
      } else {
        // Handle array index arithmetic.
        Type *IndexedTy = GTI.getIndexedType();
        if (!IndexedTy->isSized()) return;
        unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
        uint64_t TypeSize = TD ?
          TD->getTypeAllocSize(IndexedTy) : 1;
        LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0);
        ComputeMaskedBits(Index, LocalKnownZero, LocalKnownOne, TD, Depth+1);
        TrailZ = std::min(TrailZ,
                          unsigned(countTrailingZeros(TypeSize) +
                                   LocalKnownZero.countTrailingOnes()));
      }
    }

    KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ);
    break;
  }
  case Instruction::PHI: {
    PHINode *P = cast<PHINode>(I);
    // Handle the case of a simple two-predecessor recurrence PHI.
    // There's a lot more that could theoretically be done here, but
    // this is sufficient to catch some interesting cases.
    if (P->getNumIncomingValues() == 2) {
      for (unsigned i = 0; i != 2; ++i) {
        Value *L = P->getIncomingValue(i);
        Value *R = P->getIncomingValue(!i);
        Operator *LU = dyn_cast<Operator>(L);
        if (!LU)
          continue;
        unsigned Opcode = LU->getOpcode();
        // Check for operations that have the property that if
        // both their operands have low zero bits, the result
        // will have low zero bits.
        if (Opcode == Instruction::Add ||
            Opcode == Instruction::Sub ||
            Opcode == Instruction::And ||
            Opcode == Instruction::Or ||
            Opcode == Instruction::Mul) {
          Value *LL = LU->getOperand(0);
          Value *LR = LU->getOperand(1);
          // Find a recurrence.
          if (LL == I)
            L = LR;
          else if (LR == I)
            L = LL;
          else
            break;
          // Ok, we have a PHI of the form L op= R. Check for low
          // zero bits.
          ComputeMaskedBits(R, KnownZero2, KnownOne2, TD, Depth+1);

          // We need to take the minimum number of known bits
          APInt KnownZero3(KnownZero), KnownOne3(KnownOne);
          ComputeMaskedBits(L, KnownZero3, KnownOne3, TD, Depth+1);

          KnownZero = APInt::getLowBitsSet(BitWidth,
                                std::min(KnownZero2.countTrailingOnes(),
                                         KnownZero3.countTrailingOnes()));
          break;
        }
      }
    }

    // Unreachable blocks may have zero-operand PHI nodes.
    if (P->getNumIncomingValues() == 0)
      return;

    // Otherwise take the unions of the known bit sets of the operands,
    // taking conservative care to avoid excessive recursion.
    if (Depth < MaxDepth - 1 && !KnownZero && !KnownOne) {
      // Skip if every incoming value references ourself.
      if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
        break;

      KnownZero = APInt::getAllOnesValue(BitWidth);
      KnownOne = APInt::getAllOnesValue(BitWidth);
      for (unsigned i = 0, e = P->getNumIncomingValues(); i != e; ++i) {
        // Skip direct self references.
        if (P->getIncomingValue(i) == P) continue;

        KnownZero2 = APInt(BitWidth, 0);
        KnownOne2 = APInt(BitWidth, 0);
        // Recurse, but cap the recursion to one level, because we don't
        // want to waste time spinning around in loops.
        ComputeMaskedBits(P->getIncomingValue(i), KnownZero2, KnownOne2, TD,
                          MaxDepth-1);
        KnownZero &= KnownZero2;
        KnownOne &= KnownOne2;
        // If all bits have been ruled out, there's no need to check
        // more operands.
        if (!KnownZero && !KnownOne)
          break;
      }
    }
    break;
  }
  case Instruction::Call:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::ctlz:
      case Intrinsic::cttz: {
        unsigned LowBits = Log2_32(BitWidth)+1;
        // If this call is undefined for 0, the result will be less than 2^n.
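        // For example (illustrative): cttz on i32 can produce 0..32 and needs
        // 6 bits, but with the is_zero_undef argument set to true the result
        // is at most 31, so it fits in 5 bits and one more high bit is known
        // zero.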
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          LowBits -= 1;
        KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
        break;
      }
      case Intrinsic::ctpop: {
        unsigned LowBits = Log2_32(BitWidth)+1;
        KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
        break;
      }
      case Intrinsic::x86_sse42_crc32_64_64:
        KnownZero = APInt::getHighBitsSet(64, 32);
        break;
      }
    }
    break;
  case Instruction::ExtractValue:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
      ExtractValueInst *EVI = cast<ExtractValueInst>(I);
      if (EVI->getNumIndices() != 1) break;
      if (EVI->getIndices()[0] == 0) {
        switch (II->getIntrinsicID()) {
        default: break;
        case Intrinsic::uadd_with_overflow:
        case Intrinsic::sadd_with_overflow:
          ComputeMaskedBitsAddSub(true, II->getArgOperand(0),
                                  II->getArgOperand(1), false, KnownZero,
                                  KnownOne, KnownZero2, KnownOne2, TD, Depth);
          break;
        case Intrinsic::usub_with_overflow:
        case Intrinsic::ssub_with_overflow:
          ComputeMaskedBitsAddSub(false, II->getArgOperand(0),
                                  II->getArgOperand(1), false, KnownZero,
                                  KnownOne, KnownZero2, KnownOne2, TD, Depth);
          break;
        case Intrinsic::umul_with_overflow:
        case Intrinsic::smul_with_overflow:
          ComputeMaskedBitsMul(II->getArgOperand(0), II->getArgOperand(1),
                               false, KnownZero, KnownOne,
                               KnownZero2, KnownOne2, TD, Depth);
          break;
        }
      }
    }
  }
}

/// ComputeSignBit - Determine whether the sign bit is known to be zero or
/// one.  Convenience wrapper around ComputeMaskedBits.
void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
                          const DataLayout *TD, unsigned Depth) {
  unsigned BitWidth = getBitWidth(V->getType(), TD);
  if (!BitWidth) {
    KnownZero = false;
    KnownOne = false;
    return;
  }
  APInt ZeroBits(BitWidth, 0);
  APInt OneBits(BitWidth, 0);
  ComputeMaskedBits(V, ZeroBits, OneBits, TD, Depth);
  KnownOne = OneBits[BitWidth - 1];
  KnownZero = ZeroBits[BitWidth - 1];
}

/// isKnownToBeAPowerOfTwo - Return true if the given value is known to have
/// exactly one bit set when defined. For vectors return true if every element
/// is known to be a power of two when defined. Supports values with integer
/// or pointer types and vectors of integers.
bool llvm::isKnownToBeAPowerOfTwo(Value *V, bool OrZero, unsigned Depth) {
  if (Constant *C = dyn_cast<Constant>(V)) {
    if (C->isNullValue())
      return OrZero;
    if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
      return CI->getValue().isPowerOf2();
    // TODO: Handle vector constants.
  }

  // 1 << X is clearly a power of two if the one is not shifted off the end.
  // If it is shifted off the end then the result is undefined.
  if (match(V, m_Shl(m_One(), m_Value())))
    return true;

  // (signbit) >>l X is clearly a power of two if the one is not shifted off
  // the bottom. If it is shifted off the bottom then the result is undefined.
  if (match(V, m_LShr(m_SignBit(), m_Value())))
    return true;

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ == MaxDepth)
    return false;

  Value *X = 0, *Y = 0;
  // A shift of a power of two is a power of two or zero.
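  // For example (illustrative): if %p is a power of two, shl i32 %p, %n is
  // either a power of two or zero (when the set bit is shifted out), which is
  // why this case requires OrZero.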
  if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
                 match(V, m_Shr(m_Value(X), m_Value()))))
    return isKnownToBeAPowerOfTwo(X, /*OrZero*/true, Depth);

  if (ZExtInst *ZI = dyn_cast<ZExtInst>(V))
    return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth);

  if (SelectInst *SI = dyn_cast<SelectInst>(V))
    return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth) &&
           isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth);

  if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
    // A power of two and'd with anything is a power of two or zero.
    if (isKnownToBeAPowerOfTwo(X, /*OrZero*/true, Depth) ||
        isKnownToBeAPowerOfTwo(Y, /*OrZero*/true, Depth))
      return true;
    // X & (-X) is always a power of two or zero.
    if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
      return true;
    return false;
  }

  // Adding a power-of-two or zero to the same power-of-two or zero yields
  // either the original power-of-two, a larger power-of-two or zero.
  if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
    OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
    if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) {
      if (match(X, m_And(m_Specific(Y), m_Value())) ||
          match(X, m_And(m_Value(), m_Specific(Y))))
        if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth))
          return true;
      if (match(Y, m_And(m_Specific(X), m_Value())) ||
          match(Y, m_And(m_Value(), m_Specific(X))))
        if (isKnownToBeAPowerOfTwo(X, OrZero, Depth))
          return true;

      unsigned BitWidth = V->getType()->getScalarSizeInBits();
      APInt LHSZeroBits(BitWidth, 0), LHSOneBits(BitWidth, 0);
      ComputeMaskedBits(X, LHSZeroBits, LHSOneBits, 0, Depth);

      APInt RHSZeroBits(BitWidth, 0), RHSOneBits(BitWidth, 0);
      ComputeMaskedBits(Y, RHSZeroBits, RHSOneBits, 0, Depth);
      // If i8 V is a power of two or zero:
      //  ZeroBits: 1 1 1 0 1 1 1 1
      // ~ZeroBits: 0 0 0 1 0 0 0 0
      if ((~(LHSZeroBits & RHSZeroBits)).isPowerOf2())
        // If OrZero isn't set, we cannot give back a zero result.
        // Make sure either the LHS or RHS has a bit set.
        if (OrZero || RHSOneBits.getBoolValue() || LHSOneBits.getBoolValue())
          return true;
    }
  }

  // An exact divide or right shift can only shift off zero bits, so the
  // result is a power of two only if the first operand is a power of two and
  // not copying a sign bit (sdiv int_min, 2).
  if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
      match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
    return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
                                  Depth);
  }

  return false;
}

/// \brief Test whether a GEP's result is known to be non-null.
///
/// Uses properties inherent in a GEP to try to determine whether it is known
/// to be non-null.
///
/// Currently this routine does not support vector GEPs.
static bool isGEPKnownNonNull(GEPOperator *GEP, const DataLayout *DL,
                              unsigned Depth) {
  if (!GEP->isInBounds() || GEP->getPointerAddressSpace() != 0)
    return false;

  // FIXME: Support vector-GEPs.
  assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");

  // If the base pointer is non-null, we cannot walk to a null address with an
  // inbounds GEP in address space zero.
  if (isKnownNonZero(GEP->getPointerOperand(), DL, Depth))
    return true;

  // Past this, if we don't have DataLayout, we can't do much.
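  // (The offsets examined below come from struct layouts and type allocation
  // sizes, both of which require DataLayout.)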
  if (!DL)
    return false;

  // Walk the GEP operands and see if any operand introduces a non-zero offset.
  // If so, then the GEP cannot produce a null pointer, as doing so would
  // inherently violate the inbounds contract within address space zero.
  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    // Struct types are easy -- they must always be indexed by a constant.
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
      unsigned ElementIdx = OpC->getZExtValue();
      const StructLayout *SL = DL->getStructLayout(STy);
      uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
      if (ElementOffset > 0)
        return true;
      continue;
    }

    // If we have a zero-sized type, the index doesn't matter. Keep looping.
    if (DL->getTypeAllocSize(GTI.getIndexedType()) == 0)
      continue;

    // Fast path the constant operand case both for efficiency and so we don't
    // increment Depth when just zipping down an all-constant GEP.
    if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
      if (!OpC->isZero())
        return true;
      continue;
    }

    // We post-increment Depth here because while isKnownNonZero increments it
    // as well, when we pop back up that increment won't persist. We don't
    // want to recurse 10k times just because we have 10k GEP operands. We
    // don't bail completely out because we want to handle constant GEPs
    // regardless of depth.
    if (Depth++ >= MaxDepth)
      continue;

    if (isKnownNonZero(GTI.getOperand(), DL, Depth))
      return true;
  }

  return false;
}

/// isKnownNonZero - Return true if the given value is known to be non-zero
/// when defined.  For vectors return true if every element is known to be
/// non-zero when defined.  Supports values with integer or pointer type and
/// vectors of integers.
bool llvm::isKnownNonZero(Value *V, const DataLayout *TD, unsigned Depth) {
  if (Constant *C = dyn_cast<Constant>(V)) {
    if (C->isNullValue())
      return false;
    if (isa<ConstantInt>(C))
      // Must be non-zero due to null test above.
      return true;
    // TODO: Handle vectors
    return false;
  }

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ >= MaxDepth)
    return false;

  // Check for pointer simplifications.
  if (V->getType()->isPointerTy()) {
    if (isKnownNonNull(V))
      return true;
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V))
      if (isGEPKnownNonNull(GEP, TD, Depth))
        return true;
  }

  unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), TD);

  // X | Y != 0 if X != 0 or Y != 0.
  Value *X = 0, *Y = 0;
  if (match(V, m_Or(m_Value(X), m_Value(Y))))
    return isKnownNonZero(X, TD, Depth) || isKnownNonZero(Y, TD, Depth);

  // ext X != 0 if X != 0.
  if (isa<SExtInst>(V) || isa<ZExtInst>(V))
    return isKnownNonZero(cast<Instruction>(V)->getOperand(0), TD, Depth);

  // shl X, Y != 0 if X is odd.  Note that the value of the shift is undefined
  // if the lowest bit is shifted off the end.
  if (BitWidth && match(V, m_Shl(m_Value(X), m_Value(Y)))) {
    // shl nuw can't remove any non-zero bits.
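    // For example (illustrative): shl nuw i32 %x, 3 is non-zero whenever %x
    // is, because nuw guarantees no set bit is discarded off the top.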
    OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
    if (BO->hasNoUnsignedWrap())
      return isKnownNonZero(X, TD, Depth);

    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(X, KnownZero, KnownOne, TD, Depth);
    if (KnownOne[0])
      return true;
  }
  // shr X, Y != 0 if X is negative.  Note that the value of the shift is not
  // defined if the sign bit is shifted off the end.
  else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
    // shr exact can only shift out zero bits.
    PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
    if (BO->isExact())
      return isKnownNonZero(X, TD, Depth);

    bool XKnownNonNegative, XKnownNegative;
    ComputeSignBit(X, XKnownNonNegative, XKnownNegative, TD, Depth);
    if (XKnownNegative)
      return true;
  }
  // div exact can only produce a zero if the dividend is zero.
  else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
    return isKnownNonZero(X, TD, Depth);
  }
  // X + Y.
  else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
    bool XKnownNonNegative, XKnownNegative;
    bool YKnownNonNegative, YKnownNegative;
    ComputeSignBit(X, XKnownNonNegative, XKnownNegative, TD, Depth);
    ComputeSignBit(Y, YKnownNonNegative, YKnownNegative, TD, Depth);

    // If X and Y are both non-negative (as signed values) then their sum is
    // not zero unless both X and Y are zero.
    if (XKnownNonNegative && YKnownNonNegative)
      if (isKnownNonZero(X, TD, Depth) || isKnownNonZero(Y, TD, Depth))
        return true;

    // If X and Y are both negative (as signed values) then their sum is not
    // zero unless both X and Y equal INT_MIN.
    if (BitWidth && XKnownNegative && YKnownNegative) {
      APInt KnownZero(BitWidth, 0);
      APInt KnownOne(BitWidth, 0);
      APInt Mask = APInt::getSignedMaxValue(BitWidth);
      // The sign bit of X is set.  If some other bit is set then X is not
      // equal to INT_MIN.
      ComputeMaskedBits(X, KnownZero, KnownOne, TD, Depth);
      if ((KnownOne & Mask) != 0)
        return true;
      // The sign bit of Y is set.  If some other bit is set then Y is not
      // equal to INT_MIN.
      ComputeMaskedBits(Y, KnownZero, KnownOne, TD, Depth);
      if ((KnownOne & Mask) != 0)
        return true;
    }

    // The sum of a non-negative number and a power of two is not zero.
    if (XKnownNonNegative && isKnownToBeAPowerOfTwo(Y, /*OrZero*/false, Depth))
      return true;
    if (YKnownNonNegative && isKnownToBeAPowerOfTwo(X, /*OrZero*/false, Depth))
      return true;
  }
  // X * Y.
  else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
    OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
    // If X and Y are non-zero then so is X * Y as long as the multiplication
    // does not overflow.
    if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) &&
        isKnownNonZero(X, TD, Depth) && isKnownNonZero(Y, TD, Depth))
      return true;
  }
  // (C ? X : Y) != 0 if X != 0 and Y != 0.
  else if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
    if (isKnownNonZero(SI->getTrueValue(), TD, Depth) &&
        isKnownNonZero(SI->getFalseValue(), TD, Depth))
      return true;
  }

  if (!BitWidth) return false;
  APInt KnownZero(BitWidth, 0);
  APInt KnownOne(BitWidth, 0);
  ComputeMaskedBits(V, KnownZero, KnownOne, TD, Depth);
  return KnownOne != 0;
}

/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero.  We use
/// this predicate to simplify operations downstream.  Mask is known to be
/// zero for bits that V cannot have.
///
/// This function is defined on values with integer type, values with pointer
/// type (but only if TD is non-null), and vectors of integers.  In the case
/// where V is a vector, the mask, known zero, and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask,
                             const DataLayout *TD, unsigned Depth) {
  APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0);
  ComputeMaskedBits(V, KnownZero, KnownOne, TD, Depth);
  assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
  return (KnownZero & Mask) == Mask;
}

/// ComputeNumSignBits - Return the number of times the sign bit of the
/// register is replicated into the other bits.  We know that at least 1 bit
/// is always equal to the sign bit (itself), but other cases can give us
/// information.  For example, immediately after an "ashr X, 2", we know that
/// the top 3 bits are all equal to each other, so we return 3.
///
/// 'V' must have a scalar integer type.
///
unsigned llvm::ComputeNumSignBits(Value *V, const DataLayout *TD,
                                  unsigned Depth) {
  assert((TD || V->getType()->isIntOrIntVectorTy()) &&
         "ComputeNumSignBits requires a DataLayout object to operate "
         "on non-integer values!");
  Type *Ty = V->getType();
  unsigned TyBits = TD ? TD->getTypeSizeInBits(V->getType()->getScalarType()) :
                         Ty->getScalarSizeInBits();
  unsigned Tmp, Tmp2;
  unsigned FirstAnswer = 1;

  // Note that ConstantInt is handled by the general ComputeMaskedBits case
  // below.

  if (Depth == 6)
    return 1;  // Limit search depth.

  Operator *U = dyn_cast<Operator>(V);
  switch (Operator::getOpcode(V)) {
  default: break;
  case Instruction::SExt:
    Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
    return ComputeNumSignBits(U->getOperand(0), TD, Depth+1) + Tmp;

  case Instruction::AShr: {
    Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
    // ashr X, C   -> adds C sign bits.  Vectors too.
    const APInt *ShAmt;
    if (match(U->getOperand(1), m_APInt(ShAmt))) {
      Tmp += ShAmt->getZExtValue();
      if (Tmp > TyBits) Tmp = TyBits;
    }
    return Tmp;
  }
  case Instruction::Shl: {
    const APInt *ShAmt;
    if (match(U->getOperand(1), m_APInt(ShAmt))) {
      // shl destroys sign bits.
      Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
      Tmp2 = ShAmt->getZExtValue();
      if (Tmp2 >= TyBits ||      // Bad shift.
          Tmp2 >= Tmp) break;    // Shifted all sign bits out.
      return Tmp - Tmp2;
    }
    break;
  }
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:    // NOT is handled here.
    // Logical binary ops preserve the number of sign bits at the worst.
    Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
    if (Tmp != 1) {
      Tmp2 = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
      FirstAnswer = std::min(Tmp, Tmp2);
      // We computed what we know about the sign bits as our first
      // answer. Now proceed to the generic code that uses
      // ComputeMaskedBits, and pick whichever answer is better.
    }
    break;

  case Instruction::Select:
    Tmp = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
    if (Tmp == 1) return 1;  // Early out.
    Tmp2 = ComputeNumSignBits(U->getOperand(2), TD, Depth+1);
    return std::min(Tmp, Tmp2);

  case Instruction::Add:
    // Add can have at most one carry bit.  Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
    if (Tmp == 1) return 1;  // Early out.

    // Special case decrementing a value (ADD X, -1):
    if (ConstantInt *CRHS = dyn_cast<ConstantInt>(U->getOperand(1)))
      if (CRHS->isAllOnesValue()) {
        APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
        ComputeMaskedBits(U->getOperand(0), KnownZero, KnownOne, TD, Depth+1);

        // If the input is known to be 0 or 1, the output is 0/-1, which is
        // all sign bits set.
        if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
          return TyBits;

        // If we are subtracting one from a positive number, there is no carry
        // out of the result.
        if (KnownZero.isNegative())
          return Tmp;
      }

    Tmp2 = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
    if (Tmp2 == 1) return 1;
    return std::min(Tmp, Tmp2)-1;

  case Instruction::Sub:
    Tmp2 = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
    if (Tmp2 == 1) return 1;

    // Handle NEG.
    if (ConstantInt *CLHS = dyn_cast<ConstantInt>(U->getOperand(0)))
      if (CLHS->isNullValue()) {
        APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
        ComputeMaskedBits(U->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
        // If the input is known to be 0 or 1, the output is 0/-1, which is
        // all sign bits set.
        if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
          return TyBits;

        // If the input is known to be positive (the sign bit is known clear),
        // the output of the NEG has the same number of sign bits as the
        // input.
        if (KnownZero.isNegative())
          return Tmp2;

        // Otherwise, we treat this like a SUB.
      }

    // Sub can have at most one carry bit.  Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
    if (Tmp == 1) return 1;  // Early out.
    return std::min(Tmp, Tmp2)-1;

  case Instruction::PHI: {
    PHINode *PN = cast<PHINode>(U);
    // Don't analyze large in-degree PHIs.
    if (PN->getNumIncomingValues() > 4) break;

    // Take the minimum of all incoming values.  This can't infinitely loop
    // because of our depth threshold.
    Tmp = ComputeNumSignBits(PN->getIncomingValue(0), TD, Depth+1);
    for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) {
      if (Tmp == 1) return Tmp;
      Tmp = std::min(Tmp,
                     ComputeNumSignBits(PN->getIncomingValue(i), TD, Depth+1));
    }
    return Tmp;
  }

  case Instruction::Trunc:
    // FIXME: it's tricky to do anything useful for this, but it is an
    // important case for targets like X86.
    break;
  }

  // Finally, if we can prove that the top bits of the result are 0's or 1's,
  // use this information.
  APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
  APInt Mask;
  ComputeMaskedBits(V, KnownZero, KnownOne, TD, Depth);

  if (KnownZero.isNegative()) {        // sign bit is 0
    Mask = KnownZero;
  } else if (KnownOne.isNegative()) {  // sign bit is 1;
    Mask = KnownOne;
  } else {
    // Nothing known.
    return FirstAnswer;
  }

  // Okay, we know that the sign bit in Mask is set.  Use CLZ to determine
  // the number of identical bits in the top of the input value.
  Mask = ~Mask;
  Mask <<= Mask.getBitWidth()-TyBits;
  // Return # leading zeros.  We use 'min' here in case Val was zero before
  // shifting.  We don't want to return '64' as for an i32 "0".
  return std::max(FirstAnswer, std::min(TyBits, Mask.countLeadingZeros()));
}

/// ComputeMultiple - This function computes the integer multiple of Base that
/// equals V.  If successful, it returns true and returns the multiple in
/// Multiple.  If unsuccessful, it returns false.  It looks through SExt
/// instructions only if LookThroughSExt is true.
bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
                           bool LookThroughSExt, unsigned Depth) {
  const unsigned MaxDepth = 6;

  assert(V && "No Value?");
  assert(Depth <= MaxDepth && "Limit Search Depth");
  assert(V->getType()->isIntegerTy() && "Not integer type!");

  Type *T = V->getType();

  ConstantInt *CI = dyn_cast<ConstantInt>(V);

  if (Base == 0)
    return false;

  if (Base == 1) {
    Multiple = V;
    return true;
  }

  ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
  Constant *BaseVal = ConstantInt::get(T, Base);
  if (CO && CO == BaseVal) {
    // Multiple is 1.
    Multiple = ConstantInt::get(T, 1);
    return true;
  }

  if (CI && CI->getZExtValue() % Base == 0) {
    Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
    return true;
  }

  if (Depth == MaxDepth) return false;  // Limit search depth.
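  // For example (illustrative): for V = shl i32 %x, 3 and Base == 8, the walk
  // below rewrites the shift as a multiply by 8, folds 8 / Base == 1, and
  // returns Multiple = %x.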

  Operator *I = dyn_cast<Operator>(V);
  if (!I) return false;

  switch (I->getOpcode()) {
  default: break;
  case Instruction::SExt:
    if (!LookThroughSExt) return false;
    // otherwise fall through to ZExt
  case Instruction::ZExt:
    return ComputeMultiple(I->getOperand(0), Base, Multiple,
                           LookThroughSExt, Depth+1);
  case Instruction::Shl:
  case Instruction::Mul: {
    Value *Op0 = I->getOperand(0);
    Value *Op1 = I->getOperand(1);

    if (I->getOpcode() == Instruction::Shl) {
      ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
      if (!Op1CI) return false;
      // Turn Op0 << Op1 into Op0 * 2^Op1
      APInt Op1Int = Op1CI->getValue();
      uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
      APInt API(Op1Int.getBitWidth(), 0);
      API.setBit(BitToSet);
      Op1 = ConstantInt::get(V->getContext(), API);
    }

    Value *Mul0 = NULL;
    if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
      if (Constant *Op1C = dyn_cast<Constant>(Op1))
        if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
          if (Op1C->getType()->getPrimitiveSizeInBits() <
              MulC->getType()->getPrimitiveSizeInBits())
            Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
          if (Op1C->getType()->getPrimitiveSizeInBits() >
              MulC->getType()->getPrimitiveSizeInBits())
            MulC = ConstantExpr::getZExt(MulC, Op1C->getType());

          // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
          Multiple = ConstantExpr::getMul(MulC, Op1C);
          return true;
        }

      if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
        if (Mul0CI->getValue() == 1) {
          // V == Base * Op1, so return Op1
          Multiple = Op1;
          return true;
        }
    }

    Value *Mul1 = NULL;
    if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
      if (Constant *Op0C = dyn_cast<Constant>(Op0))
        if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
          if (Op0C->getType()->getPrimitiveSizeInBits() <
              MulC->getType()->getPrimitiveSizeInBits())
            Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
          if (Op0C->getType()->getPrimitiveSizeInBits() >
              MulC->getType()->getPrimitiveSizeInBits())
            MulC = ConstantExpr::getZExt(MulC, Op0C->getType());

          // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
          Multiple = ConstantExpr::getMul(MulC, Op0C);
          return true;
        }

      if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
        if (Mul1CI->getValue() == 1) {
          // V == Base * Op0, so return Op0
          Multiple = Op0;
          return true;
        }
    }
  }
  }

  // We could not determine if V is a multiple of Base.
  return false;
}

/// CannotBeNegativeZero - Return true if we can prove that the specified FP
/// value is never equal to -0.0.
///
/// NOTE: this function will need to be revisited when we support non-default
/// rounding modes!
///
bool llvm::CannotBeNegativeZero(const Value *V, unsigned Depth) {
  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
    return !CFP->getValueAPF().isNegZero();

  if (Depth == 6)
    return false;  // Limit search depth; we can't prove anything deeper.

  const Operator *I = dyn_cast<Operator>(V);
  if (I == 0) return false;

  // Check if the nsz fast-math flag is set
  if (const FPMathOperator *FPO = dyn_cast<FPMathOperator>(I))
    if (FPO->hasNoSignedZeros())
      return true;

  // (add x, 0.0) is guaranteed to return +0.0, not -0.0.
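  // (x + +0.0 is +0.0 when x is -0.0 and is sign-preserving otherwise under
  // the default round-to-nearest mode, so the result is never -0.0.)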
  if (I->getOpcode() == Instruction::FAdd)
    if (ConstantFP *CFP = dyn_cast<ConstantFP>(I->getOperand(1)))
      if (CFP->isNullValue())
        return true;

  // sitofp and uitofp turn into +0.0 for zero.
  if (isa<SIToFPInst>(I) || isa<UIToFPInst>(I))
    return true;

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
    // sqrt(-0.0) = -0.0, no other negative results are possible.
    if (II->getIntrinsicID() == Intrinsic::sqrt)
      return CannotBeNegativeZero(II->getArgOperand(0), Depth+1);

  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (const Function *F = CI->getCalledFunction()) {
      if (F->isDeclaration()) {
        // abs(x) != -0.0
        if (F->getName() == "abs") return true;
        // fabs[lf](x) != -0.0
        if (F->getName() == "fabs") return true;
        if (F->getName() == "fabsf") return true;
        if (F->getName() == "fabsl") return true;
        if (F->getName() == "sqrt" || F->getName() == "sqrtf" ||
            F->getName() == "sqrtl")
          return CannotBeNegativeZero(CI->getArgOperand(0), Depth+1);
      }
    }

  return false;
}

/// isBytewiseValue - If the specified value can be set by repeating the same
/// byte in memory, return the i8 value that it is represented with.  This is
/// true for all i8 values obviously, but is also true for i32 0, i32 -1,
/// i16 0xF0F0, double 0.0 etc.  If the value can't be handled with a repeated
/// byte store (e.g. i16 0x1234), return null.
Value *llvm::isBytewiseValue(Value *V) {
  // All byte-wide stores are splatable, even of arbitrary variables.
  if (V->getType()->isIntegerTy(8)) return V;

  // Handle 'null' ConstantAggregateZero etc.
  if (Constant *C = dyn_cast<Constant>(V))
    if (C->isNullValue())
      return Constant::getNullValue(Type::getInt8Ty(V->getContext()));

  // Constant float and double values can be handled as integer values if the
  // corresponding integer value is "byteable".  An important case is 0.0.
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
    if (CFP->getType()->isFloatTy())
      V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
    if (CFP->getType()->isDoubleTy())
      V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
    // Don't handle long double formats, which have strange constraints.
  }

  // We can handle constant integers that are power of two in size and a
  // multiple of 8 bits.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    unsigned Width = CI->getBitWidth();
    if (isPowerOf2_32(Width) && Width > 8) {
      // We can handle this value if the recursive binary decomposition is the
      // same at all levels.
      APInt Val = CI->getValue();
      APInt Val2;
      while (Val.getBitWidth() != 8) {
        unsigned NextWidth = Val.getBitWidth()/2;
        Val2 = Val.lshr(NextWidth);
        Val2 = Val2.trunc(Val.getBitWidth()/2);
        Val = Val.trunc(Val.getBitWidth()/2);

        // If the top/bottom halves aren't the same, reject it.
        if (Val != Val2)
          return 0;
      }
      return ConstantInt::get(V->getContext(), Val);
    }
  }

  // A ConstantDataArray/Vector is splatable if all its members are equal and
  // also splatable.
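  // For example (illustrative): [4 x i8] c"\AA\AA\AA\AA" splats to i8 0xAA,
  // but <2 x i16> <i16 0x1234, i16 0x1234> does not, because 0x1234 is itself
  // not a repeated byte.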
/// isBytewiseValue - If the specified value can be set by repeating the same
/// byte in memory, return the i8 value that it is represented with.  This is
/// true for all i8 values obviously, but is also true for i32 0, i32 -1,
/// i16 0xF0F0, double 0.0 etc.  If the value can't be handled with a repeated
/// byte store (e.g. i16 0x1234), return null.
Value *llvm::isBytewiseValue(Value *V) {
  // All byte-wide stores are splatable, even of arbitrary variables.
  if (V->getType()->isIntegerTy(8)) return V;

  // Handle 'null' ConstantAggregateZero etc.
  if (Constant *C = dyn_cast<Constant>(V))
    if (C->isNullValue())
      return Constant::getNullValue(Type::getInt8Ty(V->getContext()));

  // Constant float and double values can be handled as integer values if the
  // corresponding integer value is "byteable".  An important case is 0.0.
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
    if (CFP->getType()->isFloatTy())
      V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
    if (CFP->getType()->isDoubleTy())
      V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
    // Don't handle long double formats, which have strange constraints.
  }

  // We can handle constant integers that are a power of two in size and a
  // multiple of 8 bits.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    unsigned Width = CI->getBitWidth();
    if (isPowerOf2_32(Width) && Width > 8) {
      // We can handle this value if the recursive binary decomposition is the
      // same at all levels.
      APInt Val = CI->getValue();
      APInt Val2;
      while (Val.getBitWidth() != 8) {
        unsigned NextWidth = Val.getBitWidth()/2;
        Val2 = Val.lshr(NextWidth);
        Val2 = Val2.trunc(Val.getBitWidth()/2);
        Val = Val.trunc(Val.getBitWidth()/2);

        // If the top/bottom halves aren't the same, reject it.
        if (Val != Val2)
          return 0;
      }
      return ConstantInt::get(V->getContext(), Val);
    }
  }

  // A ConstantDataArray/Vector is splatable if all its members are equal and
  // also splatable.
  if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) {
    Value *Elt = CA->getElementAsConstant(0);
    Value *Val = isBytewiseValue(Elt);
    if (!Val)
      return 0;

    for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I)
      if (CA->getElementAsConstant(I) != Elt)
        return 0;

    return Val;
  }

  // Conceptually, we could handle things like:
  //   %a = zext i8 %X to i16
  //   %b = shl i16 %a, 8
  //   %c = or i16 %a, %b
  // but until there is an example that actually needs this, it doesn't seem
  // worth worrying about.
  return 0;
}
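
// Worked example (editor's illustration): for i32 0xF0F0F0F0 the
// decomposition loop above halves the value twice,
//   0xF0F0F0F0 -> (0xF0F0, 0xF0F0) -> (0xF0, 0xF0),
// with the two halves equal at every level, so the function returns the i8
// value 0xF0.  For i32 0x12341234 the halves agree once (0x1234 == 0x1234)
// but diverge at the next level (0x12 != 0x34), so it returns null.
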
// This is the recursive version of BuildSubAggregate. It takes a few different
// arguments. Idxs is the index within the nested struct From that we are
// looking at now (which is of type IndexedType). IdxSkip is the number of
// indices from Idxs that should be left out when inserting into the resulting
// struct. To is the result struct built so far, new insertvalue instructions
// build on that.
static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
                                SmallVectorImpl<unsigned> &Idxs,
                                unsigned IdxSkip,
                                Instruction *InsertBefore) {
  llvm::StructType *STy = dyn_cast<llvm::StructType>(IndexedType);
  if (STy) {
    // Save the original To argument so we can modify it
    Value *OrigTo = To;
    // General case, the type indexed by Idxs is a struct
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      // Process each struct element recursively
      Idxs.push_back(i);
      Value *PrevTo = To;
      To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
                             InsertBefore);
      Idxs.pop_back();
      if (!To) {
        // Couldn't find any inserted value for this index? Cleanup
        while (PrevTo != OrigTo) {
          InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
          PrevTo = Del->getAggregateOperand();
          Del->eraseFromParent();
        }
        // Stop processing elements
        break;
      }
    }
    // If we successfully found a value for each of our subaggregates
    if (To)
      return To;
  }
  // Base case, the type indexed by SourceIdxs is not a struct, or not all of
  // the struct's elements had a value that was inserted directly. In the latter
  // case, perhaps we can't determine each of the subelements individually, but
  // we might be able to find the complete struct somewhere.

  // Find the value that is at that particular spot
  Value *V = FindInsertedValue(From, Idxs);

  if (!V)
    return NULL;

  // Insert the value in the new (sub) aggregate
  return llvm::InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
                                       "tmp", InsertBefore);
}

// This helper takes a nested struct and extracts a part of it (which is again
// a struct) into a new value. For example, given the struct:
//   { a, { b, { c, d }, e } }
// and the indices "1, 1" this returns
//   { c, d }.
//
// It does this by inserting an insertvalue for each element in the resulting
// struct, as opposed to just inserting a single struct. This will only work if
// each of the elements of the substruct is known (i.e., inserted into From by
// an insertvalue instruction somewhere).
//
// All inserted insertvalue instructions are inserted before InsertBefore
static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
                                Instruction *InsertBefore) {
  assert(InsertBefore && "Must have someplace to insert!");
  Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
                                                       idx_range);
  Value *To = UndefValue::get(IndexedType);
  SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
  unsigned IdxSkip = Idxs.size();

  return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
}
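
// Example (editor's illustration): extracting indices {1, 1} from the
// { a, { b, { c, d }, e } } struct above materializes, before InsertBefore,
// two insertvalue instructions that rebuild { c, d } from scratch:
//   %t0 = insertvalue { c, d } undef, cVal, 0
//   %t1 = insertvalue { c, d } %t0,   dVal, 1
// where cVal and dVal are the registers FindInsertedValue located for the
// full index paths {1, 1, 0} and {1, 1, 1}.  IdxSkip == 2 is what drops the
// leading "1, 1" prefix when the new insertvalue index lists are built.
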
/// FindInsertedValue - Given an aggregate and a sequence of indices, see if
/// the scalar value indexed is already around as a register, for example if it
/// were inserted directly into the aggregate.
///
/// If InsertBefore is not null, this function will duplicate (modified)
/// insertvalues when a part of a nested struct is extracted.
Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
                               Instruction *InsertBefore) {
  // Nothing to index? Just return V then (this is useful at the end of our
  // recursion).
  if (idx_range.empty())
    return V;
  // We have indices, so V should have an indexable type.
  assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
         "Not looking at a struct or array?");
  assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
         "Invalid indices for type?");

  if (Constant *C = dyn_cast<Constant>(V)) {
    C = C->getAggregateElement(idx_range[0]);
    if (C == 0) return 0;
    return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
  }

  if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
    // Loop the indices for the insertvalue instruction in parallel with the
    // requested indices
    const unsigned *req_idx = idx_range.begin();
    for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
         i != e; ++i, ++req_idx) {
      if (req_idx == idx_range.end()) {
        // We can't handle this without inserting insertvalues
        if (!InsertBefore)
          return 0;

        // The requested index identifies a part of a nested aggregate. Handle
        // this specially. For example,
        //   %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
        //   %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
        //   %C = extractvalue {i32, { i32, i32 } } %B, 1
        // This can be changed into
        //   %A = insertvalue {i32, i32 } undef, i32 10, 0
        //   %C = insertvalue {i32, i32 } %A, i32 11, 1
        // which allows the unused 0,0 element from the nested struct to be
        // removed.
        return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
                                 InsertBefore);
      }

      // This insertvalue inserts something other than what we are looking
      // for. See if the (aggregate) value inserted into has the value we are
      // looking for, then.
      if (*req_idx != *i)
        return FindInsertedValue(I->getAggregateOperand(), idx_range,
                                 InsertBefore);
    }
    // If we end up here, the indices of the insertvalue match with those
    // requested (though possibly only partially). Now we recursively look at
    // the inserted value, passing any remaining indices.
    return FindInsertedValue(I->getInsertedValueOperand(),
                             makeArrayRef(req_idx, idx_range.end()),
                             InsertBefore);
  }

  if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
    // If we're extracting a value from an aggregate that was extracted from
    // something else, we can extract from that something else directly
    // instead. However, we will need to chain I's indices with the requested
    // indices.

    // Calculate the number of indices required
    unsigned size = I->getNumIndices() + idx_range.size();
    // Allocate some space to put the new indices in
    SmallVector<unsigned, 5> Idxs;
    Idxs.reserve(size);
    // Add indices from the extract value instruction
    Idxs.append(I->idx_begin(), I->idx_end());

    // Add requested indices
    Idxs.append(idx_range.begin(), idx_range.end());

    assert(Idxs.size() == size
           && "Number of indices added not correct?");

    return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
  }
  // Otherwise, we don't know (such as, extracting from a function return value
  // or load instruction)
  return 0;
}

/// GetPointerBaseWithConstantOffset - Analyze the specified pointer to see if
/// it can be expressed as a base pointer plus a constant offset. Return the
/// base and offset to the caller.
Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                              const DataLayout *DL) {
  // Without DataLayout, conservatively assume 64-bit offsets, which is
  // the widest we support.
  unsigned BitWidth = DL ? DL->getPointerTypeSizeInBits(Ptr->getType()) : 64;
  APInt ByteOffset(BitWidth, 0);
  while (1) {
    if (Ptr->getType()->isVectorTy())
      break;

    if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
      if (DL) {
        APInt GEPOffset(BitWidth, 0);
        if (!GEP->accumulateConstantOffset(*DL, GEPOffset))
          break;

        ByteOffset += GEPOffset;
      }

      Ptr = GEP->getPointerOperand();
    } else if (Operator::getOpcode(Ptr) == Instruction::BitCast) {
      Ptr = cast<Operator>(Ptr)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
      if (GA->mayBeOverridden())
        break;
      Ptr = GA->getAliasee();
    } else {
      break;
    }
  }
  Offset = ByteOffset.getSExtValue();
  return Ptr;
}
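
// Example (editor's illustration, assuming a DataLayout with 64-bit pointers
// and a hypothetical global @g): for
//   %p = getelementptr inbounds [10 x i32]* @g, i64 0, i64 3
//   %q = bitcast i32* %p to i8*
// the loop above strips the bitcast, accumulates 3 * 4 == 12 bytes from the
// GEP, and returns @g with Offset == 12.  When no DataLayout is available,
// GEP offsets are not accumulated, but the base pointer is still stripped.
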
/// getConstantStringInfo - This function finds the constant, null-terminated
/// C string pointed to by V. If successful, it returns true and places the
/// string in Str. If unsuccessful, it returns false.
bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
                                 uint64_t Offset, bool TrimAtNul) {
  assert(V);

  // Look through bitcast instructions and geps.
  V = V->stripPointerCasts();

  // If the value is a GEP instruction or constant expression, treat it as an
  // offset.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    // Make sure the GEP has exactly three arguments.
    if (GEP->getNumOperands() != 3)
      return false;

    // Make sure the pointer being indexed points to an array of i8.
    PointerType *PT = cast<PointerType>(GEP->getOperand(0)->getType());
    ArrayType *AT = dyn_cast<ArrayType>(PT->getElementType());
    if (AT == 0 || !AT->getElementType()->isIntegerTy(8))
      return false;

    // Check to make sure that the first operand of the GEP is an integer and
    // has value 0 so that we are sure we're indexing into the initializer.
    const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
    if (FirstIdx == 0 || !FirstIdx->isZero())
      return false;

    // If the second index isn't a ConstantInt, then this is a variable index
    // into the array. If this occurs, we can't say anything meaningful about
    // the string.
    uint64_t StartIdx = 0;
    if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
      StartIdx = CI->getZExtValue();
    else
      return false;
    return getConstantStringInfo(GEP->getOperand(0), Str, StartIdx+Offset);
  }

  // With any GEP (instruction or constant expression) stripped, V must now
  // reference a global variable that is a constant and is initialized. The
  // referenced constant initializer is the array that we'll use for
  // optimization.
  const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
    return false;

  // Handle the all-zeros case.
  if (GV->getInitializer()->isNullValue()) {
    // This is a degenerate case. The initializer is constant zero so the
    // length of the string must be zero.
    Str = "";
    return true;
  }

  // Must be a ConstantDataArray.
  const ConstantDataArray *Array =
    dyn_cast<ConstantDataArray>(GV->getInitializer());
  if (Array == 0 || !Array->isString())
    return false;

  // Get the number of elements in the array.
  uint64_t NumElts = Array->getType()->getArrayNumElements();

  // Start out with the entire array in the StringRef.
  Str = Array->getAsString();

  if (Offset > NumElts)
    return false;

  // Skip over 'Offset' bytes.
  Str = Str.substr(Offset);

  if (TrimAtNul) {
    // Trim off the \0 and anything after it. If the array is not nul
    // terminated, we just return the whole end of string. The client may know
    // some other way that the string is length-bound.
    Str = Str.substr(0, Str.find('\0'));
  }
  return true;
}
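
// Example (editor's illustration, using a hypothetical global @str): given
//   @str = private constant [6 x i8] c"hello\00"
//   %p = getelementptr [6 x i8]* @str, i32 0, i32 2
// getConstantStringInfo(%p, Str) takes the GEP path with StartIdx == 2,
// recurses on @str with Offset == 2, and yields Str == "llo", since
// TrimAtNul drops the terminator and anything after it.
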
// These next two are very similar to the above, but also look through PHI
// nodes.
// TODO: See if we can integrate these two together.

/// GetStringLengthH - If we can compute the length of the string pointed to by
/// the specified pointer, return 'len+1'.  If we can't, return 0.
static uint64_t GetStringLengthH(Value *V, SmallPtrSet<PHINode*, 32> &PHIs) {
  // Look through noop bitcast instructions.
  V = V->stripPointerCasts();

  // If this is a PHI node, there are two cases: either we have already seen it
  // or we haven't.
  if (PHINode *PN = dyn_cast<PHINode>(V)) {
    if (!PHIs.insert(PN))
      return ~0ULL;  // already in the set.

    // If it was new, see if all the input strings are the same length.
    uint64_t LenSoFar = ~0ULL;
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      uint64_t Len = GetStringLengthH(PN->getIncomingValue(i), PHIs);
      if (Len == 0) return 0; // Unknown length -> unknown.

      if (Len == ~0ULL) continue;

      if (Len != LenSoFar && LenSoFar != ~0ULL)
        return 0; // Disagree -> unknown.
      LenSoFar = Len;
    }

    // Success, all agree.
    return LenSoFar;
  }

  // strlen(select(c,x,y)) -> strlen(x), but only if strlen(x) == strlen(y).
  if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
    uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs);
    if (Len1 == 0) return 0;
    uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs);
    if (Len2 == 0) return 0;
    if (Len1 == ~0ULL) return Len2;
    if (Len2 == ~0ULL) return Len1;
    if (Len1 != Len2) return 0;
    return Len1;
  }

  // Otherwise, see if we can read the string.
  StringRef StrData;
  if (!getConstantStringInfo(V, StrData))
    return 0;

  return StrData.size()+1;
}

/// GetStringLength - If we can compute the length of the string pointed to by
/// the specified pointer, return 'len+1'.  If we can't, return 0.
uint64_t llvm::GetStringLength(Value *V) {
  if (!V->getType()->isPointerTy()) return 0;

  SmallPtrSet<PHINode*, 32> PHIs;
  uint64_t Len = GetStringLengthH(V, PHIs);
  // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so
  // return 1, the length of an empty string (just the nul terminator).
  return Len == ~0ULL ? 1 : Len;
}
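
// Example (editor's illustration, with hypothetical globals @a and @b): for
//   @a = private constant [3 x i8] c"hi\00"
//   @b = private constant [3 x i8] c"no\00"
//   %s = select i1 %c, i8* @a, i8* @b
// both select arms have length 3 (strlen plus the nul), so
// GetStringLength(%s) == 3.  Had @b been c"nope\00" the arms would disagree
// (3 vs. 5) and the result would be 0, i.e. unknown.
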
Value *
llvm::GetUnderlyingObject(Value *V, const DataLayout *TD, unsigned MaxLookup) {
  if (!V->getType()->isPointerTy())
    return V;
  for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast) {
      V = cast<Operator>(V)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
      if (GA->mayBeOverridden())
        return V;
      V = GA->getAliasee();
    } else {
      // See if InstructionSimplify knows any relevant tricks.
      if (Instruction *I = dyn_cast<Instruction>(V))
        // TODO: Acquire a DominatorTree and use it.
        if (Value *Simplified = SimplifyInstruction(I, TD, 0)) {
          V = Simplified;
          continue;
        }

      return V;
    }
    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
  }
  return V;
}

void
llvm::GetUnderlyingObjects(Value *V,
                           SmallVectorImpl<Value *> &Objects,
                           const DataLayout *TD,
                           unsigned MaxLookup) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist;
  Worklist.push_back(V);
  do {
    Value *P = Worklist.pop_back_val();
    P = GetUnderlyingObject(P, TD, MaxLookup);

    if (!Visited.insert(P))
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        Worklist.push_back(PN->getIncomingValue(i));
      continue;
    }

    Objects.push_back(P);
  } while (!Worklist.empty());
}
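
// Example (editor's illustration): for
//   %s = select i1 %c, i8* %x, i8* %y
//   %p = phi i8* [ %a, %bb1 ], [ %s, %bb2 ]
// GetUnderlyingObjects(%p, Objects) pushes both phi inputs onto the
// worklist, then splits the select into its arms, so Objects ends up
// holding the underlying objects of %a, %x and %y, each first stripped
// through GEPs, bitcasts and non-overridable aliases by
// GetUnderlyingObject.  The Visited set keeps phi cycles from looping.
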
/// onlyUsedByLifetimeMarkers - Return true if the only users of this pointer
/// are lifetime markers.
///
bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
  for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end();
       UI != UE; ++UI) {
    const IntrinsicInst *II = dyn_cast<IntrinsicInst>(*UI);
    if (!II) return false;

    if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
        II->getIntrinsicID() != Intrinsic::lifetime_end)
      return false;
  }
  return true;
}

bool llvm::isSafeToSpeculativelyExecute(const Value *V,
                                        const DataLayout *TD) {
  const Operator *Inst = dyn_cast<Operator>(V);
  if (!Inst)
    return false;

  for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
    if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
      if (C->canTrap())
        return false;

  switch (Inst->getOpcode()) {
  default:
    return true;
  case Instruction::UDiv:
  case Instruction::URem:
    // x / y is undefined if y == 0, but calculations like x / 3 are safe.
    return isKnownNonZero(Inst->getOperand(1), TD);
  case Instruction::SDiv:
  case Instruction::SRem: {
    Value *Op = Inst->getOperand(1);
    // x / y is undefined if y == 0.
    if (!isKnownNonZero(Op, TD))
      return false;
    // x / y is also undefined if x == INT_MIN and y == -1; a known-zero bit
    // in y proves y != -1.
    unsigned BitWidth = getBitWidth(Op->getType(), TD);
    if (BitWidth == 0)
      return false;
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(Op, KnownZero, KnownOne, TD);
    return !!KnownZero;
  }
  case Instruction::Load: {
    const LoadInst *LI = cast<LoadInst>(Inst);
    if (!LI->isUnordered())
      return false;
    return LI->getPointerOperand()->isDereferenceablePointer();
  }
  case Instruction::Call: {
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
      switch (II->getIntrinsicID()) {
      // These synthetic intrinsics have no side-effects and just mark
      // information about their operands.
      // FIXME: There are other no-op synthetic instructions that potentially
      // should be considered at least *safe* to speculate...
      case Intrinsic::dbg_declare:
      case Intrinsic::dbg_value:
        return true;

      case Intrinsic::bswap:
      case Intrinsic::ctlz:
      case Intrinsic::ctpop:
      case Intrinsic::cttz:
      case Intrinsic::objectsize:
      case Intrinsic::sadd_with_overflow:
      case Intrinsic::smul_with_overflow:
      case Intrinsic::ssub_with_overflow:
      case Intrinsic::uadd_with_overflow:
      case Intrinsic::umul_with_overflow:
      case Intrinsic::usub_with_overflow:
        return true;
      // TODO: some fp intrinsics are marked as having the same error handling
      // as libm. They're safe to speculate when they won't error.
      // TODO: are convert_{from,to}_fp16 safe?
      // TODO: can we list target-specific intrinsics here?
      default: break;
      }
    }
    return false; // The called function could have undefined behavior or
                  // side-effects, even if marked readnone nounwind.
  }
  case Instruction::VAArg:
  case Instruction::Alloca:
  case Instruction::Invoke:
  case Instruction::PHI:
  case Instruction::Store:
  case Instruction::Ret:
  case Instruction::Br:
  case Instruction::IndirectBr:
  case Instruction::Switch:
  case Instruction::Unreachable:
  case Instruction::Fence:
  case Instruction::LandingPad:
  case Instruction::AtomicRMW:
  case Instruction::AtomicCmpXchg:
  case Instruction::Resume:
    return false; // Misc instructions which have effects
  }
}

/// isKnownNonNull - Return true if we know that the specified value is never
/// null.
bool llvm::isKnownNonNull(const Value *V, const TargetLibraryInfo *TLI) {
  // Alloca never returns null, malloc might.
  if (isa<AllocaInst>(V)) return true;

  // A byval argument is never null.
  if (const Argument *A = dyn_cast<Argument>(V))
    return A->hasByValAttr();

  // Global values are not null unless extern weak.
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
    return !GV->hasExternalWeakLinkage();

  // operator new never returns null.
  if (isOperatorNewLikeFn(V, TLI, /*LookThroughBitCast=*/true))
    return true;

  return false;
}
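
// Example (editor's illustration): isKnownNonNull returns true for
//   %p = alloca i32
// for a byval argument, and for any global without extern_weak linkage; it
// returns false for the result of malloc, which may legitimately be null,
// and for a plain pointer argument, about which nothing is known.  For
// speculation, note that "%d = sdiv i32 %x, 7" passes the SDiv checks
// above: 7 is known non-zero, and bit 3 of 7 is known zero, so the divisor
// cannot be -1 and INT_MIN / -1 overflow is ruled out.
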