//===- InstCombineCompares.cpp --------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitICmp and visitFCmp functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

// How many times is a select replaced by one of its operands?
STATISTIC(NumSel, "Number of select opts");


/// Compute Result = In1+In2, returning true if the result overflowed for this
/// type.
static bool addWithOverflow(APInt &Result, const APInt &In1,
                            const APInt &In2, bool IsSigned = false) {
  bool Overflow;
  if (IsSigned)
    Result = In1.sadd_ov(In2, Overflow);
  else
    Result = In1.uadd_ov(In2, Overflow);

  return Overflow;
}

/// Compute Result = In1-In2, returning true if the result overflowed for this
/// type.
static bool subWithOverflow(APInt &Result, const APInt &In1,
                            const APInt &In2, bool IsSigned = false) {
  bool Overflow;
  if (IsSigned)
    Result = In1.ssub_ov(In2, Overflow);
  else
    Result = In1.usub_ov(In2, Overflow);

  return Overflow;
}

/// Given an icmp instruction, return true if any use of this comparison is a
/// branch on sign bit comparison.
static bool hasBranchUse(ICmpInst &I) {
  for (auto *U : I.users())
    if (isa<BranchInst>(U))
      return true;
  return false;
}

/// Returns true if the exploded icmp can be expressed as a signed comparison
/// to zero and updates the predicate accordingly.
/// The signedness of the comparison is preserved.
/// TODO: Refactor with decomposeBitTestICmp()?
static bool isSignTest(ICmpInst::Predicate &Pred, const APInt &C) {
  if (!ICmpInst::isSigned(Pred))
    return false;

  if (C.isNullValue())
    return ICmpInst::isRelational(Pred);

  if (C.isOneValue()) {
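    // icmp slt X, 1 has the same result as icmp sle X, 0.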
    if (Pred == ICmpInst::ICMP_SLT) {
      Pred = ICmpInst::ICMP_SLE;
      return true;
    }
  } else if (C.isAllOnesValue()) {
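    // icmp sgt X, -1 has the same result as icmp sge X, 0.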
    if (Pred == ICmpInst::ICMP_SGT) {
      Pred = ICmpInst::ICMP_SGE;
      return true;
    }
  }

  return false;
}

/// Given a signed integer type and a set of known zero and one bits, compute
/// the maximum and minimum values that could have the specified known zero and
/// known one bits, returning them in Min/Max.
/// TODO: Move to method on KnownBits struct?
static void computeSignedMinMaxValuesFromKnownBits(const KnownBits &Known,
                                                   APInt &Min, APInt &Max) {
  assert(Known.getBitWidth() == Min.getBitWidth() &&
         Known.getBitWidth() == Max.getBitWidth() &&
         "KnownZero, KnownOne and Min, Max must have equal bitwidth.");
  APInt UnknownBits = ~(Known.Zero|Known.One);

  // The minimum value is when all unknown bits are zeros, EXCEPT for the sign
  // bit if it is unknown.
  Min = Known.One;
  Max = Known.One|UnknownBits;

  if (UnknownBits.isNegative()) { // Sign bit is unknown
    Min.setSignBit();
    Max.clearSignBit();
  }
}

/// Given an unsigned integer type and a set of known zero and one bits, compute
/// the maximum and minimum values that could have the specified known zero and
/// known one bits, returning them in Min/Max.
/// TODO: Move to method on KnownBits struct?
static void computeUnsignedMinMaxValuesFromKnownBits(const KnownBits &Known,
                                                     APInt &Min, APInt &Max) {
  assert(Known.getBitWidth() == Min.getBitWidth() &&
         Known.getBitWidth() == Max.getBitWidth() &&
         "Ty, KnownZero, KnownOne and Min, Max must have equal bitwidth.");
  APInt UnknownBits = ~(Known.Zero|Known.One);

  // The minimum value is when the unknown bits are all zeros.
  Min = Known.One;
  // The maximum value is when the unknown bits are all ones.
  Max = Known.One|UnknownBits;
}

/// This is called when we see this pattern:
///   cmp pred (load (gep GV, ...)), cmpcst
/// where GV is a global variable with a constant initializer. Try to simplify
/// this into some simple computation that does not need the load. For example
/// we can optimize "icmp eq (load (gep "foo", 0, i)), 0" into "icmp eq i, 3".
///
/// If AndCst is non-null, then the loaded value is masked with that constant
/// before doing the comparison. This handles cases like "A[i]&4 == 0".
Instruction *InstCombiner::foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
                                                        GlobalVariable *GV,
                                                        CmpInst &ICI,
                                                        ConstantInt *AndCst) {
  Constant *Init = GV->getInitializer();
  if (!isa<ConstantArray>(Init) && !isa<ConstantDataArray>(Init))
    return nullptr;

  uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
  // Don't blow up on huge arrays.
  if (ArrayElementCount > MaxArraySizeForCombine)
    return nullptr;

  // There are many forms of this optimization we can handle, for now, just do
  // the simple index into a single-dimensional array.
  //
  // Require: GEP GV, 0, i {{, constant indices}}
  if (GEP->getNumOperands() < 3 ||
      !isa<ConstantInt>(GEP->getOperand(1)) ||
      !cast<ConstantInt>(GEP->getOperand(1))->isZero() ||
      isa<Constant>(GEP->getOperand(2)))
    return nullptr;

  // Check that indices after the variable are constants and in-range for the
  // type they index.  Collect the indices.  This is typically for arrays of
  // structs.
  SmallVector<unsigned, 4> LaterIndices;

  Type *EltTy = Init->getType()->getArrayElementType();
  for (unsigned i = 3, e = GEP->getNumOperands(); i != e; ++i) {
    ConstantInt *Idx = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!Idx) return nullptr;  // Variable index.

    uint64_t IdxVal = Idx->getZExtValue();
    if ((unsigned)IdxVal != IdxVal) return nullptr; // Too large array index.

    if (StructType *STy = dyn_cast<StructType>(EltTy))
      EltTy = STy->getElementType(IdxVal);
    else if (ArrayType *ATy = dyn_cast<ArrayType>(EltTy)) {
      if (IdxVal >= ATy->getNumElements()) return nullptr;
      EltTy = ATy->getElementType();
    } else {
      return nullptr; // Unknown type.
    }

    LaterIndices.push_back(IdxVal);
  }

  enum { Overdefined = -3, Undefined = -2 };

  // Variables for our state machines.

  // FirstTrueElement/SecondTrueElement - Used to emit a comparison of the form
  // "i == 47 | i == 87", where 47 is the first index the condition is true for,
  // and 87 is the second (and last) index.  FirstTrueElement is -2 when
  // undefined, otherwise set to the first true element.  SecondTrueElement is
  // -2 when undefined, -3 when overdefined and >= 0 when that index is true.
  int FirstTrueElement = Undefined, SecondTrueElement = Undefined;

  // FirstFalseElement/SecondFalseElement - Used to emit a comparison of the
  // form "i != 47 & i != 87".  Same state transitions as for true elements.
  int FirstFalseElement = Undefined, SecondFalseElement = Undefined;

  /// TrueRangeEnd/FalseRangeEnd - In conjunction with First*Element, these
  /// define a state machine that triggers for ranges of values that the index
  /// is true or false for.  This triggers on things like "abbbbc"[i] == 'b'.
  /// This is -2 when undefined, -3 when overdefined, and otherwise the last
  /// index in the range (inclusive).  We use -2 for undefined here because we
  /// use relative comparisons and don't want 0-1 to match -1.
  int TrueRangeEnd = Undefined, FalseRangeEnd = Undefined;

  // MagicBitvector - This is a magic bitvector where we set a bit if the
  // comparison is true for element 'i'.  If there are 64 elements or less in
  // the array, this will fully represent all the comparison results.
  uint64_t MagicBitvector = 0;

  // Scan the array and see if one of our patterns matches.
  Constant *CompareRHS = cast<Constant>(ICI.getOperand(1));
  for (unsigned i = 0, e = ArrayElementCount; i != e; ++i) {
    Constant *Elt = Init->getAggregateElement(i);
    if (!Elt) return nullptr;

    // If this is indexing an array of structures, get the structure element.
    if (!LaterIndices.empty())
      Elt = ConstantExpr::getExtractValue(Elt, LaterIndices);

    // If the element is masked, handle it.
    if (AndCst) Elt = ConstantExpr::getAnd(Elt, AndCst);

    // Find out if the comparison would be true or false for the i'th element.
    Constant *C = ConstantFoldCompareInstOperands(ICI.getPredicate(), Elt,
                                                  CompareRHS, DL, &TLI);
    // If the result is undef for this element, ignore it.
    if (isa<UndefValue>(C)) {
      // Extend range state machines to cover this element in case there is an
      // undef in the middle of the range.
      if (TrueRangeEnd == (int)i-1)
        TrueRangeEnd = i;
      if (FalseRangeEnd == (int)i-1)
        FalseRangeEnd = i;
      continue;
    }

    // If we can't compute the result for any of the elements, we have to give
    // up evaluating the entire conditional.
    if (!isa<ConstantInt>(C)) return nullptr;

    // Otherwise, we know if the comparison is true or false for this element,
    // update our state machines.
    bool IsTrueForElt = !cast<ConstantInt>(C)->isZero();

    // State machine for single/double/range index comparison.
    if (IsTrueForElt) {
      // Update the TrueElement state machine.
      if (FirstTrueElement == Undefined)
        FirstTrueElement = TrueRangeEnd = i;  // First true element.
      else {
        // Update double-compare state machine.
        if (SecondTrueElement == Undefined)
          SecondTrueElement = i;
        else
          SecondTrueElement = Overdefined;

        // Update range state machine.
        if (TrueRangeEnd == (int)i-1)
          TrueRangeEnd = i;
        else
          TrueRangeEnd = Overdefined;
      }
    } else {
      // Update the FalseElement state machine.
      if (FirstFalseElement == Undefined)
        FirstFalseElement = FalseRangeEnd = i; // First false element.
      else {
        // Update double-compare state machine.
        if (SecondFalseElement == Undefined)
          SecondFalseElement = i;
        else
          SecondFalseElement = Overdefined;

        // Update range state machine.
        if (FalseRangeEnd == (int)i-1)
          FalseRangeEnd = i;
        else
          FalseRangeEnd = Overdefined;
      }
    }

    // If this element is in range, update our magic bitvector.
    if (i < 64 && IsTrueForElt)
      MagicBitvector |= 1ULL << i;

    // If all of our states become overdefined, bail out early.  Since the
    // predicate is expensive, only check it every 8 elements.  This is only
    // really useful for really huge arrays.
    if ((i & 8) == 0 && i >= 64 && SecondTrueElement == Overdefined &&
        SecondFalseElement == Overdefined && TrueRangeEnd == Overdefined &&
        FalseRangeEnd == Overdefined)
      return nullptr;
  }

  // Now that we've scanned the entire array, emit our new comparison(s).  We
  // order the state machines in complexity of the generated code.
  Value *Idx = GEP->getOperand(2);

  // If the index is larger than the pointer size of the target, truncate the
  // index down like the GEP would do implicitly.  We don't have to do this for
  // an inbounds GEP because the index can't be out of range.
  if (!GEP->isInBounds()) {
    Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
    unsigned PtrSize = IntPtrTy->getIntegerBitWidth();
    if (Idx->getType()->getPrimitiveSizeInBits() > PtrSize)
      Idx = Builder.CreateTrunc(Idx, IntPtrTy);
  }

  // If the comparison is only true for one or two elements, emit direct
  // comparisons.
  if (SecondTrueElement != Overdefined) {
    // None true -> false.
    if (FirstTrueElement == Undefined)
      return replaceInstUsesWith(ICI, Builder.getFalse());

    Value *FirstTrueIdx = ConstantInt::get(Idx->getType(), FirstTrueElement);

    // True for one element -> 'i == 47'.
    if (SecondTrueElement == Undefined)
      return new ICmpInst(ICmpInst::ICMP_EQ, Idx, FirstTrueIdx);

    // True for two elements -> 'i == 47 | i == 72'.
    Value *C1 = Builder.CreateICmpEQ(Idx, FirstTrueIdx);
    Value *SecondTrueIdx = ConstantInt::get(Idx->getType(), SecondTrueElement);
    Value *C2 = Builder.CreateICmpEQ(Idx, SecondTrueIdx);
    return BinaryOperator::CreateOr(C1, C2);
  }

  // If the comparison is only false for one or two elements, emit direct
  // comparisons.
  if (SecondFalseElement != Overdefined) {
    // None false -> true.
    if (FirstFalseElement == Undefined)
      return replaceInstUsesWith(ICI, Builder.getTrue());

    Value *FirstFalseIdx = ConstantInt::get(Idx->getType(), FirstFalseElement);

    // False for one element -> 'i != 47'.
    if (SecondFalseElement == Undefined)
      return new ICmpInst(ICmpInst::ICMP_NE, Idx, FirstFalseIdx);

    // False for two elements -> 'i != 47 & i != 72'.
    Value *C1 = Builder.CreateICmpNE(Idx, FirstFalseIdx);
    Value *SecondFalseIdx = ConstantInt::get(Idx->getType(),SecondFalseElement);
    Value *C2 = Builder.CreateICmpNE(Idx, SecondFalseIdx);
    return BinaryOperator::CreateAnd(C1, C2);
  }

  // If the comparison can be replaced with a range comparison for the elements
  // where it is true, emit the range check.
  if (TrueRangeEnd != Overdefined) {
    assert(TrueRangeEnd != FirstTrueElement && "Should emit single compare");

    // Generate (i-FirstTrue) <u (TrueRangeEnd-FirstTrue+1).
    if (FirstTrueElement) {
      Value *Offs = ConstantInt::get(Idx->getType(), -FirstTrueElement);
      Idx = Builder.CreateAdd(Idx, Offs);
    }

    Value *End = ConstantInt::get(Idx->getType(),
                                  TrueRangeEnd-FirstTrueElement+1);
    return new ICmpInst(ICmpInst::ICMP_ULT, Idx, End);
  }

  // False range check.
  if (FalseRangeEnd != Overdefined) {
    assert(FalseRangeEnd != FirstFalseElement && "Should emit single compare");
    // Generate (i-FirstFalse) >u (FalseRangeEnd-FirstFalse).
    if (FirstFalseElement) {
      Value *Offs = ConstantInt::get(Idx->getType(), -FirstFalseElement);
      Idx = Builder.CreateAdd(Idx, Offs);
    }

    Value *End = ConstantInt::get(Idx->getType(),
                                  FalseRangeEnd-FirstFalseElement);
    return new ICmpInst(ICmpInst::ICMP_UGT, Idx, End);
  }

  // If a magic bitvector captures the entire comparison state
  // of this load, replace it with computation that does:
  //   ((magic_cst >> i) & 1) != 0
  {
    Type *Ty = nullptr;

    // Look for an appropriate type:
    // - The type of Idx if the magic fits
    // - The smallest fitting legal type
    if (ArrayElementCount <= Idx->getType()->getIntegerBitWidth())
      Ty = Idx->getType();
    else
      Ty = DL.getSmallestLegalIntType(Init->getContext(), ArrayElementCount);

    if (Ty) {
      Value *V = Builder.CreateIntCast(Idx, Ty, false);
      V = Builder.CreateLShr(ConstantInt::get(Ty, MagicBitvector), V);
      V = Builder.CreateAnd(ConstantInt::get(Ty, 1), V);
      return new ICmpInst(ICmpInst::ICMP_NE, V, ConstantInt::get(Ty, 0));
    }
  }

  return nullptr;
}

/// Return a value that can be used to compare the *offset* implied by a GEP to
/// zero. For example, if we have &A[i], we want to return 'i' for
/// "icmp ne i, 0". Note that, in general, indices can be complex, and scales
/// are involved. The above expression would also be legal to codegen as
/// "icmp ne (i*4), 0" (assuming A is a pointer to i32).
/// This latter form is less amenable to optimization though, and we are allowed
/// to generate the first by knowing that pointer arithmetic doesn't overflow.
///
/// If we can't emit an optimized form for this expression, this returns null.
///
static Value *evaluateGEPOffsetExpression(User *GEP, InstCombiner &IC,
                                          const DataLayout &DL) {
  gep_type_iterator GTI = gep_type_begin(GEP);

  // Check to see if this gep only has a single variable index.  If so, and if
  // any constant indices are a multiple of its scale, then we can compute this
  // in terms of the scale of the variable index.  For example, if the GEP
  // implies an offset of "12 + i*4", then we can codegen this as "3 + i",
  // because the expression will cross zero at the same point.
  unsigned i, e = GEP->getNumOperands();
  int64_t Offset = 0;
  for (i = 1; i != e; ++i, ++GTI) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
      // Compute the aggregate offset of constant indices.
      if (CI->isZero()) continue;

      // Handle a struct index, which adds its field offset to the pointer.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
      } else {
        uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
        Offset += Size*CI->getSExtValue();
      }
    } else {
      // Found our variable index.
      break;
    }
  }

  // If there are no variable indices, we must have a constant offset, just
  // evaluate it the general way.
  if (i == e) return nullptr;

  Value *VariableIdx = GEP->getOperand(i);
  // Determine the scale factor of the variable element.  For example, this is
  // 4 if the variable index is into an array of i32.
  uint64_t VariableScale = DL.getTypeAllocSize(GTI.getIndexedType());

  // Verify that there are no other variable indices.  If so, emit the hard way.
  for (++i, ++GTI; i != e; ++i, ++GTI) {
    ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!CI) return nullptr;

    // Compute the aggregate offset of constant indices.
    if (CI->isZero()) continue;

    // Handle a struct index, which adds its field offset to the pointer.
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
    } else {
      uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
      Offset += Size*CI->getSExtValue();
    }
  }

  // Okay, we know we have a single variable index, which must be a
  // pointer/array/vector index.  If there is no offset, life is simple, return
  // the index.
  Type *IntPtrTy = DL.getIntPtrType(GEP->getOperand(0)->getType());
  unsigned IntPtrWidth = IntPtrTy->getIntegerBitWidth();
  if (Offset == 0) {
    // Cast to intptrty in case a truncation occurs.  If an extension is needed,
    // we don't need to bother extending: the extension won't affect where the
    // computation crosses zero.
    if (VariableIdx->getType()->getPrimitiveSizeInBits() > IntPtrWidth) {
      VariableIdx = IC.Builder.CreateTrunc(VariableIdx, IntPtrTy);
    }
    return VariableIdx;
  }

  // Otherwise, there is an index.  The computation we will do will be modulo
  // the pointer size.
  Offset = SignExtend64(Offset, IntPtrWidth);
  VariableScale = SignExtend64(VariableScale, IntPtrWidth);

  // To do this transformation, any constant index must be a multiple of the
  // variable scale factor.  For example, we can evaluate "12 + 4*i" as "3 + i",
  // but we can't evaluate "10 + 3*i" in terms of i.  Check that the offset is a
  // multiple of the variable scale.
  int64_t NewOffs = Offset / (int64_t)VariableScale;
  if (Offset != NewOffs*(int64_t)VariableScale)
    return nullptr;

  // Okay, we can do this evaluation.  Start by converting the index to intptr.
  if (VariableIdx->getType() != IntPtrTy)
    VariableIdx = IC.Builder.CreateIntCast(VariableIdx, IntPtrTy,
                                            true /*Signed*/);
  Constant *OffsetVal = ConstantInt::get(IntPtrTy, NewOffs);
  return IC.Builder.CreateAdd(VariableIdx, OffsetVal, "offset");
}

/// Returns true if we can rewrite Start as a GEP with pointer Base
/// and some integer offset. The nodes that need to be re-written
/// for this transformation will be added to Explored.
static bool canRewriteGEPAsOffset(Value *Start, Value *Base,
                                  const DataLayout &DL,
                                  SetVector<Value *> &Explored) {
  SmallVector<Value *, 16> WorkList(1, Start);
  Explored.insert(Base);

  // The following traversal gives us an order which can be used
  // when doing the final transformation. Since in the final
  // transformation we create the PHI replacement instructions first,
  // we don't have to get them in any particular order.
  //
  // However, for other instructions we will have to traverse the
  // operands of an instruction first, which means that we have to
  // do a post-order traversal.
  while (!WorkList.empty()) {
    SetVector<PHINode *> PHIs;

    while (!WorkList.empty()) {
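      // Cap the number of values we are willing to explore, so that this
      // analysis stays cheap on pathological inputs.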
      if (Explored.size() >= 100)
        return false;

      Value *V = WorkList.back();

      if (Explored.count(V) != 0) {
        WorkList.pop_back();
        continue;
      }

      if (!isa<IntToPtrInst>(V) && !isa<PtrToIntInst>(V) &&
          !isa<GetElementPtrInst>(V) && !isa<PHINode>(V))
        // We've found some value that we can't explore which is different from
        // the base. Therefore we can't do this transformation.
        return false;

      if (isa<IntToPtrInst>(V) || isa<PtrToIntInst>(V)) {
        auto *CI = dyn_cast<CastInst>(V);
        if (!CI->isNoopCast(DL))
          return false;

        if (Explored.count(CI->getOperand(0)) == 0)
          WorkList.push_back(CI->getOperand(0));
      }

      if (auto *GEP = dyn_cast<GEPOperator>(V)) {
        // We're limiting the GEP to having one index. This will preserve
        // the original pointer type. We could handle more cases in the
        // future.
        if (GEP->getNumIndices() != 1 || !GEP->isInBounds() ||
            GEP->getType() != Start->getType())
          return false;

        if (Explored.count(GEP->getOperand(0)) == 0)
          WorkList.push_back(GEP->getOperand(0));
      }

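      // If nothing new was pushed above, all of V's operands have already been
      // explored, so V itself is finished; this yields the post-order.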
      if (WorkList.back() == V) {
        WorkList.pop_back();
        // We've finished visiting this node, mark it as such.
        Explored.insert(V);
      }

      if (auto *PN = dyn_cast<PHINode>(V)) {
        // We cannot transform PHIs on unsplittable basic blocks.
        if (isa<CatchSwitchInst>(PN->getParent()->getTerminator()))
          return false;
        Explored.insert(PN);
        PHIs.insert(PN);
      }
    }

    // Explore the PHI nodes further.
    for (auto *PN : PHIs)
      for (Value *Op : PN->incoming_values())
        if (Explored.count(Op) == 0)
          WorkList.push_back(Op);
  }

  // Make sure that we can do this. Since we can't insert GEPs in a basic
  // block before a PHI node, we can't easily do this transformation if
  // we have PHI node users of transformed instructions.
  for (Value *Val : Explored) {
    for (Value *Use : Val->uses()) {

      auto *PHI = dyn_cast<PHINode>(Use);
      auto *Inst = dyn_cast<Instruction>(Val);

      if (Inst == Base || Inst == PHI || !Inst || !PHI ||
          Explored.count(PHI) == 0)
        continue;

      if (PHI->getParent() == Inst->getParent())
        return false;
    }
  }
  return true;
}

// Sets the appropriate insert point on Builder where we can add
// a replacement Instruction for V (if that is possible).
static void setInsertionPoint(IRBuilder<> &Builder, Value *V,
                              bool Before = true) {
  if (auto *PHI = dyn_cast<PHINode>(V)) {
    Builder.SetInsertPoint(&*PHI->getParent()->getFirstInsertionPt());
    return;
  }
  if (auto *I = dyn_cast<Instruction>(V)) {
    if (!Before)
      I = &*std::next(I->getIterator());
    Builder.SetInsertPoint(I);
    return;
  }
  if (auto *A = dyn_cast<Argument>(V)) {
    // Set the insertion point in the entry block.
    BasicBlock &Entry = A->getParent()->getEntryBlock();
    Builder.SetInsertPoint(&*Entry.getFirstInsertionPt());
    return;
  }
  // Otherwise, this is a constant and we don't need to set a new
  // insertion point.
  assert(isa<Constant>(V) && "Setting insertion point for unknown value!");
}

/// Returns a re-written value of Start as an indexed GEP using Base as a
/// pointer.
static Value *rewriteGEPAsOffset(Value *Start, Value *Base,
                                 const DataLayout &DL,
                                 SetVector<Value *> &Explored) {
  // Perform all the substitutions. This is a bit tricky because we can
  // have cycles in our use-def chains.
  // 1. Create the PHI nodes without any incoming values.
  // 2. Create all the other values.
  // 3. Add the edges for the PHI nodes.
  // 4. Emit GEPs to get the original pointers.
  // 5. Remove the original instructions.
  Type *IndexType = IntegerType::get(
      Base->getContext(), DL.getIndexTypeSizeInBits(Start->getType()));

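  // Map each rewritten value to its integer offset (of IndexType width)
  // relative to Base; Base itself corresponds to offset zero.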
  DenseMap<Value *, Value *> NewInsts;
  NewInsts[Base] = ConstantInt::getNullValue(IndexType);

  // Create the new PHI nodes, without adding any incoming values.
  for (Value *Val : Explored) {
    if (Val == Base)
      continue;
    // Create empty phi nodes. This avoids cyclic dependencies when creating
    // the remaining instructions.
    if (auto *PHI = dyn_cast<PHINode>(Val))
      NewInsts[PHI] = PHINode::Create(IndexType, PHI->getNumIncomingValues(),
                                      PHI->getName() + ".idx", PHI);
  }
  IRBuilder<> Builder(Base->getContext());

  // Create all the other instructions.
  for (Value *Val : Explored) {

    if (NewInsts.find(Val) != NewInsts.end())
      continue;

    if (auto *CI = dyn_cast<CastInst>(Val)) {
      // Don't get rid of the intermediate variable here; the store can grow
      // the map which will invalidate the reference to the input value.
      Value *V = NewInsts[CI->getOperand(0)];
      NewInsts[CI] = V;
      continue;
    }
    if (auto *GEP = dyn_cast<GEPOperator>(Val)) {
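      // Use the rewritten index if the GEP's index operand was itself
      // rewritten; otherwise keep the original index value.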
      Value *Index = NewInsts[GEP->getOperand(1)] ? NewInsts[GEP->getOperand(1)]
                                                  : GEP->getOperand(1);
      setInsertionPoint(Builder, GEP);
      // Indices might need to be sign extended. GEPs will magically do
      // this, but we need to do it ourselves here.
      if (Index->getType()->getScalarSizeInBits() !=
          NewInsts[GEP->getOperand(0)]->getType()->getScalarSizeInBits()) {
        Index = Builder.CreateSExtOrTrunc(
            Index, NewInsts[GEP->getOperand(0)]->getType(),
            GEP->getOperand(0)->getName() + ".sext");
      }

      auto *Op = NewInsts[GEP->getOperand(0)];
      if (isa<ConstantInt>(Op) && cast<ConstantInt>(Op)->isZero())
        NewInsts[GEP] = Index;
      else
        NewInsts[GEP] = Builder.CreateNSWAdd(
            Op, Index, GEP->getOperand(0)->getName() + ".add");
      continue;
    }
    if (isa<PHINode>(Val))
      continue;

    llvm_unreachable("Unexpected instruction type");
  }

  // Add the incoming values to the PHI nodes.
  for (Value *Val : Explored) {
    if (Val == Base)
      continue;
    // All the instructions have been created, we can now add edges to the
    // phi nodes.
    if (auto *PHI = dyn_cast<PHINode>(Val)) {
      PHINode *NewPhi = static_cast<PHINode *>(NewInsts[PHI]);
      for (unsigned I = 0, E = PHI->getNumIncomingValues(); I < E; ++I) {
        Value *NewIncoming = PHI->getIncomingValue(I);

        if (NewInsts.find(NewIncoming) != NewInsts.end())
          NewIncoming = NewInsts[NewIncoming];

        NewPhi->addIncoming(NewIncoming, PHI->getIncomingBlock(I));
      }
    }
  }

  for (Value *Val : Explored) {
    if (Val == Base)
      continue;

    // Depending on the type, for external users we have to emit
    // a GEP or a GEP + ptrtoint.
    setInsertionPoint(Builder, Val, false);

    // If required, create an inttoptr instruction for Base.
    Value *NewBase = Base;
    if (!Base->getType()->isPointerTy())
      NewBase = Builder.CreateBitOrPointerCast(Base, Start->getType(),
                                               Start->getName() + "to.ptr");

    Value *GEP = Builder.CreateInBoundsGEP(
        Start->getType()->getPointerElementType(), NewBase,
        makeArrayRef(NewInsts[Val]), Val->getName() + ".ptr");

    if (!Val->getType()->isPointerTy()) {
      Value *Cast = Builder.CreatePointerCast(GEP, Val->getType(),
                                              Val->getName() + ".conv");
      GEP = Cast;
    }
    Val->replaceAllUsesWith(GEP);
  }

  return NewInsts[Start];
}

/// Looks through GEPs, IntToPtrInsts and PtrToIntInsts in order to express
/// the input Value as a constant indexed GEP. Returns a pair containing
/// the GEPs Pointer and Index.
static std::pair<Value *, Value *>
getAsConstantIndexedAddress(Value *V, const DataLayout &DL) {
  Type *IndexType = IntegerType::get(V->getContext(),
                                     DL.getIndexTypeSizeInBits(V->getType()));

  Constant *Index = ConstantInt::getNullValue(IndexType);
  while (true) {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      // We accept only inbounds GEPs here to exclude the possibility of
      // overflow.
      if (!GEP->isInBounds())
        break;
      if (GEP->hasAllConstantIndices() && GEP->getNumIndices() == 1 &&
          GEP->getType() == V->getType()) {
        V = GEP->getOperand(0);
        Constant *GEPIndex = static_cast<Constant *>(GEP->getOperand(1));
        Index = ConstantExpr::getAdd(
            Index, ConstantExpr::getSExtOrBitCast(GEPIndex, IndexType));
        continue;
      }
      break;
    }
    if (auto *CI = dyn_cast<IntToPtrInst>(V)) {
      if (!CI->isNoopCast(DL))
        break;
      V = CI->getOperand(0);
      continue;
    }
    if (auto *CI = dyn_cast<PtrToIntInst>(V)) {
      if (!CI->isNoopCast(DL))
        break;
      V = CI->getOperand(0);
      continue;
    }
    break;
  }
  return {V, Index};
}

/// Converts (CMP GEPLHS, RHS) if this change would make RHS a constant.
/// We can look through PHIs, GEPs and casts in order to determine a common base
/// between GEPLHS and RHS.
static Instruction *transformToIndexedCompare(GEPOperator *GEPLHS, Value *RHS,
                                              ICmpInst::Predicate Cond,
                                              const DataLayout &DL) {
  // FIXME: Support vector of pointers.
  if (GEPLHS->getType()->isVectorTy())
    return nullptr;

  if (!GEPLHS->hasAllConstantIndices())
    return nullptr;

  // Make sure the pointers have the same type.
  if (GEPLHS->getType() != RHS->getType())
    return nullptr;

  Value *PtrBase, *Index;
  std::tie(PtrBase, Index) = getAsConstantIndexedAddress(GEPLHS, DL);

  // The set of nodes that will take part in this transformation.
  SetVector<Value *> Nodes;

  if (!canRewriteGEPAsOffset(RHS, PtrBase, DL, Nodes))
    return nullptr;

  // We know we can re-write this as
  //  ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2)
  // Since we've only looked through inbounds GEPs we know that we
  // can't have overflow on either side. We can therefore re-write
  // this as:
  //   OFFSET1 cmp OFFSET2
  Value *NewRHS = rewriteGEPAsOffset(RHS, PtrBase, DL, Nodes);

  // RewriteGEPAsOffset has replaced RHS and all of its uses with a re-written
  // GEP having PtrBase as the pointer base, and has returned in NewRHS the
  // offset. Since Index is the offset of LHS to the base pointer, we will now
  // compare the offsets instead of comparing the pointers.
  return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Index, NewRHS);
}

/// Fold comparisons between a GEP instruction and something else. At this point
/// we know that the GEP is on the LHS of the comparison.
Instruction *InstCombiner::foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
                                       ICmpInst::Predicate Cond,
                                       Instruction &I) {
  // Don't transform signed compares of GEPs into index compares. Even if the
  // GEP is inbounds, the final add of the base pointer can have signed overflow
  // and would change the result of the icmp.
  // e.g. "&foo[0] <s &foo[1]" can't be folded to "true" because "foo" could be
  // the maximum signed value for the pointer type.
  if (ICmpInst::isSigned(Cond))
    return nullptr;

  // Look through bitcasts and addrspacecasts. We do not however want to remove
  // 0 GEPs.
  if (!isa<GetElementPtrInst>(RHS))
    RHS = RHS->stripPointerCasts();

  Value *PtrBase = GEPLHS->getOperand(0);
  // FIXME: Support vector pointer GEPs.
  if (PtrBase == RHS && GEPLHS->isInBounds() &&
      !GEPLHS->getType()->isVectorTy()) {
    // ((gep Ptr, OFFSET) cmp Ptr)   ---> (OFFSET cmp 0).
    // This transformation (ignoring the base and scales) is valid because we
    // know pointers can't overflow since the gep is inbounds.  See if we can
    // output an optimized form.
    Value *Offset = evaluateGEPOffsetExpression(GEPLHS, *this, DL);

    // If not, synthesize the offset the hard way.
    if (!Offset)
      Offset = EmitGEPOffset(GEPLHS);
    return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Offset,
                        Constant::getNullValue(Offset->getType()));
  }

  if (GEPLHS->isInBounds() && ICmpInst::isEquality(Cond) &&
      isa<Constant>(RHS) && cast<Constant>(RHS)->isNullValue() &&
      !NullPointerIsDefined(I.getFunction(),
                            RHS->getType()->getPointerAddressSpace())) {
    // For most address spaces, an allocation can't be placed at null, but null
    // itself is treated as a 0 size allocation in the in bounds rules.  Thus,
    // the only valid inbounds address derived from null, is null itself.
    // Thus, we have four cases to consider:
    // 1) Base == nullptr, Offset == 0 -> inbounds, null
    // 2) Base == nullptr, Offset != 0 -> poison as the result is out of bounds
    // 3) Base != nullptr, Offset == (-base) -> poison (crossing allocations)
    // 4) Base != nullptr, Offset != (-base) -> nonnull (and possibly poison)
    //
    // (Note if we're indexing a type of size 0, that simply collapses into one
    //  of the buckets above.)
    //
    // In general, we're allowed to make values less poison (i.e. remove
    //   sources of full UB), so in this case, we just select between the two
    //   non-poison cases (1 and 4 above).
    //
    // For vectors, we apply the same reasoning on a per-lane basis.
    auto *Base = GEPLHS->getPointerOperand();
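    // If the GEP produces a vector of pointers from a single scalar base,
    // splat the base so the compare below is done lane-wise.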
    if (GEPLHS->getType()->isVectorTy() && Base->getType()->isPointerTy()) {
      int NumElts = GEPLHS->getType()->getVectorNumElements();
      Base = Builder.CreateVectorSplat(NumElts, Base);
    }
    return new ICmpInst(Cond, Base,
                        ConstantExpr::getPointerBitCastOrAddrSpaceCast(
                            cast<Constant>(RHS), Base->getType()));
  } else if (GEPOperator *GEPRHS = dyn_cast<GEPOperator>(RHS)) {
    // If the base pointers are different, but the indices are the same, just
    // compare the base pointer.
    if (PtrBase != GEPRHS->getOperand(0)) {
      bool IndicesTheSame = GEPLHS->getNumOperands()==GEPRHS->getNumOperands();
      IndicesTheSame &= GEPLHS->getOperand(0)->getType() ==
                        GEPRHS->getOperand(0)->getType();
      if (IndicesTheSame)
        for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
          if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
            IndicesTheSame = false;
            break;
          }

      // If all indices are the same, just compare the base pointers.
      Type *BaseType = GEPLHS->getOperand(0)->getType();
      if (IndicesTheSame && CmpInst::makeCmpResultType(BaseType) == I.getType())
        return new ICmpInst(Cond, GEPLHS->getOperand(0), GEPRHS->getOperand(0));

      // If we're comparing GEPs with two base pointers that only differ in type
      // and both GEPs have only constant indices or just one use, then fold
      // the compare with the adjusted indices.
      // FIXME: Support vector of pointers.
      if (GEPLHS->isInBounds() && GEPRHS->isInBounds() &&
          (GEPLHS->hasAllConstantIndices() || GEPLHS->hasOneUse()) &&
          (GEPRHS->hasAllConstantIndices() || GEPRHS->hasOneUse()) &&
          PtrBase->stripPointerCasts() ==
              GEPRHS->getOperand(0)->stripPointerCasts() &&
          !GEPLHS->getType()->isVectorTy()) {
        Value *LOffset = EmitGEPOffset(GEPLHS);
        Value *ROffset = EmitGEPOffset(GEPRHS);

        // If we looked through an addrspacecast between different sized address
        // spaces, the LHS and RHS pointers are different sized
        // integers. Truncate to the smaller one.
        Type *LHSIndexTy = LOffset->getType();
        Type *RHSIndexTy = ROffset->getType();
        if (LHSIndexTy != RHSIndexTy) {
          if (LHSIndexTy->getPrimitiveSizeInBits() <
              RHSIndexTy->getPrimitiveSizeInBits()) {
            ROffset = Builder.CreateTrunc(ROffset, LHSIndexTy);
          } else
            LOffset = Builder.CreateTrunc(LOffset, RHSIndexTy);
        }

        Value *Cmp = Builder.CreateICmp(ICmpInst::getSignedPredicate(Cond),
                                        LOffset, ROffset);
        return replaceInstUsesWith(I, Cmp);
      }

      // Otherwise, the base pointers are different and the indices are
      // different. Try to convert this to an indexed compare by looking through
      // PHIs/casts.
      return transformToIndexedCompare(GEPLHS, RHS, Cond, DL);
    }

    // If one of the GEPs has all zero indices, recurse.
    // FIXME: Handle vector of pointers.
    if (!GEPLHS->getType()->isVectorTy() && GEPLHS->hasAllZeroIndices())
      return foldGEPICmp(GEPRHS, GEPLHS->getOperand(0),
                         ICmpInst::getSwappedPredicate(Cond), I);

    // If the other GEP has all zero indices, recurse.
    // FIXME: Handle vector of pointers.
    if (!GEPRHS->getType()->isVectorTy() && GEPRHS->hasAllZeroIndices())
      return foldGEPICmp(GEPLHS, GEPRHS->getOperand(0), Cond, I);

    bool GEPsInBounds = GEPLHS->isInBounds() && GEPRHS->isInBounds();
    if (GEPLHS->getNumOperands() == GEPRHS->getNumOperands()) {
      // If the GEPs only differ by one index, compare it.
      unsigned NumDifferences = 0;  // Keep track of # differences.
      unsigned DiffOperand = 0;     // The operand that differs.
      for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
        if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
          Type *LHSType = GEPLHS->getOperand(i)->getType();
          Type *RHSType = GEPRHS->getOperand(i)->getType();
          // FIXME: Better support for vector of pointers.
          if (LHSType->getPrimitiveSizeInBits() !=
                   RHSType->getPrimitiveSizeInBits() ||
              (GEPLHS->getType()->isVectorTy() &&
               (!LHSType->isVectorTy() || !RHSType->isVectorTy()))) {
            // Irreconcilable differences.
            NumDifferences = 2;
            break;
          }

          if (NumDifferences++) break;
          DiffOperand = i;
        }

      if (NumDifferences == 0)   // SAME GEP?
        return replaceInstUsesWith(I, // No comparison is needed here.
          ConstantInt::get(I.getType(), ICmpInst::isTrueWhenEqual(Cond)));

      else if (NumDifferences == 1 && GEPsInBounds) {
        Value *LHSV = GEPLHS->getOperand(DiffOperand);
        Value *RHSV = GEPRHS->getOperand(DiffOperand);
        // Make sure we do a signed comparison here.
        return new ICmpInst(ICmpInst::getSignedPredicate(Cond), LHSV, RHSV);
      }
    }

    // Only lower this if the icmp is the only user of the GEP or if we expect
    // the result to fold to a constant!
    if (GEPsInBounds && (isa<ConstantExpr>(GEPLHS) || GEPLHS->hasOneUse()) &&
        (isa<ConstantExpr>(GEPRHS) || GEPRHS->hasOneUse())) {
      // ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2)  --->  (OFFSET1 cmp OFFSET2)
      Value *L = EmitGEPOffset(GEPLHS);
      Value *R = EmitGEPOffset(GEPRHS);
      return new ICmpInst(ICmpInst::getSignedPredicate(Cond), L, R);
    }
  }

  // Try to convert this to an indexed compare by looking through PHIs/casts as
  // a last resort.
  return transformToIndexedCompare(GEPLHS, RHS, Cond, DL);
}

Instruction *InstCombiner::foldAllocaCmp(ICmpInst &ICI,
                                         const AllocaInst *Alloca,
                                         const Value *Other) {
  assert(ICI.isEquality() && "Cannot fold non-equality comparison.");

  // It would be tempting to fold away comparisons between allocas and any
  // pointer not based on that alloca (e.g. an argument). However, even
  // though such pointers cannot alias, they can still compare equal.
  //
  // But LLVM doesn't specify where allocas get their memory, so if the alloca
  // doesn't escape we can argue that it's impossible to guess its value, and we
  // can therefore act as if any such guesses are wrong.
  //
  // The code below checks that the alloca doesn't escape, and that it's only
  // used in a comparison once (the current instruction). The
  // single-comparison-use condition ensures that we're trivially folding all
  // comparisons against the alloca consistently, and avoids the risk of
  // erroneously folding a comparison of the pointer with itself.

  unsigned MaxIter = 32; // Break cycles and bound to constant-time.

  SmallVector<const Use *, 32> Worklist;
  for (const Use &U : Alloca->uses()) {
    if (Worklist.size() >= MaxIter)
      return nullptr;
    Worklist.push_back(&U);
  }

  unsigned NumCmps = 0;
  while (!Worklist.empty()) {
    assert(Worklist.size() <= MaxIter);
    const Use *U = Worklist.pop_back_val();
    const Value *V = U->getUser();
    --MaxIter;

    if (isa<BitCastInst>(V) || isa<GetElementPtrInst>(V) || isa<PHINode>(V) ||
        isa<SelectInst>(V)) {
      // Track the uses.
    } else if (isa<LoadInst>(V)) {
      // Loading from the pointer doesn't escape it.
      continue;
    } else if (const auto *SI = dyn_cast<StoreInst>(V)) {
      // Storing *to* the pointer is fine, but storing the pointer escapes it.
      if (SI->getValueOperand() == U->get())
        return nullptr;
      continue;
    } else if (isa<ICmpInst>(V)) {
      if (NumCmps++)
        return nullptr; // Found more than one cmp.
      continue;
    } else if (const auto *Intrin = dyn_cast<IntrinsicInst>(V)) {
      switch (Intrin->getIntrinsicID()) {
        // These intrinsics don't escape or compare the pointer. Memset is safe
        // because we don't allow ptrtoint. Memcpy and memmove are safe because
        // we don't allow stores, so src cannot point to V.
        case Intrinsic::lifetime_start: case Intrinsic::lifetime_end:
        case Intrinsic::memcpy: case Intrinsic::memmove: case Intrinsic::memset:
          continue;
        default:
          return nullptr;
      }
    } else {
      return nullptr;
    }
    for (const Use &U : V->uses()) {
      if (Worklist.size() >= MaxIter)
        return nullptr;
      Worklist.push_back(&U);
    }
  }

  Type *CmpTy = CmpInst::makeCmpResultType(Other->getType());
  return replaceInstUsesWith(
      ICI,
      ConstantInt::get(CmpTy, !CmpInst::isTrueWhenEqual(ICI.getPredicate())));
}

/// Fold "icmp pred (X+C), X".
Instruction *InstCombiner::foldICmpAddOpConst(Value *X, const APInt &C,
                                              ICmpInst::Predicate Pred) {
  // From this point on, we know that (X+C <= X) --> (X+C < X) because C != 0,
  // so the values can never be equal.  Similarly for all other "or equals"
  // operators.
  assert(!!C && "C should not be zero!");

  // (X+1) <u X        --> X >u (MAXUINT-1)        --> X == 255
  // (X+2) <u X        --> X >u (MAXUINT-2)        --> X > 253
  // (X+MAXUINT) <u X  --> X >u (MAXUINT-MAXUINT)  --> X != 0
  if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
    Constant *R = ConstantInt::get(X->getType(),
                                   APInt::getMaxValue(C.getBitWidth()) - C);
    return new ICmpInst(ICmpInst::ICMP_UGT, X, R);
  }

  // (X+1) >u X        --> X <u (0-1)        --> X != 255
  // (X+2) >u X        --> X <u (0-2)        --> X <u 254
  // (X+MAXUINT) >u X  --> X <u (0-MAXUINT)  --> X <u 1  --> X == 0
  if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
    return new ICmpInst(ICmpInst::ICMP_ULT, X,
                        ConstantInt::get(X->getType(), -C));

  APInt SMax = APInt::getSignedMaxValue(C.getBitWidth());

  // (X+ 1) <s X       --> X >s (MAXSINT-1)          --> X == 127
  // (X+ 2) <s X       --> X >s (MAXSINT-2)          --> X >s 125
  // (X+MAXSINT) <s X  --> X >s (MAXSINT-MAXSINT)    --> X >s 0
  // (X+MINSINT) <s X  --> X >s (MAXSINT-MINSINT)    --> X >s -1
  // (X+ -2) <s X      --> X >s (MAXSINT- -2)        --> X >s 126
  // (X+ -1) <s X      --> X >s (MAXSINT- -1)        --> X != 127
  if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
    return new ICmpInst(ICmpInst::ICMP_SGT, X,
                        ConstantInt::get(X->getType(), SMax - C));

  // (X+ 1) >s X       --> X <s (MAXSINT-(1-1))       --> X != 127
  // (X+ 2) >s X       --> X <s (MAXSINT-(2-1))       --> X <s 126
  // (X+MAXSINT) >s X  --> X <s (MAXSINT-(MAXSINT-1)) --> X <s 1
  // (X+MINSINT) >s X  --> X <s (MAXSINT-(MINSINT-1)) --> X <s -2
  // (X+ -2) >s X      --> X <s (MAXSINT-(-2-1))      --> X <s -126
  // (X+ -1) >s X      --> X <s (MAXSINT-(-1-1))      --> X == -128

  assert(Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE);
  return new ICmpInst(ICmpInst::ICMP_SLT, X,
                      ConstantInt::get(X->getType(), SMax - (C - 1)));
}

/// Handle "(icmp eq/ne (ashr/lshr AP2, A), AP1)" ->
/// (icmp eq/ne A, Log2(AP2/AP1)) ->
/// (icmp eq/ne A, Log2(AP2) - Log2(AP1)).
Instruction *InstCombiner::foldICmpShrConstConst(ICmpInst &I, Value *A,
                                                 const APInt &AP1,
                                                 const APInt &AP2) {
  assert(I.isEquality() && "Cannot fold icmp gt/lt");

  auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
    if (I.getPredicate() == I.ICMP_NE)
      Pred = CmpInst::getInversePredicate(Pred);
    return new ICmpInst(Pred, LHS, RHS);
  };

  // Don't bother doing any work for cases which InstSimplify handles.
  if (AP2.isNullValue())
    return nullptr;

  bool IsAShr = isa<AShrOperator>(I.getOperand(0));
  if (IsAShr) {
    if (AP2.isAllOnesValue())
      return nullptr;
    if (AP2.isNegative() != AP1.isNegative())
      return nullptr;
    if (AP2.sgt(AP1))
      return nullptr;
  }

  if (!AP1)
    // 'A' must be large enough to shift out the highest set bit.
    return getICmp(I.ICMP_UGT, A,
                   ConstantInt::get(A->getType(), AP2.logBase2()));

  if (AP1 == AP2)
    return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));

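  // Get the distance between the highest bits that are set (or, for an ashr
  // of a negative value, the highest bits that are clear).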
  int Shift;
  if (IsAShr && AP1.isNegative())
    Shift = AP1.countLeadingOnes() - AP2.countLeadingOnes();
  else
    Shift = AP1.countLeadingZeros() - AP2.countLeadingZeros();

  if (Shift > 0) {
    if (IsAShr && AP1 == AP2.ashr(Shift)) {
      // There are multiple solutions if we are comparing against -1 and the LHS
      // of the ashr is not a power of two.
      if (AP1.isAllOnesValue() && !AP2.isPowerOf2())
        return getICmp(I.ICMP_UGE, A, ConstantInt::get(A->getType(), Shift));
      return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
    } else if (AP1 == AP2.lshr(Shift)) {
      return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
    }
  }

  // Shifting const2 will never be equal to const1.
  // FIXME: This should always be handled by InstSimplify?
  auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE);
  return replaceInstUsesWith(I, TorF);
}

/// Handle "(icmp eq/ne (shl AP2, A), AP1)" ->
/// (icmp eq/ne A, TrailingZeros(AP1) - TrailingZeros(AP2)).
Instruction *InstCombiner::foldICmpShlConstConst(ICmpInst &I, Value *A,
                                                 const APInt &AP1,
                                                 const APInt &AP2) {
  assert(I.isEquality() && "Cannot fold icmp gt/lt");

  auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
    if (I.getPredicate() == I.ICMP_NE)
      Pred = CmpInst::getInversePredicate(Pred);
    return new ICmpInst(Pred, LHS, RHS);
  };

  // Don't bother doing any work for cases which InstSimplify handles.
  if (AP2.isNullValue())
    return nullptr;

  unsigned AP2TrailingZeros = AP2.countTrailingZeros();

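  // Shifting AP2 left by at least BitWidth - TrailingZeros(AP2) shifts out
  // every set bit, so when AP1 is zero the compare becomes an unsigned
  // threshold test on A.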
1228  if (!AP1 && AP2TrailingZeros != 0)
1229    return getICmp(
1230        I.ICMP_UGE, A,
1231        ConstantInt::get(A->getType(), AP2.getBitWidth() - AP2TrailingZeros));
1232
1233  if (AP1 == AP2)
1234    return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));
1235
1236  // Get the distance between the lowest bits that are set.
1237  int Shift = AP1.countTrailingZeros() - AP2TrailingZeros;
1238
1239  if (Shift > 0 && AP2.shl(Shift) == AP1)
1240    return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
1241
1242  // Shifting const2 will never be equal to const1.
1243  // FIXME: This should always be handled by InstSimplify?
1244  auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE);
1245  return replaceInstUsesWith(I, TorF);
1246}
1247
1248/// The caller has matched a pattern of the form:
1249///   I = icmp ugt (add (add A, B), CI2), CI1
1250/// If this is of the form:
1251///   sum = a + b
1252///   if (sum+128 >u 255)
1253/// Then replace it with llvm.sadd.with.overflow.i8.
1254///
1255static Instruction *processUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
1256                                          ConstantInt *CI2, ConstantInt *CI1,
1257                                          InstCombiner &IC) {
1258  // The transformation we're trying to do here is to transform this into an
1259  // llvm.sadd.with.overflow.  To do this, we have to replace the original add
1260  // with a narrower add, and discard the add-with-constant that is part of the
1261  // range check (if we can't eliminate it, this isn't profitable).
1262
1263  // In order to eliminate the add-with-constant, the compare can be its only
1264  // use.
1265  Instruction *AddWithCst = cast<Instruction>(I.getOperand(0));
1266  if (!AddWithCst->hasOneUse())
1267    return nullptr;
1268
1269  // If CI2 is 2^7, 2^15, 2^31, then it might be an sadd.with.overflow.
1270  if (!CI2->getValue().isPowerOf2())
1271    return nullptr;
1272  unsigned NewWidth = CI2->getValue().countTrailingZeros();
1273  if (NewWidth != 7 && NewWidth != 15 && NewWidth != 31)
1274    return nullptr;
1275
1276  // The width of the new add formed is 1 more than the bias.
1277  ++NewWidth;
1278
1279  // Check to see that CI1 is an all-ones value with NewWidth bits.
1280  if (CI1->getBitWidth() == NewWidth ||
1281      CI1->getValue() != APInt::getLowBitsSet(CI1->getBitWidth(), NewWidth))
1282    return nullptr;
1283
1284  // This is only really a signed overflow check if the inputs have been
1285  // sign-extended; check for that condition. For example, if CI2 is 2^31 and
1286  // the operands of the add are 64 bits wide, we need at least 33 sign bits.
1287  unsigned NeededSignBits = CI1->getBitWidth() - NewWidth + 1;
1288  if (IC.ComputeNumSignBits(A, 0, &I) < NeededSignBits ||
1289      IC.ComputeNumSignBits(B, 0, &I) < NeededSignBits)
1290    return nullptr;
1291
1292  // In order to replace the original add with a narrower
1293  // llvm.sadd.with.overflow, the only uses allowed are the add-with-constant
1294  // and truncates that discard the high bits of the add.  Verify that this is
1295  // the case.
1296  Instruction *OrigAdd = cast<Instruction>(AddWithCst->getOperand(0));
1297  for (User *U : OrigAdd->users()) {
1298    if (U == AddWithCst)
1299      continue;
1300
1301    // Only accept truncates for now.  We would really like a nice recursive
1302    // predicate like SimplifyDemandedBits, but which goes downwards the use-def
1303    // chain to see which bits of a value are actually demanded.  If the
1304    // original add had another add which was then immediately truncated, we
1305    // could still do the transformation.
1306    TruncInst *TI = dyn_cast<TruncInst>(U);
1307    if (!TI || TI->getType()->getPrimitiveSizeInBits() > NewWidth)
1308      return nullptr;
1309  }
1310
1311  // If the pattern matches, truncate the inputs to the narrower type and
1312  // use the sadd_with_overflow intrinsic to efficiently compute both the
1313  // result and the overflow bit.
1314  Type *NewType = IntegerType::get(OrigAdd->getContext(), NewWidth);
1315  Function *F = Intrinsic::getDeclaration(
1316      I.getModule(), Intrinsic::sadd_with_overflow, NewType);
1317
1318  InstCombiner::BuilderTy &Builder = IC.Builder;
1319
1320  // Put the new code above the original add, in case there are any uses of the
1321  // add between the add and the compare.
1322  Builder.SetInsertPoint(OrigAdd);
1323
1324  Value *TruncA = Builder.CreateTrunc(A, NewType, A->getName() + ".trunc");
1325  Value *TruncB = Builder.CreateTrunc(B, NewType, B->getName() + ".trunc");
1326  CallInst *Call = Builder.CreateCall(F, {TruncA, TruncB}, "sadd");
1327  Value *Add = Builder.CreateExtractValue(Call, 0, "sadd.result");
1328  Value *ZExt = Builder.CreateZExt(Add, OrigAdd->getType());
1329
1330  // The inner add was the result of the narrow add, zero extended to the
1331  // wider type.  Replace it with the result computed by the intrinsic.
1332  IC.replaceInstUsesWith(*OrigAdd, ZExt);
1333
1334  // The original icmp gets replaced with the overflow value.
1335  return ExtractValueInst::Create(Call, 1, "sadd.overflow");
1336}
1337
1338/// If we have:
1339///   icmp eq/ne (urem/srem %x, %y), 0
1340/// iff %y is a power-of-two, we can replace this with a bit test:
1341///   icmp eq/ne (and %x, (add %y, -1)), 0
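/// For example (a sketch with a constant divisor): if %y is known to be 16,
///   icmp eq (urem i32 %x, 16), 0  -->  icmp eq (and i32 %x, 15), 0
/// The same holds for srem, because only the low bits decide divisibility by
/// a power of two.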
1342Instruction *InstCombiner::foldIRemByPowerOfTwoToBitTest(ICmpInst &I) {
1343  // This fold is only valid for equality predicates.
1344  if (!I.isEquality())
1345    return nullptr;
1346  ICmpInst::Predicate Pred;
1347  Value *X, *Y, *Zero;
1348  if (!match(&I, m_ICmp(Pred, m_OneUse(m_IRem(m_Value(X), m_Value(Y))),
1349                        m_CombineAnd(m_Zero(), m_Value(Zero)))))
1350    return nullptr;
1351  if (!isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, 0, &I))
1352    return nullptr;
1353  // This may increase instruction count, since we don't enforce that Y is a constant.
1354  Value *Mask = Builder.CreateAdd(Y, Constant::getAllOnesValue(Y->getType()));
1355  Value *Masked = Builder.CreateAnd(X, Mask);
1356  return ICmpInst::Create(Instruction::ICmp, Pred, Masked, Zero);
1357}
1358
1359/// Fold equality-comparison between zero and any (maybe truncated) right-shift
1360/// by one-less-than-bitwidth into a sign test on the original value.
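/// For example (a sketch, i32):
///   icmp eq (lshr i32 %x, 31), 0  -->  icmp sge i32 %x, 0
///   icmp ne (lshr i32 %x, 31), 0  -->  icmp slt i32 %x, 0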
1361Instruction *InstCombiner::foldSignBitTest(ICmpInst &I) {
1362  Instruction *Val;
1363  ICmpInst::Predicate Pred;
1364  if (!I.isEquality() || !match(&I, m_ICmp(Pred, m_Instruction(Val), m_Zero())))
1365    return nullptr;
1366
1367  Value *X;
1368  Type *XTy;
1369
1370  Constant *C;
1371  if (match(Val, m_TruncOrSelf(m_Shr(m_Value(X), m_Constant(C))))) {
1372    XTy = X->getType();
1373    unsigned XBitWidth = XTy->getScalarSizeInBits();
1374    if (!match(C, m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_EQ,
1375                                     APInt(XBitWidth, XBitWidth - 1))))
1376      return nullptr;
1377  } else if (isa<BinaryOperator>(Val) &&
1378             (X = reassociateShiftAmtsOfTwoSameDirectionShifts(
1379                  cast<BinaryOperator>(Val), SQ.getWithInstruction(Val),
1380                  /*AnalyzeForSignBitExtraction=*/true))) {
1381    XTy = X->getType();
1382  } else
1383    return nullptr;
1384
1385  return ICmpInst::Create(Instruction::ICmp,
1386                          Pred == ICmpInst::ICMP_EQ ? ICmpInst::ICMP_SGE
1387                                                    : ICmpInst::ICMP_SLT,
1388                          X, ConstantInt::getNullValue(XTy));
1389}
1390
1391// Handle  icmp pred X, 0
1392Instruction *InstCombiner::foldICmpWithZero(ICmpInst &Cmp) {
1393  CmpInst::Predicate Pred = Cmp.getPredicate();
1394  if (!match(Cmp.getOperand(1), m_Zero()))
1395    return nullptr;
1396
1397  // (icmp sgt smin(PosA, B) 0) -> (icmp sgt B 0)
1398  if (Pred == ICmpInst::ICMP_SGT) {
1399    Value *A, *B;
1400    SelectPatternResult SPR = matchSelectPattern(Cmp.getOperand(0), A, B);
1401    if (SPR.Flavor == SPF_SMIN) {
1402      if (isKnownPositive(A, DL, 0, &AC, &Cmp, &DT))
1403        return new ICmpInst(Pred, B, Cmp.getOperand(1));
1404      if (isKnownPositive(B, DL, 0, &AC, &Cmp, &DT))
1405        return new ICmpInst(Pred, A, Cmp.getOperand(1));
1406    }
1407  }
1408
1409  if (Instruction *New = foldIRemByPowerOfTwoToBitTest(Cmp))
1410    return New;
1411
1412  // Given:
1413  //   icmp eq/ne (urem %x, %y), 0
1414  // Iff %x has 0 or 1 bits set, and %y has at least 2 bits set, omit 'urem':
1415  //   icmp eq/ne %x, 0
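  // Reasoning sketch: such an %x is either zero or a single power of two, and
  // a value with two or more bits set can never divide a power of two, so the
  // remainder is zero exactly when %x itself is zero.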
1416  Value *X, *Y;
1417  if (match(Cmp.getOperand(0), m_URem(m_Value(X), m_Value(Y))) &&
1418      ICmpInst::isEquality(Pred)) {
1419    KnownBits XKnown = computeKnownBits(X, 0, &Cmp);
1420    KnownBits YKnown = computeKnownBits(Y, 0, &Cmp);
1421    if (XKnown.countMaxPopulation() == 1 && YKnown.countMinPopulation() >= 2)
1422      return new ICmpInst(Pred, X, Cmp.getOperand(1));
1423  }
1424
1425  return nullptr;
1426}
1427
1428/// Fold icmp Pred X, C.
1429/// TODO: This code structure does not make sense. The saturating add fold
1430/// should be moved to some other helper and extended as noted below (it is also
1431/// possible that code has been made unnecessary - do we canonicalize IR to
1432/// overflow/saturating intrinsics or not?).
1433Instruction *InstCombiner::foldICmpWithConstant(ICmpInst &Cmp) {
1434  // Match the following pattern, which is a common idiom when writing
1435  // overflow-safe integer arithmetic functions. The source performs an addition
1436  // in a wider type and explicitly checks for overflow using comparisons against
1437  // INT_MIN and INT_MAX. Simplify by using the sadd_with_overflow intrinsic.
1438  //
1439  // TODO: This could probably be generalized to handle other overflow-safe
1440  // operations if we worked out the formulas to compute the appropriate magic
1441  // constants.
1442  //
1443  // sum = a + b
1444  // if (sum+128 >u 255)  ...  -> llvm.sadd.with.overflow.i8
1445  CmpInst::Predicate Pred = Cmp.getPredicate();
1446  Value *Op0 = Cmp.getOperand(0), *Op1 = Cmp.getOperand(1);
1447  Value *A, *B;
1448  ConstantInt *CI, *CI2; // I = icmp ugt (add (add A, B), CI2), CI
1449  if (Pred == ICmpInst::ICMP_UGT && match(Op1, m_ConstantInt(CI)) &&
1450      match(Op0, m_Add(m_Add(m_Value(A), m_Value(B)), m_ConstantInt(CI2))))
1451    if (Instruction *Res = processUGT_ADDCST_ADD(Cmp, A, B, CI2, CI, *this))
1452      return Res;
1453
1454  return nullptr;
1455}
1456
1457/// Canonicalize icmp instructions based on dominating conditions.
1458Instruction *InstCombiner::foldICmpWithDominatingICmp(ICmpInst &Cmp) {
1459  // This is a cheap/incomplete check for dominance - just match a single
1460  // predecessor with a conditional branch.
1461  BasicBlock *CmpBB = Cmp.getParent();
1462  BasicBlock *DomBB = CmpBB->getSinglePredecessor();
1463  if (!DomBB)
1464    return nullptr;
1465
1466  Value *DomCond;
1467  BasicBlock *TrueBB, *FalseBB;
1468  if (!match(DomBB->getTerminator(), m_Br(m_Value(DomCond), TrueBB, FalseBB)))
1469    return nullptr;
1470
1471  assert((TrueBB == CmpBB || FalseBB == CmpBB) &&
1472         "Predecessor block does not point to successor?");
1473
1474  // The branch should get simplified. Don't bother simplifying this condition.
1475  if (TrueBB == FalseBB)
1476    return nullptr;
1477
1478  // Try to simplify this compare to T/F based on the dominating condition.
1479  Optional<bool> Imp = isImpliedCondition(DomCond, &Cmp, DL, TrueBB == CmpBB);
1480  if (Imp)
1481    return replaceInstUsesWith(Cmp, ConstantInt::get(Cmp.getType(), *Imp));
1482
1483  CmpInst::Predicate Pred = Cmp.getPredicate();
1484  Value *X = Cmp.getOperand(0), *Y = Cmp.getOperand(1);
1485  ICmpInst::Predicate DomPred;
1486  const APInt *C, *DomC;
1487  if (match(DomCond, m_ICmp(DomPred, m_Specific(X), m_APInt(DomC))) &&
1488      match(Y, m_APInt(C))) {
1489    // We have 2 compares of a variable with constants. Calculate the constant
1490    // ranges of those compares to see if we can transform the 2nd compare:
1491    // DomBB:
1492    //   DomCond = icmp DomPred X, DomC
1493    //   br DomCond, CmpBB, FalseBB
1494    // CmpBB:
1495    //   Cmp = icmp Pred X, C
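    // For example (a sketch): if DomCond is 'icmp ult X, 5' and its true edge
    // leads here, X is known to be in [0, 5); a compare such as 'icmp ugt X, 7'
    // then has an empty intersection with that range and folds to false.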
1496    ConstantRange CR = ConstantRange::makeAllowedICmpRegion(Pred, *C);
1497    ConstantRange DominatingCR =
1498        (CmpBB == TrueBB) ? ConstantRange::makeExactICmpRegion(DomPred, *DomC)
1499                          : ConstantRange::makeExactICmpRegion(
1500                                CmpInst::getInversePredicate(DomPred), *DomC);
1501    ConstantRange Intersection = DominatingCR.intersectWith(CR);
1502    ConstantRange Difference = DominatingCR.difference(CR);
1503    if (Intersection.isEmptySet())
1504      return replaceInstUsesWith(Cmp, Builder.getFalse());
1505    if (Difference.isEmptySet())
1506      return replaceInstUsesWith(Cmp, Builder.getTrue());
1507
1508    // Canonicalizing a sign bit comparison that gets used in a branch
1509    // pessimizes codegen by generating a branch-on-zero instruction instead
1510    // of a test-and-branch. We avoid canonicalizing in such situations
1511    // because a test-and-branch instruction has better branch displacement
1512    // than a compare-and-branch instruction.
1513    bool UnusedBit;
1514    bool IsSignBit = isSignBitCheck(Pred, *C, UnusedBit);
1515    if (Cmp.isEquality() || (IsSignBit && hasBranchUse(Cmp)))
1516      return nullptr;
1517
1518    if (const APInt *EqC = Intersection.getSingleElement())
1519      return new ICmpInst(ICmpInst::ICMP_EQ, X, Builder.getInt(*EqC));
1520    if (const APInt *NeC = Difference.getSingleElement())
1521      return new ICmpInst(ICmpInst::ICMP_NE, X, Builder.getInt(*NeC));
1522  }
1523
1524  return nullptr;
1525}
1526
1527/// Fold icmp (trunc X, Y), C.
1528Instruction *InstCombiner::foldICmpTruncConstant(ICmpInst &Cmp,
1529                                                 TruncInst *Trunc,
1530                                                 const APInt &C) {
1531  ICmpInst::Predicate Pred = Cmp.getPredicate();
1532  Value *X = Trunc->getOperand(0);
1533  if (C.isOneValue() && C.getBitWidth() > 1) {
1534    // icmp slt trunc(signum(V)) 1 --> icmp slt V, 1
1535    Value *V = nullptr;
1536    if (Pred == ICmpInst::ICMP_SLT && match(X, m_Signum(m_Value(V))))
1537      return new ICmpInst(ICmpInst::ICMP_SLT, V,
1538                          ConstantInt::get(V->getType(), 1));
1539  }
1540
1541  if (Cmp.isEquality() && Trunc->hasOneUse()) {
1542    // Simplify icmp eq (trunc x to i8), 42 -> icmp eq x, 42|highbits if all
1543    // of the high bits truncated out of x are known.
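    // For example (a sketch): if the top 24 bits of i32 %x are known to be
    // zero, 'icmp eq (trunc i32 %x to i8), 42' becomes 'icmp eq i32 %x, 42';
    // any known-one high bits would be OR'ed into the widened constant.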
1544    unsigned DstBits = Trunc->getType()->getScalarSizeInBits(),
1545             SrcBits = X->getType()->getScalarSizeInBits();
1546    KnownBits Known = computeKnownBits(X, 0, &Cmp);
1547
1548    // If all the high bits are known, we can do this xform.
1549    if ((Known.Zero | Known.One).countLeadingOnes() >= SrcBits - DstBits) {
1550      // Pull in the high bits from known-ones set.
1551      APInt NewRHS = C.zext(SrcBits);
1552      NewRHS |= Known.One & APInt::getHighBitsSet(SrcBits, SrcBits - DstBits);
1553      return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), NewRHS));
1554    }
1555  }
1556
1557  return nullptr;
1558}
1559
1560/// Fold icmp (xor X, Y), C.
1561Instruction *InstCombiner::foldICmpXorConstant(ICmpInst &Cmp,
1562                                               BinaryOperator *Xor,
1563                                               const APInt &C) {
1564  Value *X = Xor->getOperand(0);
1565  Value *Y = Xor->getOperand(1);
1566  const APInt *XorC;
1567  if (!match(Y, m_APInt(XorC)))
1568    return nullptr;
1569
1570  // If this is a comparison that tests the sign bit (X < 0) or (X > -1),
1571  // fold the xor.
1572  ICmpInst::Predicate Pred = Cmp.getPredicate();
1573  bool TrueIfSigned = false;
1574  if (isSignBitCheck(Cmp.getPredicate(), C, TrueIfSigned)) {
1575
1576    // If the sign bit of the XorCst is not set, there is no change to
1577    // the operation, just stop using the Xor.
1578    if (!XorC->isNegative()) {
1579      Cmp.setOperand(0, X);
1580      Worklist.Add(Xor);
1581      return &Cmp;
1582    }
1583
1584    // Emit the opposite comparison.
1585    if (TrueIfSigned)
1586      return new ICmpInst(ICmpInst::ICMP_SGT, X,
1587                          ConstantInt::getAllOnesValue(X->getType()));
1588    else
1589      return new ICmpInst(ICmpInst::ICMP_SLT, X,
1590                          ConstantInt::getNullValue(X->getType()));
1591  }
1592
1593  if (Xor->hasOneUse()) {
1594    // (icmp u/s (xor X SignMask), C) -> (icmp s/u X, (xor C SignMask))
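    // For example (a sketch, i8): 'icmp ult (xor X, 0x80), 0x90' becomes
    // 'icmp slt X, 0x10', since xor with the sign mask maps unsigned order
    // onto signed order.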
1595    if (!Cmp.isEquality() && XorC->isSignMask()) {
1596      Pred = Cmp.isSigned() ? Cmp.getUnsignedPredicate()
1597                            : Cmp.getSignedPredicate();
1598      return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), C ^ *XorC));
1599    }
1600
1601    // (icmp u/s (xor X ~SignMask), C) -> (icmp s/u X, (xor C ~SignMask))
1602    if (!Cmp.isEquality() && XorC->isMaxSignedValue()) {
1603      Pred = Cmp.isSigned() ? Cmp.getUnsignedPredicate()
1604                            : Cmp.getSignedPredicate();
1605      Pred = Cmp.getSwappedPredicate(Pred);
1606      return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), C ^ *XorC));
1607    }
1608  }
1609
1610  // Mask constant magic can eliminate an 'xor' with unsigned compares.
1611  if (Pred == ICmpInst::ICMP_UGT) {
1612    // (xor X, ~C) >u C --> X <u ~C (when C+1 is a power of 2)
1613    if (*XorC == ~C && (C + 1).isPowerOf2())
1614      return new ICmpInst(ICmpInst::ICMP_ULT, X, Y);
1615    // (xor X, C) >u C --> X >u C (when C+1 is a power of 2)
1616    if (*XorC == C && (C + 1).isPowerOf2())
1617      return new ICmpInst(ICmpInst::ICMP_UGT, X, Y);
1618  }
1619  if (Pred == ICmpInst::ICMP_ULT) {
1620    // (xor X, -C) <u C --> X >u ~C (when C is a power of 2)
1621    if (*XorC == -C && C.isPowerOf2())
1622      return new ICmpInst(ICmpInst::ICMP_UGT, X,
1623                          ConstantInt::get(X->getType(), ~C));
1624    // (xor X, C) <u C --> X >u ~C (when -C is a power of 2)
1625    if (*XorC == C && (-C).isPowerOf2())
1626      return new ICmpInst(ICmpInst::ICMP_UGT, X,
1627                          ConstantInt::get(X->getType(), ~C));
1628  }
1629  return nullptr;
1630}
1631
1632/// Fold icmp (and (sh X, Y), C2), C1.
1633Instruction *InstCombiner::foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And,
1634                                            const APInt &C1, const APInt &C2) {
1635  BinaryOperator *Shift = dyn_cast<BinaryOperator>(And->getOperand(0));
1636  if (!Shift || !Shift->isShift())
1637    return nullptr;
1638
1639  // If this is: (X >> C3) & C2 != C1 (where any shift and any compare could
1640  // exist), turn it into (X & (C2 << C3)) != (C1 << C3). This happens a LOT in
1641  // code produced by the clang front-end, for bitfield access.
1642  // This seemingly simple opportunity to fold away a shift turns out to be
1643  // rather complicated. See PR17827 for details.
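  // For example (a sketch with lshr): '((X >> 3) & 15) == 2' becomes
  // '(X & (15 << 3)) == (2 << 3)', i.e. '(X & 120) == 16', and the shift dies.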
1644  unsigned ShiftOpcode = Shift->getOpcode();
1645  bool IsShl = ShiftOpcode == Instruction::Shl;
1646  const APInt *C3;
1647  if (match(Shift->getOperand(1), m_APInt(C3))) {
1648    bool CanFold = false;
1649    if (ShiftOpcode == Instruction::Shl) {
1650      // For a left shift, we can fold if the comparison is not signed. We can
1651      // also fold a signed comparison if the mask value and comparison value
1652      // are not negative. These constraints may not be obvious, but we can
1653      // prove that they are correct using an SMT solver.
1654      if (!Cmp.isSigned() || (!C2.isNegative() && !C1.isNegative()))
1655        CanFold = true;
1656    } else {
1657      bool IsAshr = ShiftOpcode == Instruction::AShr;
1658      // For a logical right shift, we can fold if the comparison is not signed.
1659      // We can also fold a signed comparison if the shifted mask value and the
1660      // shifted comparison value are not negative. These constraints may not be
1661      // obvious, but we can prove that they are correct using an SMT solver.
1662      // For an arithmetic shift right we can do the same, if we ensure
1663      // the And doesn't use any bits being shifted in. Normally these would
1664      // be turned into lshr by SimplifyDemandedBits, but not if there is an
1665      // additional user.
1666      if (!IsAshr || (C2.shl(*C3).lshr(*C3) == C2)) {
1667        if (!Cmp.isSigned() ||
1668            (!C2.shl(*C3).isNegative() && !C1.shl(*C3).isNegative()))
1669          CanFold = true;
1670      }
1671    }
1672
1673    if (CanFold) {
1674      APInt NewCst = IsShl ? C1.lshr(*C3) : C1.shl(*C3);
1675      APInt SameAsC1 = IsShl ? NewCst.shl(*C3) : NewCst.lshr(*C3);
1676      // Check to see if we are shifting out any of the bits being compared.
1677      if (SameAsC1 != C1) {
1678        // If we shifted bits out, the fold is not going to work out. As a
1679        // special case, check to see if this means that the result is always
1680        // true or false now.
1681        if (Cmp.getPredicate() == ICmpInst::ICMP_EQ)
1682          return replaceInstUsesWith(Cmp, ConstantInt::getFalse(Cmp.getType()));
1683        if (Cmp.getPredicate() == ICmpInst::ICMP_NE)
1684          return replaceInstUsesWith(Cmp, ConstantInt::getTrue(Cmp.getType()));
1685      } else {
1686        Cmp.setOperand(1, ConstantInt::get(And->getType(), NewCst));
1687        APInt NewAndCst = IsShl ? C2.lshr(*C3) : C2.shl(*C3);
1688        And->setOperand(1, ConstantInt::get(And->getType(), NewAndCst));
1689        And->setOperand(0, Shift->getOperand(0));
1690        Worklist.Add(Shift); // Shift is dead.
1691        return &Cmp;
1692      }
1693    }
1694  }
1695
1696  // Turn ((X >> Y) & C2) == 0  into  (X & (C2 << Y)) == 0.  The latter is
1697  // preferable because it allows the C2 << Y expression to be hoisted out of a
1698  // loop if Y is invariant and X is not.
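  // For example (a sketch): '((X >> Y) & 1) == 0' becomes '(X & (1 << Y)) == 0',
  // and '1 << Y' can then be hoisted when Y is loop-invariant.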
1699  if (Shift->hasOneUse() && C1.isNullValue() && Cmp.isEquality() &&
1700      !Shift->isArithmeticShift() && !isa<Constant>(Shift->getOperand(0))) {
1701    // Compute C2 << Y.
1702    Value *NewShift =
1703        IsShl ? Builder.CreateLShr(And->getOperand(1), Shift->getOperand(1))
1704              : Builder.CreateShl(And->getOperand(1), Shift->getOperand(1));
1705
1706    // Compute X & (C2 << Y).
1707    Value *NewAnd = Builder.CreateAnd(Shift->getOperand(0), NewShift);
1708    Cmp.setOperand(0, NewAnd);
1709    return &Cmp;
1710  }
1711
1712  return nullptr;
1713}
1714
1715/// Fold icmp (and X, C2), C1.
1716Instruction *InstCombiner::foldICmpAndConstConst(ICmpInst &Cmp,
1717                                                 BinaryOperator *And,
1718                                                 const APInt &C1) {
1719  bool isICMP_NE = Cmp.getPredicate() == ICmpInst::ICMP_NE;
1720
1721  // For vectors: icmp ne (and X, 1), 0 --> trunc X to N x i1
1722  // TODO: We canonicalize to the longer form for scalars because we have
1723  // better analysis/folds for icmp, and codegen may be better with icmp.
1724  if (isICMP_NE && Cmp.getType()->isVectorTy() && C1.isNullValue() &&
1725      match(And->getOperand(1), m_One()))
1726    return new TruncInst(And->getOperand(0), Cmp.getType());
1727
1728  const APInt *C2;
1729  Value *X;
1730  if (!match(And, m_And(m_Value(X), m_APInt(C2))))
1731    return nullptr;
1732
1733  // Don't perform the following transforms if the AND has multiple uses
1734  if (!And->hasOneUse())
1735    return nullptr;
1736
1737  if (Cmp.isEquality() && C1.isNullValue()) {
1738    // Restrict this fold to single-use 'and' (PR10267).
1739    // Replace (and X, (1 << size(X)-1) != 0) with X s< 0
1740    if (C2->isSignMask()) {
1741      Constant *Zero = Constant::getNullValue(X->getType());
1742      auto NewPred = isICMP_NE ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGE;
1743      return new ICmpInst(NewPred, X, Zero);
1744    }
1745
1746    // Restrict this fold only for single-use 'and' (PR10267).
1747    // ((%x & C) == 0) --> %x u< (-C)  iff (-C) is power of two.
1748    if ((~(*C2) + 1).isPowerOf2()) {
1749      Constant *NegBOC =
1750          ConstantExpr::getNeg(cast<Constant>(And->getOperand(1)));
1751      auto NewPred = isICMP_NE ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
1752      return new ICmpInst(NewPred, X, NegBOC);
1753    }
1754  }
1755
1756  // If the LHS is an 'and' of a truncate and we can widen the and/compare to
1757  // the input width without changing the value produced, eliminate the cast:
1758  //
1759  // icmp (and (trunc W), C2), C1 -> icmp (and W, C2'), C1'
1760  //
1761  // We can do this transformation if the constants do not have their sign bits
1762  // set or if it is an equality comparison. Extending a relational comparison
1763  // when we're checking the sign bit would not work.
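  // For example (a sketch): 'icmp ult (and (trunc i64 %w to i32), 255), 42'
  // becomes 'icmp ult (and i64 %w, 255), 42' once both constants are
  // zero-extended to the wide type.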
1764  Value *W;
1765  if (match(And->getOperand(0), m_OneUse(m_Trunc(m_Value(W)))) &&
1766      (Cmp.isEquality() || (!C1.isNegative() && !C2->isNegative()))) {
1767    // TODO: Is this a good transform for vectors? Wider types may reduce
1768    // throughput. Should this transform be limited (even for scalars) by using
1769    // shouldChangeType()?
1770    if (!Cmp.getType()->isVectorTy()) {
1771      Type *WideType = W->getType();
1772      unsigned WideScalarBits = WideType->getScalarSizeInBits();
1773      Constant *ZextC1 = ConstantInt::get(WideType, C1.zext(WideScalarBits));
1774      Constant *ZextC2 = ConstantInt::get(WideType, C2->zext(WideScalarBits));
1775      Value *NewAnd = Builder.CreateAnd(W, ZextC2, And->getName());
1776      return new ICmpInst(Cmp.getPredicate(), NewAnd, ZextC1);
1777    }
1778  }
1779
1780  if (Instruction *I = foldICmpAndShift(Cmp, And, C1, *C2))
1781    return I;
1782
1783  // (icmp pred (and (or (lshr A, B), A), 1), 0) -->
1784  // (icmp pred (and A, (or (shl 1, B), 1)), 0)
1785  //
1786  // iff pred isn't signed
1787  if (!Cmp.isSigned() && C1.isNullValue() && And->getOperand(0)->hasOneUse() &&
1788      match(And->getOperand(1), m_One())) {
1789    Constant *One = cast<Constant>(And->getOperand(1));
1790    Value *Or = And->getOperand(0);
1791    Value *A, *B, *LShr;
1792    if (match(Or, m_Or(m_Value(LShr), m_Value(A))) &&
1793        match(LShr, m_LShr(m_Specific(A), m_Value(B)))) {
1794      unsigned UsesRemoved = 0;
1795      if (And->hasOneUse())
1796        ++UsesRemoved;
1797      if (Or->hasOneUse())
1798        ++UsesRemoved;
1799      if (LShr->hasOneUse())
1800        ++UsesRemoved;
1801
1802      // Compute A & ((1 << B) | 1)
1803      Value *NewOr = nullptr;
1804      if (auto *C = dyn_cast<Constant>(B)) {
1805        if (UsesRemoved >= 1)
1806          NewOr = ConstantExpr::getOr(ConstantExpr::getNUWShl(One, C), One);
1807      } else {
1808        if (UsesRemoved >= 3)
1809          NewOr = Builder.CreateOr(Builder.CreateShl(One, B, LShr->getName(),
1810                                                     /*HasNUW=*/true),
1811                                   One, Or->getName());
1812      }
1813      if (NewOr) {
1814        Value *NewAnd = Builder.CreateAnd(A, NewOr, And->getName());
1815        Cmp.setOperand(0, NewAnd);
1816        return &Cmp;
1817      }
1818    }
1819  }
1820
1821  return nullptr;
1822}
1823
1824/// Fold icmp (and X, Y), C.
1825Instruction *InstCombiner::foldICmpAndConstant(ICmpInst &Cmp,
1826                                               BinaryOperator *And,
1827                                               const APInt &C) {
1828  if (Instruction *I = foldICmpAndConstConst(Cmp, And, C))
1829    return I;
1830
1831  // TODO: These all require that Y is constant too, so refactor with the above.
1832
1833  // Try to optimize things like "A[i] & 42 == 0" to index computations.
1834  Value *X = And->getOperand(0);
1835  Value *Y = And->getOperand(1);
1836  if (auto *LI = dyn_cast<LoadInst>(X))
1837    if (auto *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)))
1838      if (auto *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
1839        if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
1840            !LI->isVolatile() && isa<ConstantInt>(Y)) {
1841          ConstantInt *C2 = cast<ConstantInt>(Y);
1842          if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, Cmp, C2))
1843            return Res;
1844        }
1845
1846  if (!Cmp.isEquality())
1847    return nullptr;
1848
1849  // X & -C == -C -> X >  u ~C
1850  // X & -C != -C -> X <= u ~C
1851  //   iff C is a power of 2
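  // For example (a sketch, i8 with C == 4): 'X & -4 == -4' holds exactly for
  // X in {0xFC..0xFF}, i.e. 'X u> 0xFB'.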
1852  if (Cmp.getOperand(1) == Y && (-C).isPowerOf2()) {
1853    auto NewPred = Cmp.getPredicate() == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGT
1854                                                          : CmpInst::ICMP_ULE;
1855    return new ICmpInst(NewPred, X, SubOne(cast<Constant>(Cmp.getOperand(1))));
1856  }
1857
1858  // (X & C2) == 0 -> (trunc X) >= 0
1859  // (X & C2) != 0 -> (trunc X) <  0
1860  //   iff C2 is a power of 2 and it masks the sign bit of a legal integer type.
1861  const APInt *C2;
1862  if (And->hasOneUse() && C.isNullValue() && match(Y, m_APInt(C2))) {
1863    int32_t ExactLogBase2 = C2->exactLogBase2();
1864    if (ExactLogBase2 != -1 && DL.isLegalInteger(ExactLogBase2 + 1)) {
1865      Type *NTy = IntegerType::get(Cmp.getContext(), ExactLogBase2 + 1);
1866      if (And->getType()->isVectorTy())
1867        NTy = VectorType::get(NTy, And->getType()->getVectorNumElements());
1868      Value *Trunc = Builder.CreateTrunc(X, NTy);
1869      auto NewPred = Cmp.getPredicate() == CmpInst::ICMP_EQ ? CmpInst::ICMP_SGE
1870                                                            : CmpInst::ICMP_SLT;
1871      return new ICmpInst(NewPred, Trunc, Constant::getNullValue(NTy));
1872    }
1873  }
1874
1875  return nullptr;
1876}
1877
1878/// Fold icmp (or X, Y), C.
1879Instruction *InstCombiner::foldICmpOrConstant(ICmpInst &Cmp, BinaryOperator *Or,
1880                                              const APInt &C) {
1881  ICmpInst::Predicate Pred = Cmp.getPredicate();
1882  if (C.isOneValue()) {
1883    // icmp slt signum(V) 1 --> icmp slt V, 1
1884    Value *V = nullptr;
1885    if (Pred == ICmpInst::ICMP_SLT && match(Or, m_Signum(m_Value(V))))
1886      return new ICmpInst(ICmpInst::ICMP_SLT, V,
1887                          ConstantInt::get(V->getType(), 1));
1888  }
1889
1890  Value *OrOp0 = Or->getOperand(0), *OrOp1 = Or->getOperand(1);
1891  if (Cmp.isEquality() && Cmp.getOperand(1) == OrOp1) {
1892    // X | C == C --> X <=u C
1893    // X | C != C --> X  >u C
1894    //   iff C+1 is a power of 2 (C is a bitmask of the low bits)
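    // For example (a sketch, i8): 'X | 7 == 7' holds iff X has no bits set
    // outside the low three, i.e. 'X u<= 7'.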
1895    if ((C + 1).isPowerOf2()) {
1896      Pred = (Pred == CmpInst::ICMP_EQ) ? CmpInst::ICMP_ULE : CmpInst::ICMP_UGT;
1897      return new ICmpInst(Pred, OrOp0, OrOp1);
1898    }
1899    // More general: are all bits outside of a mask constant set or not set?
1900    // X | C == C --> (X & ~C) == 0
1901    // X | C != C --> (X & ~C) != 0
1902    if (Or->hasOneUse()) {
1903      Value *A = Builder.CreateAnd(OrOp0, ~C);
1904      return new ICmpInst(Pred, A, ConstantInt::getNullValue(OrOp0->getType()));
1905    }
1906  }
1907
1908  if (!Cmp.isEquality() || !C.isNullValue() || !Or->hasOneUse())
1909    return nullptr;
1910
1911  Value *P, *Q;
1912  if (match(Or, m_Or(m_PtrToInt(m_Value(P)), m_PtrToInt(m_Value(Q))))) {
1913    // Simplify icmp eq (or (ptrtoint P), (ptrtoint Q)), 0
1914    // -> and (icmp eq P, null), (icmp eq Q, null).
1915    Value *CmpP =
1916        Builder.CreateICmp(Pred, P, ConstantInt::getNullValue(P->getType()));
1917    Value *CmpQ =
1918        Builder.CreateICmp(Pred, Q, ConstantInt::getNullValue(Q->getType()));
1919    auto BOpc = Pred == CmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
1920    return BinaryOperator::Create(BOpc, CmpP, CmpQ);
1921  }
1922
1923  // Are we using xors to bitwise check for a pair of (in)equalities? Convert to
1924  // a shorter form that has more potential to be folded even further.
1925  Value *X1, *X2, *X3, *X4;
1926  if (match(OrOp0, m_OneUse(m_Xor(m_Value(X1), m_Value(X2)))) &&
1927      match(OrOp1, m_OneUse(m_Xor(m_Value(X3), m_Value(X4))))) {
1928    // ((X1 ^ X2) || (X3 ^ X4)) == 0 --> (X1 == X2) && (X3 == X4)
1929    // ((X1 ^ X2) || (X3 ^ X4)) != 0 --> (X1 != X2) || (X3 != X4)
1930    Value *Cmp12 = Builder.CreateICmp(Pred, X1, X2);
1931    Value *Cmp34 = Builder.CreateICmp(Pred, X3, X4);
1932    auto BOpc = Pred == CmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
1933    return BinaryOperator::Create(BOpc, Cmp12, Cmp34);
1934  }
1935
1936  return nullptr;
1937}
1938
1939/// Fold icmp (mul X, Y), C.
1940Instruction *InstCombiner::foldICmpMulConstant(ICmpInst &Cmp,
1941                                               BinaryOperator *Mul,
1942                                               const APInt &C) {
1943  const APInt *MulC;
1944  if (!match(Mul->getOperand(1), m_APInt(MulC)))
1945    return nullptr;
1946
1947  // If this is a test of the sign bit and the multiply is sign-preserving with
1948  // a constant operand, use the multiply LHS operand instead.
1949  ICmpInst::Predicate Pred = Cmp.getPredicate();
1950  if (isSignTest(Pred, C) && Mul->hasNoSignedWrap()) {
1951    if (MulC->isNegative())
1952      Pred = ICmpInst::getSwappedPredicate(Pred);
1953    return new ICmpInst(Pred, Mul->getOperand(0),
1954                        Constant::getNullValue(Mul->getType()));
1955  }
1956
1957  return nullptr;
1958}
1959
1960/// Fold icmp (shl 1, Y), C.
1961static Instruction *foldICmpShlOne(ICmpInst &Cmp, Instruction *Shl,
1962                                   const APInt &C) {
1963  Value *Y;
1964  if (!match(Shl, m_Shl(m_One(), m_Value(Y))))
1965    return nullptr;
1966
1967  Type *ShiftType = Shl->getType();
1968  unsigned TypeBits = C.getBitWidth();
1969  bool CIsPowerOf2 = C.isPowerOf2();
1970  ICmpInst::Predicate Pred = Cmp.getPredicate();
1971  if (Cmp.isUnsigned()) {
1972    // (1 << Y) pred C -> Y pred Log2(C)
1973    if (!CIsPowerOf2) {
1974      // (1 << Y) <  30 -> Y <= 4
1975      // (1 << Y) <= 30 -> Y <= 4
1976      // (1 << Y) >= 30 -> Y >  4
1977      // (1 << Y) >  30 -> Y >  4
1978      if (Pred == ICmpInst::ICMP_ULT)
1979        Pred = ICmpInst::ICMP_ULE;
1980      else if (Pred == ICmpInst::ICMP_UGE)
1981        Pred = ICmpInst::ICMP_UGT;
1982    }
1983
1984    // (1 << Y) >= 2147483648 -> Y >= 31 -> Y == 31
1985    // (1 << Y) <  2147483648 -> Y <  31 -> Y != 31
1986    unsigned CLog2 = C.logBase2();
1987    if (CLog2 == TypeBits - 1) {
1988      if (Pred == ICmpInst::ICMP_UGE)
1989        Pred = ICmpInst::ICMP_EQ;
1990      else if (Pred == ICmpInst::ICMP_ULT)
1991        Pred = ICmpInst::ICMP_NE;
1992    }
1993    return new ICmpInst(Pred, Y, ConstantInt::get(ShiftType, CLog2));
1994  } else if (Cmp.isSigned()) {
1995    Constant *BitWidthMinusOne = ConstantInt::get(ShiftType, TypeBits - 1);
1996    if (C.isAllOnesValue()) {
1997      // (1 << Y) <= -1 -> Y == 31
1998      if (Pred == ICmpInst::ICMP_SLE)
1999        return new ICmpInst(ICmpInst::ICMP_EQ, Y, BitWidthMinusOne);
2000
2001      // (1 << Y) >  -1 -> Y != 31
2002      if (Pred == ICmpInst::ICMP_SGT)
2003        return new ICmpInst(ICmpInst::ICMP_NE, Y, BitWidthMinusOne);
2004    } else if (!C) {
2005      // (1 << Y) <  0 -> Y == 31
2006      // (1 << Y) <= 0 -> Y == 31
2007      if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
2008        return new ICmpInst(ICmpInst::ICMP_EQ, Y, BitWidthMinusOne);
2009
2010      // (1 << Y) >= 0 -> Y != 31
2011      // (1 << Y) >  0 -> Y != 31
2012      if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
2013        return new ICmpInst(ICmpInst::ICMP_NE, Y, BitWidthMinusOne);
2014    }
2015  } else if (Cmp.isEquality() && CIsPowerOf2) {
2016    return new ICmpInst(Pred, Y, ConstantInt::get(ShiftType, C.logBase2()));
2017  }
2018
2019  return nullptr;
2020}
2021
2022/// Fold icmp (shl X, Y), C.
2023Instruction *InstCombiner::foldICmpShlConstant(ICmpInst &Cmp,
2024                                               BinaryOperator *Shl,
2025                                               const APInt &C) {
2026  const APInt *ShiftVal;
2027  if (Cmp.isEquality() && match(Shl->getOperand(0), m_APInt(ShiftVal)))
2028    return foldICmpShlConstConst(Cmp, Shl->getOperand(1), C, *ShiftVal);
2029
2030  const APInt *ShiftAmt;
2031  if (!match(Shl->getOperand(1), m_APInt(ShiftAmt)))
2032    return foldICmpShlOne(Cmp, Shl, C);
2033
2034  // Check that the shift amount is in range. If not, don't perform undefined
2035  // shifts. When the shift is visited, it will be simplified.
2036  unsigned TypeBits = C.getBitWidth();
2037  if (ShiftAmt->uge(TypeBits))
2038    return nullptr;
2039
2040  ICmpInst::Predicate Pred = Cmp.getPredicate();
2041  Value *X = Shl->getOperand(0);
2042  Type *ShType = Shl->getType();
2043
2044  // NSW guarantees that we are only shifting out sign bits from the high bits,
2045  // so we can ASHR the compare constant without needing a mask and eliminate
2046  // the shift.
2047  if (Shl->hasNoSignedWrap()) {
2048    if (Pred == ICmpInst::ICMP_SGT) {
2049      // icmp Pred (shl nsw X, ShiftAmt), C --> icmp Pred X, (C >>s ShiftAmt)
2050      APInt ShiftedC = C.ashr(*ShiftAmt);
2051      return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2052    }
2053    if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) &&
2054        C.ashr(*ShiftAmt).shl(*ShiftAmt) == C) {
2055      APInt ShiftedC = C.ashr(*ShiftAmt);
2056      return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2057    }
2058    if (Pred == ICmpInst::ICMP_SLT) {
2059      // SLE is the same as above, but SLE is canonicalized to SLT, so convert:
2060      // (X << S) <=s C is equiv to X <=s (C >> S) for all C
2061      // (X << S) <s (C + 1) is equiv to X <s (C >> S) + 1 if C <s SMAX
2062      // (X << S) <s C is equiv to X <s ((C - 1) >> S) + 1 if C >s SMIN
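      // For example (a sketch, i8): '(shl nsw X, 1) s< 7' becomes 'X s< 4',
      // since ((7 - 1) >>s 1) + 1 == 4.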
2063      assert(!C.isMinSignedValue() && "Unexpected icmp slt");
2064      APInt ShiftedC = (C - 1).ashr(*ShiftAmt) + 1;
2065      return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2066    }
2067    // If this is a signed comparison to 0 and the shift is sign preserving,
2068    // use the shift LHS operand instead; isSignTest may change 'Pred', so only
2069    // do that if we're sure to not continue on in this function.
2070    if (isSignTest(Pred, C))
2071      return new ICmpInst(Pred, X, Constant::getNullValue(ShType));
2072  }
2073
2074  // NUW guarantees that we are only shifting out zero bits from the high bits,
2075  // so we can LSHR the compare constant without needing a mask and eliminate
2076  // the shift.
2077  if (Shl->hasNoUnsignedWrap()) {
2078    if (Pred == ICmpInst::ICMP_UGT) {
2079      // icmp Pred (shl nuw X, ShiftAmt), C --> icmp Pred X, (C >>u ShiftAmt)
2080      APInt ShiftedC = C.lshr(*ShiftAmt);
2081      return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2082    }
2083    if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) &&
2084        C.lshr(*ShiftAmt).shl(*ShiftAmt) == C) {
2085      APInt ShiftedC = C.lshr(*ShiftAmt);
2086      return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2087    }
2088    if (Pred == ICmpInst::ICMP_ULT) {
2089      // ULE is the same as above, but ULE is canonicalized to ULT, so convert:
2090      // (X << S) <=u C is equiv to X <=u (C >> S) for all C
2091      // (X << S) <u (C + 1) is equiv to X <u (C >> S) + 1 if C <u ~0u
2092      // (X << S) <u C is equiv to X <u ((C - 1) >> S) + 1 if C >u 0
2093      assert(C.ugt(0) && "ult 0 should have been eliminated");
2094      APInt ShiftedC = (C - 1).lshr(*ShiftAmt) + 1;
2095      return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2096    }
2097  }
2098
2099  if (Cmp.isEquality() && Shl->hasOneUse()) {
2100    // Strength-reduce the shift into an 'and'.
2101    Constant *Mask = ConstantInt::get(
2102        ShType,
2103        APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt->getZExtValue()));
2104    Value *And = Builder.CreateAnd(X, Mask, Shl->getName() + ".mask");
2105    Constant *LShrC = ConstantInt::get(ShType, C.lshr(*ShiftAmt));
2106    return new ICmpInst(Pred, And, LShrC);
2107  }
2108
2109  // Otherwise, if this is a comparison of the sign bit, simplify to and/test.
2110  bool TrueIfSigned = false;
2111  if (Shl->hasOneUse() && isSignBitCheck(Pred, C, TrueIfSigned)) {
2112    // (X << 31) <s 0  --> (X & 1) != 0
2113    Constant *Mask = ConstantInt::get(
2114        ShType,
2115        APInt::getOneBitSet(TypeBits, TypeBits - ShiftAmt->getZExtValue() - 1));
2116    Value *And = Builder.CreateAnd(X, Mask, Shl->getName() + ".mask");
2117    return new ICmpInst(TrueIfSigned ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ,
2118                        And, Constant::getNullValue(ShType));
2119  }
2120
2121  // Simplify 'shl' inequality test into 'and' equality test.
2122  if (Cmp.isUnsigned() && Shl->hasOneUse()) {
2123    // (X l<< C2) u<=/u> C1 iff C1+1 is power of two -> X & (~C1 l>> C2) ==/!= 0
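    // For example (a sketch, i8): '(shl X, 4) u> 15' becomes
    // '(X & 0x0F) != 0', since ~15 l>> 4 == 0x0F.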
2124    if ((C + 1).isPowerOf2() &&
2125        (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_UGT)) {
2126      Value *And = Builder.CreateAnd(X, (~C).lshr(ShiftAmt->getZExtValue()));
2127      return new ICmpInst(Pred == ICmpInst::ICMP_ULE ? ICmpInst::ICMP_EQ
2128                                                     : ICmpInst::ICMP_NE,
2129                          And, Constant::getNullValue(ShType));
2130    }
2131    // (X l<< C2) u</u>= C1 iff C1 is power of two -> X & (-C1 l>> C2) ==/!= 0
2132    if (C.isPowerOf2() &&
2133        (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE)) {
2134      Value *And =
2135          Builder.CreateAnd(X, (~(C - 1)).lshr(ShiftAmt->getZExtValue()));
2136      return new ICmpInst(Pred == ICmpInst::ICMP_ULT ? ICmpInst::ICMP_EQ
2137                                                     : ICmpInst::ICMP_NE,
2138                          And, Constant::getNullValue(ShType));
2139    }
2140  }
2141
2142  // Transform (icmp pred iM (shl iM %v, N), C)
2143  // -> (icmp pred i(M-N) (trunc %v iM to i(M-N)), (trunc (C>>N))
2144  // Transform the shl to a trunc if (trunc (C>>N)) has no loss and M-N is legal.
2145  // This enables us to get rid of the shift in favor of a trunc that may be
2146  // free on the target. It has the additional benefit of comparing to a
2147  // smaller constant that may be more target-friendly.
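  // For example (a sketch where i16 is legal): 'icmp eq (shl i32 %v, 16), 0x50000'
  // becomes 'icmp eq (trunc i32 %v to i16), 5'.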
2148  unsigned Amt = ShiftAmt->getLimitedValue(TypeBits - 1);
2149  if (Shl->hasOneUse() && Amt != 0 && C.countTrailingZeros() >= Amt &&
2150      DL.isLegalInteger(TypeBits - Amt)) {
2151    Type *TruncTy = IntegerType::get(Cmp.getContext(), TypeBits - Amt);
2152    if (ShType->isVectorTy())
2153      TruncTy = VectorType::get(TruncTy, ShType->getVectorNumElements());
2154    Constant *NewC =
2155        ConstantInt::get(TruncTy, C.ashr(*ShiftAmt).trunc(TypeBits - Amt));
2156    return new ICmpInst(Pred, Builder.CreateTrunc(X, TruncTy), NewC);
2157  }
2158
2159  return nullptr;
2160}
2161
2162/// Fold icmp ({al}shr X, Y), C.
2163Instruction *InstCombiner::foldICmpShrConstant(ICmpInst &Cmp,
2164                                               BinaryOperator *Shr,
2165                                               const APInt &C) {
2166  // An exact shr only shifts out zero bits, so:
2167  // icmp eq/ne (shr X, Y), 0 --> icmp eq/ne X, 0
2168  Value *X = Shr->getOperand(0);
2169  CmpInst::Predicate Pred = Cmp.getPredicate();
2170  if (Cmp.isEquality() && Shr->isExact() && Shr->hasOneUse() &&
2171      C.isNullValue())
2172    return new ICmpInst(Pred, X, Cmp.getOperand(1));
2173
2174  const APInt *ShiftVal;
2175  if (Cmp.isEquality() && match(Shr->getOperand(0), m_APInt(ShiftVal)))
2176    return foldICmpShrConstConst(Cmp, Shr->getOperand(1), C, *ShiftVal);
2177
2178  const APInt *ShiftAmt;
2179  if (!match(Shr->getOperand(1), m_APInt(ShiftAmt)))
2180    return nullptr;
2181
2182  // Check that the shift amount is in range. If not, don't perform undefined
2183  // shifts. When the shift is visited it will be simplified.
2184  unsigned TypeBits = C.getBitWidth();
2185  unsigned ShAmtVal = ShiftAmt->getLimitedValue(TypeBits);
2186  if (ShAmtVal >= TypeBits || ShAmtVal == 0)
2187    return nullptr;
2188
2189  bool IsAShr = Shr->getOpcode() == Instruction::AShr;
2190  bool IsExact = Shr->isExact();
2191  Type *ShrTy = Shr->getType();
2192  // TODO: If we could guarantee that InstSimplify would handle all of the
2193  // constant-value-based preconditions in the folds below, then we could assert
2194  // those conditions rather than checking them. This is difficult because of
2195  // undef/poison (PR34838).
2196  if (IsAShr) {
2197    if (Pred == CmpInst::ICMP_SLT || (Pred == CmpInst::ICMP_SGT && IsExact)) {
2198      // icmp slt (ashr X, ShAmtC), C --> icmp slt X, (C << ShAmtC)
2199      // icmp sgt (ashr exact X, ShAmtC), C --> icmp sgt X, (C << ShAmtC)
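      // For example (a sketch): 'icmp slt (ashr i8 X, 2), 5' becomes
      // 'icmp slt i8 X, 20', because 20 ashr 2 round-trips back to 5.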
2200      APInt ShiftedC = C.shl(ShAmtVal);
2201      if (ShiftedC.ashr(ShAmtVal) == C)
2202        return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2203    }
2204    if (Pred == CmpInst::ICMP_SGT) {
2205      // icmp sgt (ashr X, ShAmtC), C --> icmp sgt X, ((C + 1) << ShAmtC) - 1
2206      APInt ShiftedC = (C + 1).shl(ShAmtVal) - 1;
2207      if (!C.isMaxSignedValue() && !(C + 1).shl(ShAmtVal).isMinSignedValue() &&
2208          (ShiftedC + 1).ashr(ShAmtVal) == (C + 1))
2209        return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2210    }
2211  } else {
2212    if (Pred == CmpInst::ICMP_ULT || (Pred == CmpInst::ICMP_UGT && IsExact)) {
2213      // icmp ult (lshr X, ShAmtC), C --> icmp ult X, (C << ShAmtC)
2214      // icmp ugt (lshr exact X, ShAmtC), C --> icmp ugt X, (C << ShAmtC)
2215      APInt ShiftedC = C.shl(ShAmtVal);
2216      if (ShiftedC.lshr(ShAmtVal) == C)
2217        return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2218    }
2219    if (Pred == CmpInst::ICMP_UGT) {
2220      // icmp ugt (lshr X, ShAmtC), C --> icmp ugt X, ((C + 1) << ShAmtC) - 1
2221      APInt ShiftedC = (C + 1).shl(ShAmtVal) - 1;
2222      if ((ShiftedC + 1).lshr(ShAmtVal) == (C + 1))
2223        return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2224    }
2225  }
2226
2227  if (!Cmp.isEquality())
2228    return nullptr;
2229
2230  // Handle equality comparisons of shift-by-constant.
2231
2232  // If the comparison constant changes with the shift, the comparison cannot
2233  // succeed (bits of the comparison constant cannot match the shifted value).
2234  // This should be known by InstSimplify and already be folded to true/false.
2235  assert(((IsAShr && C.shl(ShAmtVal).ashr(ShAmtVal) == C) ||
2236          (!IsAShr && C.shl(ShAmtVal).lshr(ShAmtVal) == C)) &&
2237         "Expected icmp+shr simplify did not occur.");
2238
2239  // If the bits shifted out are known zero, compare the unshifted value:
2240  //  (X & 4) >> 1 == 2  --> (X & 4) == 4.
2241  if (Shr->isExact())
2242    return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, C << ShAmtVal));
2243
2244  if (Shr->hasOneUse()) {
2245    // Canonicalize the shift into an 'and':
2246    // icmp eq/ne (shr X, ShAmt), C --> icmp eq/ne (and X, HiMask), (C << ShAmt)
2247    APInt Val(APInt::getHighBitsSet(TypeBits, TypeBits - ShAmtVal));
2248    Constant *Mask = ConstantInt::get(ShrTy, Val);
2249    Value *And = Builder.CreateAnd(X, Mask, Shr->getName() + ".mask");
2250    return new ICmpInst(Pred, And, ConstantInt::get(ShrTy, C << ShAmtVal));
2251  }
2252
2253  return nullptr;
2254}
2255
2256Instruction *InstCombiner::foldICmpSRemConstant(ICmpInst &Cmp,
2257                                                BinaryOperator *SRem,
2258                                                const APInt &C) {
2259  // Match an 'is positive' or 'is negative' comparison of remainder by a
2260  // constant power-of-2 value:
2261  // (X % pow2C) sgt/slt 0
2262  const ICmpInst::Predicate Pred = Cmp.getPredicate();
2263  if (Pred != ICmpInst::ICMP_SGT && Pred != ICmpInst::ICMP_SLT)
2264    return nullptr;
2265
2266  // TODO: The one-use check is standard because we do not typically want to
2267  //       create longer instruction sequences, but this might be a special-case
2268  //       because srem is not good for analysis or codegen.
2269  if (!SRem->hasOneUse())
2270    return nullptr;
2271
2272  const APInt *DivisorC;
2273  if (!C.isNullValue() || !match(SRem->getOperand(1), m_Power2(DivisorC)))
2274    return nullptr;
2275
2276  // Mask off the sign bit and the modulo bits (low-bits).
2277  Type *Ty = SRem->getType();
2278  APInt SignMask = APInt::getSignMask(Ty->getScalarSizeInBits());
2279  Constant *MaskC = ConstantInt::get(Ty, SignMask | (*DivisorC - 1));
2280  Value *And = Builder.CreateAnd(SRem->getOperand(0), MaskC);
2281
2282  // For 'is positive?' check that the sign-bit is clear and at least 1 masked
2283  // bit is set. Example:
2284  // (i8 X % 32) s> 0 --> (X & 159) s> 0
2285  if (Pred == ICmpInst::ICMP_SGT)
2286    return new ICmpInst(ICmpInst::ICMP_SGT, And, ConstantInt::getNullValue(Ty));
2287
2288  // For 'is negative?' check that the sign-bit is set and at least 1 masked
2289  // bit is set. Example:
2290  // (i16 X % 4) s< 0 --> (X & 32771) u> 32768
2291  return new ICmpInst(ICmpInst::ICMP_UGT, And, ConstantInt::get(Ty, SignMask));
2292}
2293
2294/// Fold icmp (udiv X, Y), C.
2295Instruction *InstCombiner::foldICmpUDivConstant(ICmpInst &Cmp,
2296                                                BinaryOperator *UDiv,
2297                                                const APInt &C) {
2298  const APInt *C2;
2299  if (!match(UDiv->getOperand(0), m_APInt(C2)))
2300    return nullptr;
2301
2302  assert(*C2 != 0 && "udiv 0, X should have been simplified already.");
2303
2304  // (icmp ugt (udiv C2, Y), C) -> (icmp ule Y, C2/(C+1))
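  // For example (a sketch): 'icmp ugt (udiv i32 64, Y), 3' becomes
  // 'icmp ule i32 Y, 16', since 64 u/ (3 + 1) == 16.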
2305  Value *Y = UDiv->getOperand(1);
2306  if (Cmp.getPredicate() == ICmpInst::ICMP_UGT) {
2307    assert(!C.isMaxValue() &&
2308           "icmp ugt X, UINT_MAX should have been simplified already.");
2309    return new ICmpInst(ICmpInst::ICMP_ULE, Y,
2310                        ConstantInt::get(Y->getType(), C2->udiv(C + 1)));
2311  }
2312
2313  // (icmp ult (udiv C2, Y), C) -> (icmp ugt Y, C2/C)
2314  if (Cmp.getPredicate() == ICmpInst::ICMP_ULT) {
2315    assert(C != 0 && "icmp ult X, 0 should have been simplified already.");
2316    return new ICmpInst(ICmpInst::ICMP_UGT, Y,
2317                        ConstantInt::get(Y->getType(), C2->udiv(C)));
2318  }
2319
2320  return nullptr;
2321}
2322
2323/// Fold icmp ({su}div X, Y), C.
2324Instruction *InstCombiner::foldICmpDivConstant(ICmpInst &Cmp,
2325                                               BinaryOperator *Div,
2326                                               const APInt &C) {
2327  // Fold: icmp pred ([us]div X, C2), C -> range test
2328  // Fold this div into the comparison, producing a range check.
2329  // Determine, based on the divide type, what the range is being
2330  // checked.  If there is an overflow on the low or high side, remember
2331  // it, otherwise compute the range [low, hi) bounding the new value.
2332  // See: InsertRangeTest above for the kinds of replacements possible.
2333  const APInt *C2;
2334  if (!match(Div->getOperand(1), m_APInt(C2)))
2335    return nullptr;
2336
2337  // FIXME: If the operand types don't match the type of the divide
2338  // then don't attempt this transform. The code below doesn't have the
2339  // logic to deal with a signed divide and an unsigned compare (and
2340  // vice versa). This is because (x /s C2) <s C  produces different
2341  // results than (x /s C2) <u C or (x /u C2) <s C or even
2342  // (x /u C2) <u C.  Simply casting the operands and result won't
2343  // work. :(  The if statement below tests that condition and bails
2344  // if it finds it.
2345  bool DivIsSigned = Div->getOpcode() == Instruction::SDiv;
2346  if (!Cmp.isEquality() && DivIsSigned != Cmp.isSigned())
2347    return nullptr;
2348
2349  // The ProdOV computation fails on divide by 0 and divide by -1. Cases with
2350  // INT_MIN will also fail if the divisor is 1. Although folds of all these
2351  // division-by-constant cases should be present, we can not assert that they
2352  // have happened before we reach this icmp instruction.
2353  if (C2->isNullValue() || C2->isOneValue() ||
2354      (DivIsSigned && C2->isAllOnesValue()))
2355    return nullptr;
2356
2357  // Compute Prod = C * C2. We are essentially solving an equation of
2358  // form X / C2 = C. We solve for X by multiplying C2 and C.
2359  // By solving for X, we can turn this into a range check instead of computing
2360  // a divide.
2361  APInt Prod = C * *C2;
2362
2363  // Determine if the product overflows by seeing if the product is not equal to
2364  // the divide. Make sure we do the same kind of divide as in the LHS
2365  // instruction that we're folding.
2366  bool ProdOV = (DivIsSigned ? Prod.sdiv(*C2) : Prod.udiv(*C2)) != C;
2367
2368  ICmpInst::Predicate Pred = Cmp.getPredicate();
2369
2370  // If the division is known to be exact, then there is no remainder from the
2371  // divide, so the covered range size is unit, otherwise it is the divisor.
2372  APInt RangeSize = Div->isExact() ? APInt(C2->getBitWidth(), 1) : *C2;
2373
2374  // Figure out the interval that is being checked.  For example, a comparison
2375  // like "X /u 5 == 0" is really checking that X is in the interval [0, 5).
2376  // Compute this interval based on the constants involved and the signedness of
2377  // the compare/divide.  This computes a half-open interval, keeping track of
2378  // whether either value in the interval overflows.  After analysis each
2379  // overflow variable is set to 0 if its corresponding bound variable is valid,
2380  // -1 if it overflowed off the bottom end, or +1 if off the top end.
2381  int LoOverflow = 0, HiOverflow = 0;
2382  APInt LoBound, HiBound;
2383
2384  if (!DivIsSigned) {  // udiv
2385    // e.g. X/5 op 3  --> [15, 20)
2386    LoBound = Prod;
2387    HiOverflow = LoOverflow = ProdOV;
2388    if (!HiOverflow) {
2389      // If this is not an exact divide, then many values in the range collapse
2390      // to the same result value.
2391      HiOverflow = addWithOverflow(HiBound, LoBound, RangeSize, false);
2392    }
2393  } else if (C2->isStrictlyPositive()) { // Divisor is > 0.
2394    if (C.isNullValue()) {       // (X / pos) op 0
2395      // Can't overflow.  e.g.  X/2 op 0 --> [-1, 2)
2396      LoBound = -(RangeSize - 1);
2397      HiBound = RangeSize;
2398    } else if (C.isStrictlyPositive()) {   // (X / pos) op pos
2399      LoBound = Prod;     // e.g.   X/5 op 3 --> [15, 20)
2400      HiOverflow = LoOverflow = ProdOV;
2401      if (!HiOverflow)
2402        HiOverflow = addWithOverflow(HiBound, Prod, RangeSize, true);
2403    } else {                       // (X / pos) op neg
2404      // e.g. X/5 op -3  --> [-15-4, -15+1) --> [-19, -14)
2405      HiBound = Prod + 1;
2406      LoOverflow = HiOverflow = ProdOV ? -1 : 0;
2407      if (!LoOverflow) {
2408        APInt DivNeg = -RangeSize;
2409        LoOverflow = addWithOverflow(LoBound, HiBound, DivNeg, true) ? -1 : 0;
2410      }
2411    }
2412  } else if (C2->isNegative()) { // Divisor is < 0.
2413    if (Div->isExact())
2414      RangeSize.negate();
2415    if (C.isNullValue()) { // (X / neg) op 0
2416      // e.g. X/-5 op 0  --> [-4, 5)
2417      LoBound = RangeSize + 1;
2418      HiBound = -RangeSize;
2419      if (HiBound == *C2) {        // -INTMIN = INTMIN
2420        HiOverflow = 1;            // [INTMIN+1, overflow)
2421        HiBound = APInt();         // e.g. X/INTMIN = 0 --> X > INTMIN
2422      }
2423    } else if (C.isStrictlyPositive()) {   // (X / neg) op pos
2424      // e.g. X/-5 op 3  --> [-19, -14)
2425      HiBound = Prod + 1;
2426      HiOverflow = LoOverflow = ProdOV ? -1 : 0;
2427      if (!LoOverflow)
2428        LoOverflow = addWithOverflow(LoBound, HiBound, RangeSize, true) ? -1:0;
2429    } else {                       // (X / neg) op neg
2430      LoBound = Prod;       // e.g. X/-5 op -3  --> [15, 20)
2431      LoOverflow = HiOverflow = ProdOV;
2432      if (!HiOverflow)
2433        HiOverflow = subWithOverflow(HiBound, Prod, RangeSize, true);
2434    }
2435
2436    // Dividing by a negative swaps the condition.  LT <-> GT
2437    Pred = ICmpInst::getSwappedPredicate(Pred);
2438  }
2439
2440  Value *X = Div->getOperand(0);
2441  switch (Pred) {
2442    default: llvm_unreachable("Unhandled icmp opcode!");
2443    case ICmpInst::ICMP_EQ:
2444      if (LoOverflow && HiOverflow)
2445        return replaceInstUsesWith(Cmp, Builder.getFalse());
2446      if (HiOverflow)
2447        return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
2448                            ICmpInst::ICMP_UGE, X,
2449                            ConstantInt::get(Div->getType(), LoBound));
2450      if (LoOverflow)
2451        return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
2452                            ICmpInst::ICMP_ULT, X,
2453                            ConstantInt::get(Div->getType(), HiBound));
2454      return replaceInstUsesWith(
2455          Cmp, insertRangeTest(X, LoBound, HiBound, DivIsSigned, true));
2456    case ICmpInst::ICMP_NE:
2457      if (LoOverflow && HiOverflow)
2458        return replaceInstUsesWith(Cmp, Builder.getTrue());
2459      if (HiOverflow)
2460        return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
2461                            ICmpInst::ICMP_ULT, X,
2462                            ConstantInt::get(Div->getType(), LoBound));
2463      if (LoOverflow)
2464        return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
2465                            ICmpInst::ICMP_UGE, X,
2466                            ConstantInt::get(Div->getType(), HiBound));
2467      return replaceInstUsesWith(Cmp,
2468                                 insertRangeTest(X, LoBound, HiBound,
2469                                                 DivIsSigned, false));
2470    case ICmpInst::ICMP_ULT:
2471    case ICmpInst::ICMP_SLT:
2472      if (LoOverflow == +1)   // Low bound is greater than input range.
2473        return replaceInstUsesWith(Cmp, Builder.getTrue());
2474      if (LoOverflow == -1)   // Low bound is less than input range.
2475        return replaceInstUsesWith(Cmp, Builder.getFalse());
2476      return new ICmpInst(Pred, X, ConstantInt::get(Div->getType(), LoBound));
2477    case ICmpInst::ICMP_UGT:
2478    case ICmpInst::ICMP_SGT:
2479      if (HiOverflow == +1)       // High bound greater than input range.
2480        return replaceInstUsesWith(Cmp, Builder.getFalse());
2481      if (HiOverflow == -1)       // High bound less than input range.
2482        return replaceInstUsesWith(Cmp, Builder.getTrue());
2483      if (Pred == ICmpInst::ICMP_UGT)
2484        return new ICmpInst(ICmpInst::ICMP_UGE, X,
2485                            ConstantInt::get(Div->getType(), HiBound));
2486      return new ICmpInst(ICmpInst::ICMP_SGE, X,
2487                          ConstantInt::get(Div->getType(), HiBound));
2488  }
2489
2490  return nullptr;
2491}
2492
2493/// Fold icmp (sub X, Y), C.
2494Instruction *InstCombiner::foldICmpSubConstant(ICmpInst &Cmp,
2495                                               BinaryOperator *Sub,
2496                                               const APInt &C) {
2497  Value *X = Sub->getOperand(0), *Y = Sub->getOperand(1);
2498  ICmpInst::Predicate Pred = Cmp.getPredicate();
2499  const APInt *C2;
2500  APInt SubResult;
2501
2502  // icmp eq/ne (sub C, Y), C -> icmp eq/ne Y, 0
2503  if (match(X, m_APInt(C2)) && *C2 == C && Cmp.isEquality())
2504    return new ICmpInst(Cmp.getPredicate(), Y,
2505                        ConstantInt::get(Y->getType(), 0));
2506
2507  // (icmp P (sub nuw|nsw C2, Y), C) -> (icmp swap(P) Y, C2-C)
2508  if (match(X, m_APInt(C2)) &&
2509      ((Cmp.isUnsigned() && Sub->hasNoUnsignedWrap()) ||
2510       (Cmp.isSigned() && Sub->hasNoSignedWrap())) &&
2511      !subWithOverflow(SubResult, *C2, C, Cmp.isSigned()))
2512    return new ICmpInst(Cmp.getSwappedPredicate(), Y,
2513                        ConstantInt::get(Y->getType(), SubResult));
2514
2515  // The following transforms are only worth it if the only user of the subtract
2516  // is the icmp.
2517  if (!Sub->hasOneUse())
2518    return nullptr;
2519
2520  if (Sub->hasNoSignedWrap()) {
2521    // (icmp sgt (sub nsw X, Y), -1) -> (icmp sge X, Y)
2522    if (Pred == ICmpInst::ICMP_SGT && C.isAllOnesValue())
2523      return new ICmpInst(ICmpInst::ICMP_SGE, X, Y);
2524
2525    // (icmp sgt (sub nsw X, Y), 0) -> (icmp sgt X, Y)
2526    if (Pred == ICmpInst::ICMP_SGT && C.isNullValue())
2527      return new ICmpInst(ICmpInst::ICMP_SGT, X, Y);
2528
2529    // (icmp slt (sub nsw X, Y), 0) -> (icmp slt X, Y)
2530    if (Pred == ICmpInst::ICMP_SLT && C.isNullValue())
2531      return new ICmpInst(ICmpInst::ICMP_SLT, X, Y);
2532
2533    // (icmp slt (sub nsw X, Y), 1) -> (icmp sle X, Y)
2534    if (Pred == ICmpInst::ICMP_SLT && C.isOneValue())
2535      return new ICmpInst(ICmpInst::ICMP_SLE, X, Y);
2536  }
2537
2538  if (!match(X, m_APInt(C2)))
2539    return nullptr;
2540
2541  // C2 - Y <u C -> (Y | (C - 1)) == C2
2542  //   iff (C2 & (C - 1)) == C - 1 and C is a power of 2
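  // For example (constants assumed): 7 - Y u< 4 holds exactly when Y is in
  // [4, 7], i.e. (Y | 3) == 7; here C2 & (C - 1) == 3 == C - 1 and C == 4.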
2543  if (Pred == ICmpInst::ICMP_ULT && C.isPowerOf2() &&
2544      (*C2 & (C - 1)) == (C - 1))
2545    return new ICmpInst(ICmpInst::ICMP_EQ, Builder.CreateOr(Y, C - 1), X);
2546
2547  // C2 - Y >u C -> (Y | C) != C2
2548  //   iff C2 & C == C and C + 1 is a power of 2
2549  if (Pred == ICmpInst::ICMP_UGT && (C + 1).isPowerOf2() && (*C2 & C) == C)
2550    return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateOr(Y, C), X);
2551
2552  return nullptr;
2553}
2554
2555/// Fold icmp (add X, Y), C.
2556Instruction *InstCombiner::foldICmpAddConstant(ICmpInst &Cmp,
2557                                               BinaryOperator *Add,
2558                                               const APInt &C) {
2559  Value *Y = Add->getOperand(1);
2560  const APInt *C2;
2561  if (Cmp.isEquality() || !match(Y, m_APInt(C2)))
2562    return nullptr;
2563
2564  // Fold icmp pred (add X, C2), C.
2565  Value *X = Add->getOperand(0);
2566  Type *Ty = Add->getType();
2567  CmpInst::Predicate Pred = Cmp.getPredicate();
2568
2569  // If the add does not wrap, we can always adjust the compare by subtracting
2570  // the constants. Equality comparisons are handled elsewhere. SGE/SLE/UGE/ULE
2571  // are canonicalized to SGT/SLT/UGT/ULT.
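  // A minimal illustration (constants assumed): icmp sgt (add nsw X, 5), 20
  // becomes icmp sgt X, 15, since C - C2 = 15 and nsw rules out wrapping.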
2572  if ((Add->hasNoSignedWrap() &&
2573       (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLT)) ||
2574      (Add->hasNoUnsignedWrap() &&
2575       (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULT))) {
2576    bool Overflow;
2577    APInt NewC =
2578        Cmp.isSigned() ? C.ssub_ov(*C2, Overflow) : C.usub_ov(*C2, Overflow);
2579    // If there is overflow, the result must be true or false.
2580    // TODO: Can we assert there is no overflow because InstSimplify always
2581    // handles those cases?
2582    if (!Overflow)
2583      // icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2)
2584      return new ICmpInst(Pred, X, ConstantInt::get(Ty, NewC));
2585  }
2586
2587  auto CR = ConstantRange::makeExactICmpRegion(Pred, C).subtract(*C2);
2588  const APInt &Upper = CR.getUpper();
2589  const APInt &Lower = CR.getLower();
2590  if (Cmp.isSigned()) {
2591    if (Lower.isSignMask())
2592      return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantInt::get(Ty, Upper));
2593    if (Upper.isSignMask())
2594      return new ICmpInst(ICmpInst::ICMP_SGE, X, ConstantInt::get(Ty, Lower));
2595  } else {
2596    if (Lower.isMinValue())
2597      return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantInt::get(Ty, Upper));
2598    if (Upper.isMinValue())
2599      return new ICmpInst(ICmpInst::ICMP_UGE, X, ConstantInt::get(Ty, Lower));
2600  }
2601
2602  if (!Add->hasOneUse())
2603    return nullptr;
2604
  // (X + C2) u< C -> (X & -C) == -C2
  //   iff C2 & (C - 1) == 0
  //       C is a power of 2
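  // For example (i8, constants assumed): (X + 4) u< 4 holds exactly when X is
  // in [-4, -1], i.e. (X & -4) == -4.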
2608  if (Pred == ICmpInst::ICMP_ULT && C.isPowerOf2() && (*C2 & (C - 1)) == 0)
2609    return new ICmpInst(ICmpInst::ICMP_EQ, Builder.CreateAnd(X, -C),
2610                        ConstantExpr::getNeg(cast<Constant>(Y)));
2611
  // (X + C2) u> C -> (X & ~C) != -C2
  //   iff C2 & C == 0
  //       C + 1 is a power of 2
2615  if (Pred == ICmpInst::ICMP_UGT && (C + 1).isPowerOf2() && (*C2 & C) == 0)
2616    return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateAnd(X, ~C),
2617                        ConstantExpr::getNeg(cast<Constant>(Y)));
2618
2619  return nullptr;
2620}
2621
2622bool InstCombiner::matchThreeWayIntCompare(SelectInst *SI, Value *&LHS,
2623                                           Value *&RHS, ConstantInt *&Less,
2624                                           ConstantInt *&Equal,
2625                                           ConstantInt *&Greater) {
2626  // TODO: Generalize this to work with other comparison idioms or ensure
2627  // they get canonicalized into this form.
2628
2629  // select i1 (a == b),
2630  //        i32 Equal,
2631  //        i32 (select i1 (a < b), i32 Less, i32 Greater)
2632  // where Equal, Less and Greater are placeholders for any three constants.
2633  ICmpInst::Predicate PredA;
2634  if (!match(SI->getCondition(), m_ICmp(PredA, m_Value(LHS), m_Value(RHS))) ||
2635      !ICmpInst::isEquality(PredA))
2636    return false;
2637  Value *EqualVal = SI->getTrueValue();
2638  Value *UnequalVal = SI->getFalseValue();
  // We can still get a non-canonical predicate here, so canonicalize.
2640  if (PredA == ICmpInst::ICMP_NE)
2641    std::swap(EqualVal, UnequalVal);
2642  if (!match(EqualVal, m_ConstantInt(Equal)))
2643    return false;
2644  ICmpInst::Predicate PredB;
2645  Value *LHS2, *RHS2;
2646  if (!match(UnequalVal, m_Select(m_ICmp(PredB, m_Value(LHS2), m_Value(RHS2)),
2647                                  m_ConstantInt(Less), m_ConstantInt(Greater))))
2648    return false;
  // We can get a predicate mismatch here, so canonicalize if possible:
  // First, ensure that 'LHS' matches.
2651  if (LHS2 != LHS) {
2652    // x sgt y <--> y slt x
2653    std::swap(LHS2, RHS2);
2654    PredB = ICmpInst::getSwappedPredicate(PredB);
2655  }
2656  if (LHS2 != LHS)
2657    return false;
2658  // We also need to canonicalize 'RHS'.
2659  if (PredB == ICmpInst::ICMP_SGT && isa<Constant>(RHS2)) {
2660    // x sgt C-1  <-->  x sge C  <-->  not(x slt C)
2661    auto FlippedStrictness =
2662        getFlippedStrictnessPredicateAndConstant(PredB, cast<Constant>(RHS2));
2663    if (!FlippedStrictness)
2664      return false;
2665    assert(FlippedStrictness->first == ICmpInst::ICMP_SGE && "Sanity check");
2666    RHS2 = FlippedStrictness->second;
2667    // And kind-of perform the result swap.
2668    std::swap(Less, Greater);
2669    PredB = ICmpInst::ICMP_SLT;
2670  }
2671  return PredB == ICmpInst::ICMP_SLT && RHS == RHS2;
2672}
2673
2674Instruction *InstCombiner::foldICmpSelectConstant(ICmpInst &Cmp,
2675                                                  SelectInst *Select,
2676                                                  ConstantInt *C) {
2677
2678  assert(C && "Cmp RHS should be a constant int!");
2679  // If we're testing a constant value against the result of a three way
2680  // comparison, the result can be expressed directly in terms of the
2681  // original values being compared.  Note: We could possibly be more
2682  // aggressive here and remove the hasOneUse test. The original select is
2683  // really likely to simplify or sink when we remove a test of the result.
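  // Illustrative sketch (constants assumed): for the three-way compare
  //   select (a == b), 0, (select (a s< b), -1, 1)
  // tested with "result s< 1", only the 'less' and 'equal' arms satisfy the
  // predicate, so the OR chain built below becomes (a s< b) | (a == b), which
  // a later pass folds to a s<= b.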
2684  Value *OrigLHS, *OrigRHS;
2685  ConstantInt *C1LessThan, *C2Equal, *C3GreaterThan;
2686  if (Cmp.hasOneUse() &&
2687      matchThreeWayIntCompare(Select, OrigLHS, OrigRHS, C1LessThan, C2Equal,
2688                              C3GreaterThan)) {
2689    assert(C1LessThan && C2Equal && C3GreaterThan);
2690
2691    bool TrueWhenLessThan =
2692        ConstantExpr::getCompare(Cmp.getPredicate(), C1LessThan, C)
2693            ->isAllOnesValue();
2694    bool TrueWhenEqual =
2695        ConstantExpr::getCompare(Cmp.getPredicate(), C2Equal, C)
2696            ->isAllOnesValue();
2697    bool TrueWhenGreaterThan =
2698        ConstantExpr::getCompare(Cmp.getPredicate(), C3GreaterThan, C)
2699            ->isAllOnesValue();
2700
2701    // This generates the new instruction that will replace the original Cmp
2702    // Instruction. Instead of enumerating the various combinations when
2703    // TrueWhenLessThan, TrueWhenEqual and TrueWhenGreaterThan are true versus
2704    // false, we rely on chaining of ORs and future passes of InstCombine to
2705    // simplify the OR further (i.e. a s< b || a == b becomes a s<= b).
2706
2707    // When none of the three constants satisfy the predicate for the RHS (C),
2708    // the entire original Cmp can be simplified to a false.
2709    Value *Cond = Builder.getFalse();
2710    if (TrueWhenLessThan)
2711      Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_SLT,
2712                                                       OrigLHS, OrigRHS));
2713    if (TrueWhenEqual)
2714      Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_EQ,
2715                                                       OrigLHS, OrigRHS));
2716    if (TrueWhenGreaterThan)
2717      Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_SGT,
2718                                                       OrigLHS, OrigRHS));
2719
2720    return replaceInstUsesWith(Cmp, Cond);
2721  }
2722  return nullptr;
2723}
2724
2725static Instruction *foldICmpBitCast(ICmpInst &Cmp,
2726                                    InstCombiner::BuilderTy &Builder) {
2727  auto *Bitcast = dyn_cast<BitCastInst>(Cmp.getOperand(0));
2728  if (!Bitcast)
2729    return nullptr;
2730
2731  ICmpInst::Predicate Pred = Cmp.getPredicate();
2732  Value *Op1 = Cmp.getOperand(1);
2733  Value *BCSrcOp = Bitcast->getOperand(0);
2734
2735  // Make sure the bitcast doesn't change the number of vector elements.
2736  if (Bitcast->getSrcTy()->getScalarSizeInBits() ==
2737          Bitcast->getDestTy()->getScalarSizeInBits()) {
2738    // Zero-equality and sign-bit checks are preserved through sitofp + bitcast.
2739    Value *X;
2740    if (match(BCSrcOp, m_SIToFP(m_Value(X)))) {
2741      // icmp  eq (bitcast (sitofp X)), 0 --> icmp  eq X, 0
2742      // icmp  ne (bitcast (sitofp X)), 0 --> icmp  ne X, 0
2743      // icmp slt (bitcast (sitofp X)), 0 --> icmp slt X, 0
2744      // icmp sgt (bitcast (sitofp X)), 0 --> icmp sgt X, 0
2745      if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_SLT ||
2746           Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT) &&
2747          match(Op1, m_Zero()))
2748        return new ICmpInst(Pred, X, ConstantInt::getNullValue(X->getType()));
2749
2750      // icmp slt (bitcast (sitofp X)), 1 --> icmp slt X, 1
2751      if (Pred == ICmpInst::ICMP_SLT && match(Op1, m_One()))
2752        return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), 1));
2753
2754      // icmp sgt (bitcast (sitofp X)), -1 --> icmp sgt X, -1
2755      if (Pred == ICmpInst::ICMP_SGT && match(Op1, m_AllOnes()))
2756        return new ICmpInst(Pred, X,
2757                            ConstantInt::getAllOnesValue(X->getType()));
2758    }
2759
2760    // Zero-equality checks are preserved through unsigned floating-point casts:
2761    // icmp eq (bitcast (uitofp X)), 0 --> icmp eq X, 0
2762    // icmp ne (bitcast (uitofp X)), 0 --> icmp ne X, 0
2763    if (match(BCSrcOp, m_UIToFP(m_Value(X))))
2764      if (Cmp.isEquality() && match(Op1, m_Zero()))
2765        return new ICmpInst(Pred, X, ConstantInt::getNullValue(X->getType()));
2766  }
2767
2768  // Test to see if the operands of the icmp are casted versions of other
2769  // values. If the ptr->ptr cast can be stripped off both arguments, do so.
2770  if (Bitcast->getType()->isPointerTy() &&
2771      (isa<Constant>(Op1) || isa<BitCastInst>(Op1))) {
2772    // If operand #1 is a bitcast instruction, it must also be a ptr->ptr cast
2773    // so eliminate it as well.
2774    if (auto *BC2 = dyn_cast<BitCastInst>(Op1))
2775      Op1 = BC2->getOperand(0);
2776
2777    Op1 = Builder.CreateBitCast(Op1, BCSrcOp->getType());
2778    return new ICmpInst(Pred, BCSrcOp, Op1);
2779  }
2780
2781  // Folding: icmp <pred> iN X, C
  //  where X = bitcast <M x iK> (shufflevector <M x iK> %vec, undef, SC) to iN
2783  //    and C is a splat of a K-bit pattern
2784  //    and SC is a constant vector = <C', C', C', ..., C'>
2785  // Into:
2786  //   %E = extractelement <M x iK> %vec, i32 C'
2787  //   icmp <pred> iK %E, trunc(C)
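  // Concrete instance (types and constants assumed for illustration):
  //   icmp eq (bitcast (shufflevector <4 x i8> %vec, undef, <1,1,1,1>) to i32),
  //           16843009   ; 0x01010101, the i8 value 1 splatted four times
  //   -->  %E = extractelement <4 x i8> %vec, i32 1
  //        icmp eq i8 %E, 1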
2788  const APInt *C;
2789  if (!match(Cmp.getOperand(1), m_APInt(C)) ||
2790      !Bitcast->getType()->isIntegerTy() ||
2791      !Bitcast->getSrcTy()->isIntOrIntVectorTy())
2792    return nullptr;
2793
2794  Value *Vec;
2795  Constant *Mask;
2796  if (match(BCSrcOp,
2797            m_ShuffleVector(m_Value(Vec), m_Undef(), m_Constant(Mask)))) {
2798    // Check whether every element of Mask is the same constant
2799    if (auto *Elem = dyn_cast_or_null<ConstantInt>(Mask->getSplatValue())) {
2800      auto *VecTy = cast<VectorType>(BCSrcOp->getType());
2801      auto *EltTy = cast<IntegerType>(VecTy->getElementType());
2802      if (C->isSplat(EltTy->getBitWidth())) {
2803        // Fold the icmp based on the value of C
2804        // If C is M copies of an iK sized bit pattern,
2805        // then:
        //   =>  %E = extractelement <M x iK> %vec, i32 Elem
        //       icmp <pred> iK %E, trunc(C)
2808        Value *Extract = Builder.CreateExtractElement(Vec, Elem);
2809        Value *NewC = ConstantInt::get(EltTy, C->trunc(EltTy->getBitWidth()));
2810        return new ICmpInst(Pred, Extract, NewC);
2811      }
2812    }
2813  }
2814  return nullptr;
2815}
2816
2817/// Try to fold integer comparisons with a constant operand: icmp Pred X, C
2818/// where X is some kind of instruction.
2819Instruction *InstCombiner::foldICmpInstWithConstant(ICmpInst &Cmp) {
2820  const APInt *C;
2821  if (!match(Cmp.getOperand(1), m_APInt(C)))
2822    return nullptr;
2823
2824  if (auto *BO = dyn_cast<BinaryOperator>(Cmp.getOperand(0))) {
2825    switch (BO->getOpcode()) {
2826    case Instruction::Xor:
2827      if (Instruction *I = foldICmpXorConstant(Cmp, BO, *C))
2828        return I;
2829      break;
2830    case Instruction::And:
2831      if (Instruction *I = foldICmpAndConstant(Cmp, BO, *C))
2832        return I;
2833      break;
2834    case Instruction::Or:
2835      if (Instruction *I = foldICmpOrConstant(Cmp, BO, *C))
2836        return I;
2837      break;
2838    case Instruction::Mul:
2839      if (Instruction *I = foldICmpMulConstant(Cmp, BO, *C))
2840        return I;
2841      break;
2842    case Instruction::Shl:
2843      if (Instruction *I = foldICmpShlConstant(Cmp, BO, *C))
2844        return I;
2845      break;
2846    case Instruction::LShr:
2847    case Instruction::AShr:
2848      if (Instruction *I = foldICmpShrConstant(Cmp, BO, *C))
2849        return I;
2850      break;
2851    case Instruction::SRem:
2852      if (Instruction *I = foldICmpSRemConstant(Cmp, BO, *C))
2853        return I;
2854      break;
2855    case Instruction::UDiv:
2856      if (Instruction *I = foldICmpUDivConstant(Cmp, BO, *C))
2857        return I;
2858      LLVM_FALLTHROUGH;
2859    case Instruction::SDiv:
2860      if (Instruction *I = foldICmpDivConstant(Cmp, BO, *C))
2861        return I;
2862      break;
2863    case Instruction::Sub:
2864      if (Instruction *I = foldICmpSubConstant(Cmp, BO, *C))
2865        return I;
2866      break;
2867    case Instruction::Add:
2868      if (Instruction *I = foldICmpAddConstant(Cmp, BO, *C))
2869        return I;
2870      break;
2871    default:
2872      break;
2873    }
2874    // TODO: These folds could be refactored to be part of the above calls.
2875    if (Instruction *I = foldICmpBinOpEqualityWithConstant(Cmp, BO, *C))
2876      return I;
2877  }
2878
2879  // Match against CmpInst LHS being instructions other than binary operators.
2880
2881  if (auto *SI = dyn_cast<SelectInst>(Cmp.getOperand(0))) {
2882    // For now, we only support constant integers while folding the
    // ICMP(SELECT) pattern. We can extend this to support vectors of integers
    // similar to the cases handled by binary ops above.
2885    if (ConstantInt *ConstRHS = dyn_cast<ConstantInt>(Cmp.getOperand(1)))
2886      if (Instruction *I = foldICmpSelectConstant(Cmp, SI, ConstRHS))
2887        return I;
2888  }
2889
2890  if (auto *TI = dyn_cast<TruncInst>(Cmp.getOperand(0))) {
2891    if (Instruction *I = foldICmpTruncConstant(Cmp, TI, *C))
2892      return I;
2893  }
2894
2895  if (auto *II = dyn_cast<IntrinsicInst>(Cmp.getOperand(0)))
2896    if (Instruction *I = foldICmpIntrinsicWithConstant(Cmp, II, *C))
2897      return I;
2898
2899  return nullptr;
2900}
2901
2902/// Fold an icmp equality instruction with binary operator LHS and constant RHS:
2903/// icmp eq/ne BO, C.
2904Instruction *InstCombiner::foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp,
2905                                                             BinaryOperator *BO,
2906                                                             const APInt &C) {
2907  // TODO: Some of these folds could work with arbitrary constants, but this
2908  // function is limited to scalar and vector splat constants.
2909  if (!Cmp.isEquality())
2910    return nullptr;
2911
2912  ICmpInst::Predicate Pred = Cmp.getPredicate();
2913  bool isICMP_NE = Pred == ICmpInst::ICMP_NE;
2914  Constant *RHS = cast<Constant>(Cmp.getOperand(1));
2915  Value *BOp0 = BO->getOperand(0), *BOp1 = BO->getOperand(1);
2916
2917  switch (BO->getOpcode()) {
2918  case Instruction::SRem:
2919    // If we have a signed (X % (2^c)) == 0, turn it into an unsigned one.
2920    if (C.isNullValue() && BO->hasOneUse()) {
2921      const APInt *BOC;
2922      if (match(BOp1, m_APInt(BOC)) && BOC->sgt(1) && BOC->isPowerOf2()) {
2923        Value *NewRem = Builder.CreateURem(BOp0, BOp1, BO->getName());
2924        return new ICmpInst(Pred, NewRem,
2925                            Constant::getNullValue(BO->getType()));
2926      }
2927    }
2928    break;
2929  case Instruction::Add: {
2930    // Replace ((add A, B) != C) with (A != C-B) if B & C are constants.
2931    const APInt *BOC;
2932    if (match(BOp1, m_APInt(BOC))) {
2933      if (BO->hasOneUse()) {
2934        Constant *SubC = ConstantExpr::getSub(RHS, cast<Constant>(BOp1));
2935        return new ICmpInst(Pred, BOp0, SubC);
2936      }
2937    } else if (C.isNullValue()) {
2938      // Replace ((add A, B) != 0) with (A != -B) if A or B is
2939      // efficiently invertible, or if the add has just this one use.
2940      if (Value *NegVal = dyn_castNegVal(BOp1))
2941        return new ICmpInst(Pred, BOp0, NegVal);
2942      if (Value *NegVal = dyn_castNegVal(BOp0))
2943        return new ICmpInst(Pred, NegVal, BOp1);
2944      if (BO->hasOneUse()) {
2945        Value *Neg = Builder.CreateNeg(BOp1);
2946        Neg->takeName(BO);
2947        return new ICmpInst(Pred, BOp0, Neg);
2948      }
2949    }
2950    break;
2951  }
2952  case Instruction::Xor:
2953    if (BO->hasOneUse()) {
2954      if (Constant *BOC = dyn_cast<Constant>(BOp1)) {
2955        // For the xor case, we can xor two constants together, eliminating
2956        // the explicit xor.
2957        return new ICmpInst(Pred, BOp0, ConstantExpr::getXor(RHS, BOC));
2958      } else if (C.isNullValue()) {
2959        // Replace ((xor A, B) != 0) with (A != B)
2960        return new ICmpInst(Pred, BOp0, BOp1);
2961      }
2962    }
2963    break;
2964  case Instruction::Sub:
2965    if (BO->hasOneUse()) {
2966      const APInt *BOC;
2967      if (match(BOp0, m_APInt(BOC))) {
2968        // Replace ((sub BOC, B) != C) with (B != BOC-C).
2969        Constant *SubC = ConstantExpr::getSub(cast<Constant>(BOp0), RHS);
2970        return new ICmpInst(Pred, BOp1, SubC);
2971      } else if (C.isNullValue()) {
2972        // Replace ((sub A, B) != 0) with (A != B).
2973        return new ICmpInst(Pred, BOp0, BOp1);
2974      }
2975    }
2976    break;
2977  case Instruction::Or: {
2978    const APInt *BOC;
2979    if (match(BOp1, m_APInt(BOC)) && BO->hasOneUse() && RHS->isAllOnesValue()) {
2980      // Comparing if all bits outside of a constant mask are set?
2981      // Replace (X | C) == -1 with (X & ~C) == ~C.
2982      // This removes the -1 constant.
2983      Constant *NotBOC = ConstantExpr::getNot(cast<Constant>(BOp1));
2984      Value *And = Builder.CreateAnd(BOp0, NotBOC);
2985      return new ICmpInst(Pred, And, NotBOC);
2986    }
2987    break;
2988  }
2989  case Instruction::And: {
2990    const APInt *BOC;
2991    if (match(BOp1, m_APInt(BOC))) {
2992      // If we have ((X & C) == C), turn it into ((X & C) != 0).
2993      if (C == *BOC && C.isPowerOf2())
2994        return new ICmpInst(isICMP_NE ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE,
2995                            BO, Constant::getNullValue(RHS->getType()));
2996    }
2997    break;
2998  }
2999  case Instruction::Mul:
3000    if (C.isNullValue() && BO->hasNoSignedWrap()) {
3001      const APInt *BOC;
3002      if (match(BOp1, m_APInt(BOC)) && !BOC->isNullValue()) {
3003        // The trivial case (mul X, 0) is handled by InstSimplify.
3004        // General case : (mul X, C) != 0 iff X != 0
3005        //                (mul X, C) == 0 iff X == 0
3006        return new ICmpInst(Pred, BOp0, Constant::getNullValue(RHS->getType()));
3007      }
3008    }
3009    break;
3010  case Instruction::UDiv:
3011    if (C.isNullValue()) {
      // (icmp eq/ne (udiv A, B), 0) -> (icmp ugt/ule B, A)
3013      auto NewPred = isICMP_NE ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_UGT;
3014      return new ICmpInst(NewPred, BOp1, BOp0);
3015    }
3016    break;
3017  default:
3018    break;
3019  }
3020  return nullptr;
3021}
3022
3023/// Fold an equality icmp with LLVM intrinsic and constant operand.
3024Instruction *InstCombiner::foldICmpEqIntrinsicWithConstant(ICmpInst &Cmp,
3025                                                           IntrinsicInst *II,
3026                                                           const APInt &C) {
3027  Type *Ty = II->getType();
3028  unsigned BitWidth = C.getBitWidth();
3029  switch (II->getIntrinsicID()) {
3030  case Intrinsic::bswap:
3031    Worklist.Add(II);
3032    Cmp.setOperand(0, II->getArgOperand(0));
3033    Cmp.setOperand(1, ConstantInt::get(Ty, C.byteSwap()));
3034    return &Cmp;
3035
3036  case Intrinsic::ctlz:
3037  case Intrinsic::cttz: {
3038    // ctz(A) == bitwidth(A)  ->  A == 0 and likewise for !=
3039    if (C == BitWidth) {
3040      Worklist.Add(II);
3041      Cmp.setOperand(0, II->getArgOperand(0));
3042      Cmp.setOperand(1, ConstantInt::getNullValue(Ty));
3043      return &Cmp;
3044    }
3045
    // ctz(A) == C -> A & Mask1 == Mask2, where Mask2 only has bit C set
    // and Mask1 has bits 0..C set. Similarly for ctlz, but for high bits.
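    // For instance (width assumed >= 3): cttz(A) == 2 becomes (A & 7) == 4;
    // for i8, ctlz(A) == 2 becomes (A & 0xE0) == 0x20.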
3048    // Limit to one use to ensure we don't increase instruction count.
3049    unsigned Num = C.getLimitedValue(BitWidth);
3050    if (Num != BitWidth && II->hasOneUse()) {
3051      bool IsTrailing = II->getIntrinsicID() == Intrinsic::cttz;
3052      APInt Mask1 = IsTrailing ? APInt::getLowBitsSet(BitWidth, Num + 1)
3053                               : APInt::getHighBitsSet(BitWidth, Num + 1);
3054      APInt Mask2 = IsTrailing
3055        ? APInt::getOneBitSet(BitWidth, Num)
3056        : APInt::getOneBitSet(BitWidth, BitWidth - Num - 1);
3057      Cmp.setOperand(0, Builder.CreateAnd(II->getArgOperand(0), Mask1));
3058      Cmp.setOperand(1, ConstantInt::get(Ty, Mask2));
3059      Worklist.Add(II);
3060      return &Cmp;
3061    }
3062    break;
3063  }
3064
3065  case Intrinsic::ctpop: {
3066    // popcount(A) == 0  ->  A == 0 and likewise for !=
3067    // popcount(A) == bitwidth(A)  ->  A == -1 and likewise for !=
3068    bool IsZero = C.isNullValue();
3069    if (IsZero || C == BitWidth) {
3070      Worklist.Add(II);
3071      Cmp.setOperand(0, II->getArgOperand(0));
3072      auto *NewOp =
3073          IsZero ? Constant::getNullValue(Ty) : Constant::getAllOnesValue(Ty);
3074      Cmp.setOperand(1, NewOp);
3075      return &Cmp;
3076    }
3077    break;
3078  }
3079
3080  case Intrinsic::uadd_sat: {
3081    // uadd.sat(a, b) == 0  ->  (a | b) == 0
3082    if (C.isNullValue()) {
3083      Value *Or = Builder.CreateOr(II->getArgOperand(0), II->getArgOperand(1));
3084      return replaceInstUsesWith(Cmp, Builder.CreateICmp(
3085          Cmp.getPredicate(), Or, Constant::getNullValue(Ty)));
3086
3087    }
3088    break;
3089  }
3090
3091  case Intrinsic::usub_sat: {
3092    // usub.sat(a, b) == 0  ->  a <= b
3093    if (C.isNullValue()) {
3094      ICmpInst::Predicate NewPred = Cmp.getPredicate() == ICmpInst::ICMP_EQ
3095          ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_UGT;
3096      return ICmpInst::Create(Instruction::ICmp, NewPred,
3097                              II->getArgOperand(0), II->getArgOperand(1));
3098    }
3099    break;
3100  }
3101  default:
3102    break;
3103  }
3104
3105  return nullptr;
3106}
3107
3108/// Fold an icmp with LLVM intrinsic and constant operand: icmp Pred II, C.
3109Instruction *InstCombiner::foldICmpIntrinsicWithConstant(ICmpInst &Cmp,
3110                                                         IntrinsicInst *II,
3111                                                         const APInt &C) {
3112  if (Cmp.isEquality())
3113    return foldICmpEqIntrinsicWithConstant(Cmp, II, C);
3114
3115  Type *Ty = II->getType();
3116  unsigned BitWidth = C.getBitWidth();
3117  switch (II->getIntrinsicID()) {
3118  case Intrinsic::ctlz: {
3119    // ctlz(0bXXXXXXXX) > 3 -> 0bXXXXXXXX < 0b00010000
3120    if (Cmp.getPredicate() == ICmpInst::ICMP_UGT && C.ult(BitWidth)) {
3121      unsigned Num = C.getLimitedValue();
3122      APInt Limit = APInt::getOneBitSet(BitWidth, BitWidth - Num - 1);
3123      return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_ULT,
3124                             II->getArgOperand(0), ConstantInt::get(Ty, Limit));
3125    }
3126
3127    // ctlz(0bXXXXXXXX) < 3 -> 0bXXXXXXXX > 0b00011111
3128    if (Cmp.getPredicate() == ICmpInst::ICMP_ULT &&
3129        C.uge(1) && C.ule(BitWidth)) {
3130      unsigned Num = C.getLimitedValue();
3131      APInt Limit = APInt::getLowBitsSet(BitWidth, BitWidth - Num);
3132      return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_UGT,
3133                             II->getArgOperand(0), ConstantInt::get(Ty, Limit));
3134    }
3135    break;
3136  }
3137  case Intrinsic::cttz: {
3138    // Limit to one use to ensure we don't increase instruction count.
3139    if (!II->hasOneUse())
3140      return nullptr;
3141
3142    // cttz(0bXXXXXXXX) > 3 -> 0bXXXXXXXX & 0b00001111 == 0
3143    if (Cmp.getPredicate() == ICmpInst::ICMP_UGT && C.ult(BitWidth)) {
3144      APInt Mask = APInt::getLowBitsSet(BitWidth, C.getLimitedValue() + 1);
3145      return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_EQ,
3146                             Builder.CreateAnd(II->getArgOperand(0), Mask),
3147                             ConstantInt::getNullValue(Ty));
3148    }
3149
3150    // cttz(0bXXXXXXXX) < 3 -> 0bXXXXXXXX & 0b00000111 != 0
3151    if (Cmp.getPredicate() == ICmpInst::ICMP_ULT &&
3152        C.uge(1) && C.ule(BitWidth)) {
3153      APInt Mask = APInt::getLowBitsSet(BitWidth, C.getLimitedValue());
3154      return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_NE,
3155                             Builder.CreateAnd(II->getArgOperand(0), Mask),
3156                             ConstantInt::getNullValue(Ty));
3157    }
3158    break;
3159  }
3160  default:
3161    break;
3162  }
3163
3164  return nullptr;
3165}
3166
3167/// Handle icmp with constant (but not simple integer constant) RHS.
3168Instruction *InstCombiner::foldICmpInstWithConstantNotInt(ICmpInst &I) {
3169  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3170  Constant *RHSC = dyn_cast<Constant>(Op1);
3171  Instruction *LHSI = dyn_cast<Instruction>(Op0);
3172  if (!RHSC || !LHSI)
3173    return nullptr;
3174
3175  switch (LHSI->getOpcode()) {
3176  case Instruction::GetElementPtr:
3177    // icmp pred GEP (P, int 0, int 0, int 0), null -> icmp pred P, null
3178    if (RHSC->isNullValue() &&
3179        cast<GetElementPtrInst>(LHSI)->hasAllZeroIndices())
3180      return new ICmpInst(
3181          I.getPredicate(), LHSI->getOperand(0),
3182          Constant::getNullValue(LHSI->getOperand(0)->getType()));
3183    break;
3184  case Instruction::PHI:
3185    // Only fold icmp into the PHI if the phi and icmp are in the same
3186    // block.  If in the same block, we're encouraging jump threading.  If
3187    // not, we are just pessimizing the code by making an i1 phi.
3188    if (LHSI->getParent() == I.getParent())
3189      if (Instruction *NV = foldOpIntoPhi(I, cast<PHINode>(LHSI)))
3190        return NV;
3191    break;
3192  case Instruction::Select: {
3193    // If either operand of the select is a constant, we can fold the
3194    // comparison into the select arms, which will cause one to be
3195    // constant folded and the select turned into a bitwise or.
3196    Value *Op1 = nullptr, *Op2 = nullptr;
3197    ConstantInt *CI = nullptr;
3198    if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1))) {
3199      Op1 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
3200      CI = dyn_cast<ConstantInt>(Op1);
3201    }
3202    if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2))) {
3203      Op2 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
3204      CI = dyn_cast<ConstantInt>(Op2);
3205    }
3206
3207    // We only want to perform this transformation if it will not lead to
3208    // additional code. This is true if either both sides of the select
3209    // fold to a constant (in which case the icmp is replaced with a select
3210    // which will usually simplify) or this is the only user of the
3211    // select (in which case we are trading a select+icmp for a simpler
3212    // select+icmp) or all uses of the select can be replaced based on
3213    // dominance information ("Global cases").
3214    bool Transform = false;
3215    if (Op1 && Op2)
3216      Transform = true;
3217    else if (Op1 || Op2) {
3218      // Local case
3219      if (LHSI->hasOneUse())
3220        Transform = true;
3221      // Global cases
3222      else if (CI && !CI->isZero())
3223        // When Op1 is constant try replacing select with second operand.
3224        // Otherwise Op2 is constant and try replacing select with first
3225        // operand.
3226        Transform =
3227            replacedSelectWithOperand(cast<SelectInst>(LHSI), &I, Op1 ? 2 : 1);
3228    }
3229    if (Transform) {
3230      if (!Op1)
3231        Op1 = Builder.CreateICmp(I.getPredicate(), LHSI->getOperand(1), RHSC,
3232                                 I.getName());
3233      if (!Op2)
3234        Op2 = Builder.CreateICmp(I.getPredicate(), LHSI->getOperand(2), RHSC,
3235                                 I.getName());
3236      return SelectInst::Create(LHSI->getOperand(0), Op1, Op2);
3237    }
3238    break;
3239  }
3240  case Instruction::IntToPtr:
3241    // icmp pred inttoptr(X), null -> icmp pred X, 0
3242    if (RHSC->isNullValue() &&
3243        DL.getIntPtrType(RHSC->getType()) == LHSI->getOperand(0)->getType())
3244      return new ICmpInst(
3245          I.getPredicate(), LHSI->getOperand(0),
3246          Constant::getNullValue(LHSI->getOperand(0)->getType()));
3247    break;
3248
3249  case Instruction::Load:
3250    // Try to optimize things like "A[i] > 4" to index computations.
3251    if (GetElementPtrInst *GEP =
3252            dyn_cast<GetElementPtrInst>(LHSI->getOperand(0))) {
3253      if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
3254        if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
3255            !cast<LoadInst>(LHSI)->isVolatile())
3256          if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, I))
3257            return Res;
3258    }
3259    break;
3260  }
3261
3262  return nullptr;
3263}
3264
3265/// Some comparisons can be simplified.
3266/// In this case, we are looking for comparisons that look like
3267/// a check for a lossy truncation.
3268/// Folds:
3269///   icmp SrcPred (x & Mask), x    to    icmp DstPred x, Mask
3270/// Where Mask is some pattern that produces all-ones in low bits:
3271///    (-1 >> y)
3272///    ((-1 << y) >> y)     <- non-canonical, has extra uses
3273///   ~(-1 << y)
3274///    ((1 << y) + (-1))    <- non-canonical, has extra uses
3275/// The Mask can be a constant, too.
3276/// For some predicates, the operands are commutative.
3277/// For others, x can only be on a specific side.
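/// For illustration (constant assumed): icmp eq (and %x, 15), %x becomes
/// icmp ule %x, 15, since the 'and' can only clear bits of %x.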
3278static Value *foldICmpWithLowBitMaskedVal(ICmpInst &I,
3279                                          InstCombiner::BuilderTy &Builder) {
3280  ICmpInst::Predicate SrcPred;
3281  Value *X, *M, *Y;
3282  auto m_VariableMask = m_CombineOr(
3283      m_CombineOr(m_Not(m_Shl(m_AllOnes(), m_Value())),
3284                  m_Add(m_Shl(m_One(), m_Value()), m_AllOnes())),
3285      m_CombineOr(m_LShr(m_AllOnes(), m_Value()),
3286                  m_LShr(m_Shl(m_AllOnes(), m_Value(Y)), m_Deferred(Y))));
3287  auto m_Mask = m_CombineOr(m_VariableMask, m_LowBitMask());
3288  if (!match(&I, m_c_ICmp(SrcPred,
3289                          m_c_And(m_CombineAnd(m_Mask, m_Value(M)), m_Value(X)),
3290                          m_Deferred(X))))
3291    return nullptr;
3292
3293  ICmpInst::Predicate DstPred;
3294  switch (SrcPred) {
3295  case ICmpInst::Predicate::ICMP_EQ:
3296    //  x & (-1 >> y) == x    ->    x u<= (-1 >> y)
3297    DstPred = ICmpInst::Predicate::ICMP_ULE;
3298    break;
3299  case ICmpInst::Predicate::ICMP_NE:
3300    //  x & (-1 >> y) != x    ->    x u> (-1 >> y)
3301    DstPred = ICmpInst::Predicate::ICMP_UGT;
3302    break;
3303  case ICmpInst::Predicate::ICMP_UGT:
3304    //  x u> x & (-1 >> y)    ->    x u> (-1 >> y)
3305    assert(X == I.getOperand(0) && "instsimplify took care of commut. variant");
3306    DstPred = ICmpInst::Predicate::ICMP_UGT;
3307    break;
3308  case ICmpInst::Predicate::ICMP_UGE:
3309    //  x & (-1 >> y) u>= x    ->    x u<= (-1 >> y)
3310    assert(X == I.getOperand(1) && "instsimplify took care of commut. variant");
3311    DstPred = ICmpInst::Predicate::ICMP_ULE;
3312    break;
3313  case ICmpInst::Predicate::ICMP_ULT:
3314    //  x & (-1 >> y) u< x    ->    x u> (-1 >> y)
3315    assert(X == I.getOperand(1) && "instsimplify took care of commut. variant");
3316    DstPred = ICmpInst::Predicate::ICMP_UGT;
3317    break;
3318  case ICmpInst::Predicate::ICMP_ULE:
3319    //  x u<= x & (-1 >> y)    ->    x u<= (-1 >> y)
3320    assert(X == I.getOperand(0) && "instsimplify took care of commut. variant");
3321    DstPred = ICmpInst::Predicate::ICMP_ULE;
3322    break;
3323  case ICmpInst::Predicate::ICMP_SGT:
3324    //  x s> x & (-1 >> y)    ->    x s> (-1 >> y)
3325    if (X != I.getOperand(0)) // X must be on LHS of comparison!
3326      return nullptr;         // Ignore the other case.
    if (!match(M, m_Constant())) // Cannot do this fold with a non-constant.
3328      return nullptr;
3329    if (!match(M, m_NonNegative())) // Must not have any -1 vector elements.
3330      return nullptr;
3331    DstPred = ICmpInst::Predicate::ICMP_SGT;
3332    break;
3333  case ICmpInst::Predicate::ICMP_SGE:
3334    //  x & (-1 >> y) s>= x    ->    x s<= (-1 >> y)
3335    if (X != I.getOperand(1)) // X must be on RHS of comparison!
3336      return nullptr;         // Ignore the other case.
    if (!match(M, m_Constant())) // Cannot do this fold with a non-constant.
3338      return nullptr;
3339    if (!match(M, m_NonNegative())) // Must not have any -1 vector elements.
3340      return nullptr;
3341    DstPred = ICmpInst::Predicate::ICMP_SLE;
3342    break;
3343  case ICmpInst::Predicate::ICMP_SLT:
3344    //  x & (-1 >> y) s< x    ->    x s> (-1 >> y)
3345    if (X != I.getOperand(1)) // X must be on RHS of comparison!
3346      return nullptr;         // Ignore the other case.
    if (!match(M, m_Constant())) // Cannot do this fold with a non-constant.
3348      return nullptr;
3349    if (!match(M, m_NonNegative())) // Must not have any -1 vector elements.
3350      return nullptr;
3351    DstPred = ICmpInst::Predicate::ICMP_SGT;
3352    break;
3353  case ICmpInst::Predicate::ICMP_SLE:
3354    //  x s<= x & (-1 >> y)    ->    x s<= (-1 >> y)
3355    if (X != I.getOperand(0)) // X must be on LHS of comparison!
3356      return nullptr;         // Ignore the other case.
    if (!match(M, m_Constant())) // Cannot do this fold with a non-constant.
3358      return nullptr;
3359    if (!match(M, m_NonNegative())) // Must not have any -1 vector elements.
3360      return nullptr;
3361    DstPred = ICmpInst::Predicate::ICMP_SLE;
3362    break;
3363  default:
3364    llvm_unreachable("All possible folds are handled.");
3365  }
3366
3367  // The mask value may be a vector constant that has undefined elements. But it
3368  // may not be safe to propagate those undefs into the new compare, so replace
3369  // those elements by copying an existing, defined, and safe scalar constant.
3370  Type *OpTy = M->getType();
3371  auto *VecC = dyn_cast<Constant>(M);
3372  if (OpTy->isVectorTy() && VecC && VecC->containsUndefElement()) {
3373    Constant *SafeReplacementConstant = nullptr;
3374    for (unsigned i = 0, e = OpTy->getVectorNumElements(); i != e; ++i) {
3375      if (!isa<UndefValue>(VecC->getAggregateElement(i))) {
3376        SafeReplacementConstant = VecC->getAggregateElement(i);
3377        break;
3378      }
3379    }
3380    assert(SafeReplacementConstant && "Failed to find undef replacement");
3381    M = Constant::replaceUndefsWith(VecC, SafeReplacementConstant);
3382  }
3383
3384  return Builder.CreateICmp(DstPred, X, M);
3385}
3386
3387/// Some comparisons can be simplified.
3388/// In this case, we are looking for comparisons that look like
3389/// a check for a lossy signed truncation.
3390/// Folds:   (MaskedBits is a constant.)
3391///   ((%x << MaskedBits) a>> MaskedBits) SrcPred %x
3392/// Into:
3393///   (add %x, (1 << (KeptBits-1))) DstPred (1 << KeptBits)
3394/// Where  KeptBits = bitwidth(%x) - MaskedBits
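/// For example (i8, MaskedBits = 4, so KeptBits = 4; values assumed):
///   ((%x << 4) a>> 4) == %x   becomes   (add %x, 8) u< 16,
/// i.e. a check that %x lies in the signed range [-8, 7].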
3395static Value *
3396foldICmpWithTruncSignExtendedVal(ICmpInst &I,
3397                                 InstCombiner::BuilderTy &Builder) {
3398  ICmpInst::Predicate SrcPred;
3399  Value *X;
3400  const APInt *C0, *C1; // FIXME: non-splats, potentially with undef.
3401  // We are ok with 'shl' having multiple uses, but 'ashr' must be one-use.
3402  if (!match(&I, m_c_ICmp(SrcPred,
3403                          m_OneUse(m_AShr(m_Shl(m_Value(X), m_APInt(C0)),
3404                                          m_APInt(C1))),
3405                          m_Deferred(X))))
3406    return nullptr;
3407
3408  // Potential handling of non-splats: for each element:
3409  //  * if both are undef, replace with constant 0.
3410  //    Because (1<<0) is OK and is 1, and ((1<<0)>>1) is also OK and is 0.
3411  //  * if both are not undef, and are different, bailout.
3412  //  * else, only one is undef, then pick the non-undef one.
3413
3414  // The shift amount must be equal.
3415  if (*C0 != *C1)
3416    return nullptr;
3417  const APInt &MaskedBits = *C0;
3418  assert(MaskedBits != 0 && "shift by zero should be folded away already.");
3419
3420  ICmpInst::Predicate DstPred;
3421  switch (SrcPred) {
3422  case ICmpInst::Predicate::ICMP_EQ:
3423    // ((%x << MaskedBits) a>> MaskedBits) == %x
3424    //   =>
3425    // (add %x, (1 << (KeptBits-1))) u< (1 << KeptBits)
3426    DstPred = ICmpInst::Predicate::ICMP_ULT;
3427    break;
3428  case ICmpInst::Predicate::ICMP_NE:
3429    // ((%x << MaskedBits) a>> MaskedBits) != %x
3430    //   =>
3431    // (add %x, (1 << (KeptBits-1))) u>= (1 << KeptBits)
3432    DstPred = ICmpInst::Predicate::ICMP_UGE;
3433    break;
3434  // FIXME: are more folds possible?
3435  default:
3436    return nullptr;
3437  }
3438
3439  auto *XType = X->getType();
3440  const unsigned XBitWidth = XType->getScalarSizeInBits();
3441  const APInt BitWidth = APInt(XBitWidth, XBitWidth);
3442  assert(BitWidth.ugt(MaskedBits) && "shifts should leave some bits untouched");
3443
3444  // KeptBits = bitwidth(%x) - MaskedBits
3445  const APInt KeptBits = BitWidth - MaskedBits;
3446  assert(KeptBits.ugt(0) && KeptBits.ult(BitWidth) && "unreachable");
3447  // ICmpCst = (1 << KeptBits)
3448  const APInt ICmpCst = APInt(XBitWidth, 1).shl(KeptBits);
3449  assert(ICmpCst.isPowerOf2());
3450  // AddCst = (1 << (KeptBits-1))
3451  const APInt AddCst = ICmpCst.lshr(1);
3452  assert(AddCst.ult(ICmpCst) && AddCst.isPowerOf2());
3453
3454  // T0 = add %x, AddCst
3455  Value *T0 = Builder.CreateAdd(X, ConstantInt::get(XType, AddCst));
3456  // T1 = T0 DstPred ICmpCst
3457  Value *T1 = Builder.CreateICmp(DstPred, T0, ConstantInt::get(XType, ICmpCst));
3458
3459  return T1;
3460}
3461
3462// Given pattern:
3463//   icmp eq/ne (and ((x shift Q), (y oppositeshift K))), 0
3464// we should move shifts to the same hand of 'and', i.e. rewrite as
3465//   icmp eq/ne (and (x shift (Q+K)), y), 0  iff (Q+K) u< bitwidth(x)
3466// We are only interested in opposite logical shifts here.
3467// One of the shifts can be truncated.
// If we can, we prefer to end up with an 'lshr' shift.
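// An assumed example: icmp eq (and (shl %x, 1), (lshr %y, 2)), 0 can be
// rewritten as icmp eq (and (lshr %y, 3), %x), 0 when 3 u< bitwidth(%x);
// only the zero/non-zero property of the 'and' is preserved, not its value.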
3469static Value *
3470foldShiftIntoShiftInAnotherHandOfAndInICmp(ICmpInst &I, const SimplifyQuery SQ,
3471                                           InstCombiner::BuilderTy &Builder) {
3472  if (!I.isEquality() || !match(I.getOperand(1), m_Zero()) ||
3473      !I.getOperand(0)->hasOneUse())
3474    return nullptr;
3475
3476  auto m_AnyLogicalShift = m_LogicalShift(m_Value(), m_Value());
3477
3478  // Look for an 'and' of two logical shifts, one of which may be truncated.
3479  // We use m_TruncOrSelf() on the RHS to correctly handle commutative case.
3480  Instruction *XShift, *MaybeTruncation, *YShift;
3481  if (!match(
3482          I.getOperand(0),
3483          m_c_And(m_CombineAnd(m_AnyLogicalShift, m_Instruction(XShift)),
3484                  m_CombineAnd(m_TruncOrSelf(m_CombineAnd(
3485                                   m_AnyLogicalShift, m_Instruction(YShift))),
3486                               m_Instruction(MaybeTruncation)))))
3487    return nullptr;
3488
3489  // We potentially looked past 'trunc', but only when matching YShift,
3490  // therefore YShift must have the widest type.
3491  Instruction *WidestShift = YShift;
  // Therefore XShift must have the narrowest type.
3493  // Or they both have identical types if there was no truncation.
3494  Instruction *NarrowestShift = XShift;
3495
3496  Type *WidestTy = WidestShift->getType();
3497  Type *NarrowestTy = NarrowestShift->getType();
3498  assert(NarrowestTy == I.getOperand(0)->getType() &&
3499         "We did not look past any shifts while matching XShift though.");
3500  bool HadTrunc = WidestTy != I.getOperand(0)->getType();
3501
3502  // If YShift is a 'lshr', swap the shifts around.
3503  if (match(YShift, m_LShr(m_Value(), m_Value())))
3504    std::swap(XShift, YShift);
3505
3506  // The shifts must be in opposite directions.
3507  auto XShiftOpcode = XShift->getOpcode();
3508  if (XShiftOpcode == YShift->getOpcode())
3509    return nullptr; // Do not care about same-direction shifts here.
3510
3511  Value *X, *XShAmt, *Y, *YShAmt;
3512  match(XShift, m_BinOp(m_Value(X), m_ZExtOrSelf(m_Value(XShAmt))));
3513  match(YShift, m_BinOp(m_Value(Y), m_ZExtOrSelf(m_Value(YShAmt))));
3514
3515  // If one of the values being shifted is a constant, then we will end with
3516  // and+icmp, and [zext+]shift instrs will be constant-folded. If they are not,
3517  // however, we will need to ensure that we won't increase instruction count.
3518  if (!isa<Constant>(X) && !isa<Constant>(Y)) {
3519    // At least one of the hands of the 'and' should be one-use shift.
3520    if (!match(I.getOperand(0),
3521               m_c_And(m_OneUse(m_AnyLogicalShift), m_Value())))
3522      return nullptr;
3523    if (HadTrunc) {
3524      // Due to the 'trunc', we will need to widen X. For that either the old
3525      // 'trunc' or the shift amt in the non-truncated shift should be one-use.
3526      if (!MaybeTruncation->hasOneUse() &&
3527          !NarrowestShift->getOperand(1)->hasOneUse())
3528        return nullptr;
3529    }
3530  }
3531
3532  // We have two shift amounts from two different shifts. The types of those
3533  // shift amounts may not match. If that's the case let's bailout now.
3534  if (XShAmt->getType() != YShAmt->getType())
3535    return nullptr;
3536
3537  // As input, we have the following pattern:
3538  //   icmp eq/ne (and ((x shift Q), (y oppositeshift K))), 0
3539  // We want to rewrite that as:
3540  //   icmp eq/ne (and (x shift (Q+K)), y), 0  iff (Q+K) u< bitwidth(x)
3541  // While we know that originally (Q+K) would not overflow
3542  // (because  2 * (N-1) u<= iN -1), we have looked past extensions of
  // shift amounts, so it may now overflow in a smaller bit width.
3544  // To ensure that does not happen, we need to ensure that the total maximal
3545  // shift amount is still representable in that smaller bit width.
3546  unsigned MaximalPossibleTotalShiftAmount =
3547      (WidestTy->getScalarSizeInBits() - 1) +
3548      (NarrowestTy->getScalarSizeInBits() - 1);
3549  APInt MaximalRepresentableShiftAmount =
3550      APInt::getAllOnesValue(XShAmt->getType()->getScalarSizeInBits());
3551  if (MaximalRepresentableShiftAmount.ult(MaximalPossibleTotalShiftAmount))
3552    return nullptr;
3553
3554  // Can we fold (XShAmt+YShAmt) ?
3555  auto *NewShAmt = dyn_cast_or_null<Constant>(
3556      SimplifyAddInst(XShAmt, YShAmt, /*isNSW=*/false,
3557                      /*isNUW=*/false, SQ.getWithInstruction(&I)));
3558  if (!NewShAmt)
3559    return nullptr;
3560  NewShAmt = ConstantExpr::getZExtOrBitCast(NewShAmt, WidestTy);
3561  unsigned WidestBitWidth = WidestTy->getScalarSizeInBits();
3562
3563  // Is the new shift amount smaller than the bit width?
3564  // FIXME: could also rely on ConstantRange.
3565  if (!match(NewShAmt,
3566             m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_ULT,
3567                                APInt(WidestBitWidth, WidestBitWidth))))
3568    return nullptr;
3569
3570  // An extra legality check is needed if we had trunc-of-lshr.
3571  if (HadTrunc && match(WidestShift, m_LShr(m_Value(), m_Value()))) {
3572    auto CanFold = [NewShAmt, WidestBitWidth, NarrowestShift, SQ,
3573                    WidestShift]() {
3574      // It isn't obvious whether it's worth it to analyze non-constants here.
3575      // Also, let's basically give up on non-splat cases, pessimizing vectors.
3576      // If *any* of these preconditions matches we can perform the fold.
3577      Constant *NewShAmtSplat = NewShAmt->getType()->isVectorTy()
3578                                    ? NewShAmt->getSplatValue()
3579                                    : NewShAmt;
3580      // If it's edge-case shift (by 0 or by WidestBitWidth-1) we can fold.
3581      if (NewShAmtSplat &&
3582          (NewShAmtSplat->isNullValue() ||
3583           NewShAmtSplat->getUniqueInteger() == WidestBitWidth - 1))
3584        return true;
3585      // We consider *min* leading zeros so a single outlier
3586      // blocks the transform as opposed to allowing it.
3587      if (auto *C = dyn_cast<Constant>(NarrowestShift->getOperand(0))) {
3588        KnownBits Known = computeKnownBits(C, SQ.DL);
3589        unsigned MinLeadZero = Known.countMinLeadingZeros();
3590        // If the value being shifted has at most lowest bit set we can fold.
3591        unsigned MaxActiveBits = Known.getBitWidth() - MinLeadZero;
3592        if (MaxActiveBits <= 1)
3593          return true;
3594        // Precondition:  NewShAmt u<= countLeadingZeros(C)
3595        if (NewShAmtSplat && NewShAmtSplat->getUniqueInteger().ule(MinLeadZero))
3596          return true;
3597      }
3598      if (auto *C = dyn_cast<Constant>(WidestShift->getOperand(0))) {
3599        KnownBits Known = computeKnownBits(C, SQ.DL);
3600        unsigned MinLeadZero = Known.countMinLeadingZeros();
3601        // If the value being shifted has at most lowest bit set we can fold.
3602        unsigned MaxActiveBits = Known.getBitWidth() - MinLeadZero;
3603        if (MaxActiveBits <= 1)
3604          return true;
3605        // Precondition:  ((WidestBitWidth-1)-NewShAmt) u<= countLeadingZeros(C)
3606        if (NewShAmtSplat) {
3607          APInt AdjNewShAmt =
3608              (WidestBitWidth - 1) - NewShAmtSplat->getUniqueInteger();
3609          if (AdjNewShAmt.ule(MinLeadZero))
3610            return true;
3611        }
3612      }
3613      return false; // Can't tell if it's ok.
3614    };
3615    if (!CanFold())
3616      return nullptr;
3617  }
3618
3619  // All good, we can do this fold.
3620  X = Builder.CreateZExt(X, WidestTy);
3621  Y = Builder.CreateZExt(Y, WidestTy);
3622  // The shift is the same that was for X.
3623  Value *T0 = XShiftOpcode == Instruction::BinaryOps::LShr
3624                  ? Builder.CreateLShr(X, NewShAmt)
3625                  : Builder.CreateShl(X, NewShAmt);
3626  Value *T1 = Builder.CreateAnd(T0, Y);
3627  return Builder.CreateICmp(I.getPredicate(), T1,
3628                            Constant::getNullValue(WidestTy));
3629}
3630
3631/// Fold
3632///   (-1 u/ x) u< y
3633///   ((x * y) u/ x) != y
3634/// to
3635///   @llvm.umul.with.overflow(x, y) plus extraction of overflow bit
/// Note that the comparison is commutative, while the inverted predicates
/// (u>=, ==) mean that we are checking for the opposite answer.
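/// For instance (names assumed): icmp ult (udiv -1, %x), %y asks whether
/// %x * %y would wrap, so it becomes the overflow bit of
/// @llvm.umul.with.overflow(%x, %y).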
3638Value *InstCombiner::foldUnsignedMultiplicationOverflowCheck(ICmpInst &I) {
3639  ICmpInst::Predicate Pred;
3640  Value *X, *Y;
3641  Instruction *Mul;
3642  bool NeedNegation;
3643  // Look for: (-1 u/ x) u</u>= y
3644  if (!I.isEquality() &&
3645      match(&I, m_c_ICmp(Pred, m_OneUse(m_UDiv(m_AllOnes(), m_Value(X))),
3646                         m_Value(Y)))) {
3647    Mul = nullptr;
3648    // Canonicalize as-if y was on RHS.
3649    if (I.getOperand(1) != Y)
3650      Pred = I.getSwappedPredicate();
3651
3652    // Are we checking that overflow does not happen, or does happen?
3653    switch (Pred) {
3654    case ICmpInst::Predicate::ICMP_ULT:
3655      NeedNegation = false;
3656      break; // OK
3657    case ICmpInst::Predicate::ICMP_UGE:
3658      NeedNegation = true;
3659      break; // OK
3660    default:
3661      return nullptr; // Wrong predicate.
3662    }
3663  } else // Look for: ((x * y) u/ x) !=/== y
3664      if (I.isEquality() &&
3665          match(&I, m_c_ICmp(Pred, m_Value(Y),
3666                             m_OneUse(m_UDiv(m_CombineAnd(m_c_Mul(m_Deferred(Y),
3667                                                                  m_Value(X)),
3668                                                          m_Instruction(Mul)),
3669                                             m_Deferred(X)))))) {
3670    NeedNegation = Pred == ICmpInst::Predicate::ICMP_EQ;
3671  } else
3672    return nullptr;
3673
3674  BuilderTy::InsertPointGuard Guard(Builder);
3675  // If the pattern included (x * y), we'll want to insert new instructions
3676  // right before that original multiplication so that we can replace it.
3677  bool MulHadOtherUses = Mul && !Mul->hasOneUse();
3678  if (MulHadOtherUses)
3679    Builder.SetInsertPoint(Mul);
3680
3681  Function *F = Intrinsic::getDeclaration(
3682      I.getModule(), Intrinsic::umul_with_overflow, X->getType());
3683  CallInst *Call = Builder.CreateCall(F, {X, Y}, "umul");
3684
3685  // If the multiplication was used elsewhere, to ensure that we don't leave
3686  // "duplicate" instructions, replace uses of that original multiplication
3687  // with the multiplication result from the with.overflow intrinsic.
3688  if (MulHadOtherUses)
3689    replaceInstUsesWith(*Mul, Builder.CreateExtractValue(Call, 0, "umul.val"));
3690
3691  Value *Res = Builder.CreateExtractValue(Call, 1, "umul.ov");
3692  if (NeedNegation) // This technically increases instruction count.
3693    Res = Builder.CreateNot(Res, "umul.not.ov");
3694
3695  return Res;
3696}
3697
3698/// Try to fold icmp (binop), X or icmp X, (binop).
3699/// TODO: A large part of this logic is duplicated in InstSimplify's
3700/// simplifyICmpWithBinOp(). We should be able to share that and avoid the code
3701/// duplication.
3702Instruction *InstCombiner::foldICmpBinOp(ICmpInst &I, const SimplifyQuery &SQ) {
3703  const SimplifyQuery Q = SQ.getWithInstruction(&I);
3704  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3705
3706  // Special logic for binary operators.
3707  BinaryOperator *BO0 = dyn_cast<BinaryOperator>(Op0);
3708  BinaryOperator *BO1 = dyn_cast<BinaryOperator>(Op1);
3709  if (!BO0 && !BO1)
3710    return nullptr;
3711
3712  const CmpInst::Predicate Pred = I.getPredicate();
3713  Value *X;
3714
3715  // Convert add-with-unsigned-overflow comparisons into a 'not' with compare.
3716  // (Op1 + X) u</u>= Op1 --> ~Op1 u</u>= X
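  // E.g. icmp ult (add %a, %x), %a is an unsigned overflow check; with names
  // assumed for illustration, it becomes icmp ult (xor %a, -1), %x.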
3717  if (match(Op0, m_OneUse(m_c_Add(m_Specific(Op1), m_Value(X)))) &&
3718      (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE))
3719    return new ICmpInst(Pred, Builder.CreateNot(Op1), X);
3720  // Op0 u>/u<= (Op0 + X) --> X u>/u<= ~Op0
3721  if (match(Op1, m_OneUse(m_c_Add(m_Specific(Op0), m_Value(X)))) &&
3722      (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE))
3723    return new ICmpInst(Pred, X, Builder.CreateNot(Op0));
3724
3725  bool NoOp0WrapProblem = false, NoOp1WrapProblem = false;
3726  if (BO0 && isa<OverflowingBinaryOperator>(BO0))
3727    NoOp0WrapProblem =
3728        ICmpInst::isEquality(Pred) ||
3729        (CmpInst::isUnsigned(Pred) && BO0->hasNoUnsignedWrap()) ||
3730        (CmpInst::isSigned(Pred) && BO0->hasNoSignedWrap());
3731  if (BO1 && isa<OverflowingBinaryOperator>(BO1))
3732    NoOp1WrapProblem =
3733        ICmpInst::isEquality(Pred) ||
3734        (CmpInst::isUnsigned(Pred) && BO1->hasNoUnsignedWrap()) ||
3735        (CmpInst::isSigned(Pred) && BO1->hasNoSignedWrap());
3736
3737  // Analyze the case when either Op0 or Op1 is an add instruction.
3738  // Op0 = A + B (or A and B are null); Op1 = C + D (or C and D are null).
3739  Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
3740  if (BO0 && BO0->getOpcode() == Instruction::Add) {
3741    A = BO0->getOperand(0);
3742    B = BO0->getOperand(1);
3743  }
3744  if (BO1 && BO1->getOpcode() == Instruction::Add) {
3745    C = BO1->getOperand(0);
3746    D = BO1->getOperand(1);
3747  }
3748
3749  // icmp (A+B), A -> icmp B, 0 for equalities or if there is no overflow.
3750  // icmp (A+B), B -> icmp A, 0 for equalities or if there is no overflow.
3751  if ((A == Op1 || B == Op1) && NoOp0WrapProblem)
3752    return new ICmpInst(Pred, A == Op1 ? B : A,
3753                        Constant::getNullValue(Op1->getType()));
3754
3755  // icmp C, (C+D) -> icmp 0, D for equalities or if there is no overflow.
3756  // icmp D, (C+D) -> icmp 0, C for equalities or if there is no overflow.
3757  if ((C == Op0 || D == Op0) && NoOp1WrapProblem)
3758    return new ICmpInst(Pred, Constant::getNullValue(Op0->getType()),
3759                        C == Op0 ? D : C);
3760
3761  // icmp (A+B), (A+D) -> icmp B, D for equalities or if there is no overflow.
3762  if (A && C && (A == C || A == D || B == C || B == D) && NoOp0WrapProblem &&
3763      NoOp1WrapProblem) {
3764    // Determine Y and Z in the form icmp (X+Y), (X+Z).
3765    Value *Y, *Z;
3766    if (A == C) {
3767      // C + B == C + D  ->  B == D
3768      Y = B;
3769      Z = D;
3770    } else if (A == D) {
3771      // D + B == C + D  ->  B == C
3772      Y = B;
3773      Z = C;
3774    } else if (B == C) {
3775      // A + C == C + D  ->  A == D
3776      Y = A;
3777      Z = D;
3778    } else {
3779      assert(B == D);
3780      // A + D == C + D  ->  A == C
3781      Y = A;
3782      Z = C;
3783    }
3784    return new ICmpInst(Pred, Y, Z);
3785  }
3786
3787  // icmp slt (A + -1), Op1 -> icmp sle A, Op1
3788  if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SLT &&
3789      match(B, m_AllOnes()))
3790    return new ICmpInst(CmpInst::ICMP_SLE, A, Op1);
3791
3792  // icmp sge (A + -1), Op1 -> icmp sgt A, Op1
3793  if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SGE &&
3794      match(B, m_AllOnes()))
3795    return new ICmpInst(CmpInst::ICMP_SGT, A, Op1);
3796
3797  // icmp sle (A + 1), Op1 -> icmp slt A, Op1
3798  if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SLE && match(B, m_One()))
3799    return new ICmpInst(CmpInst::ICMP_SLT, A, Op1);
3800
3801  // icmp sgt (A + 1), Op1 -> icmp sge A, Op1
3802  if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SGT && match(B, m_One()))
3803    return new ICmpInst(CmpInst::ICMP_SGE, A, Op1);
3804
3805  // icmp sgt Op0, (C + -1) -> icmp sge Op0, C
3806  if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SGT &&
3807      match(D, m_AllOnes()))
3808    return new ICmpInst(CmpInst::ICMP_SGE, Op0, C);
3809
3810  // icmp sle Op0, (C + -1) -> icmp slt Op0, C
3811  if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SLE &&
3812      match(D, m_AllOnes()))
3813    return new ICmpInst(CmpInst::ICMP_SLT, Op0, C);
3814
3815  // icmp sge Op0, (C + 1) -> icmp sgt Op0, C
3816  if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SGE && match(D, m_One()))
3817    return new ICmpInst(CmpInst::ICMP_SGT, Op0, C);
3818
3819  // icmp slt Op0, (C + 1) -> icmp sle Op0, C
3820  if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SLT && match(D, m_One()))
3821    return new ICmpInst(CmpInst::ICMP_SLE, Op0, C);
3822
3823  // TODO: The subtraction-related identities shown below also hold, but
3824  // canonicalization from (X -nuw 1) to (X + -1) means that the combinations
3825  // wouldn't happen even if they were implemented.
3826  //
3827  // icmp ult (A - 1), Op1 -> icmp ule A, Op1
3828  // icmp uge (A - 1), Op1 -> icmp ugt A, Op1
3829  // icmp ugt Op0, (C - 1) -> icmp uge Op0, C
3830  // icmp ule Op0, (C - 1) -> icmp ult Op0, C
3831
3832  // icmp ule (A + 1), Op1 -> icmp ult A, Op1
3833  if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_ULE && match(B, m_One()))
3834    return new ICmpInst(CmpInst::ICMP_ULT, A, Op1);
3835
3836  // icmp ugt (A + 1), Op1 -> icmp uge A, Op1
3837  if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_UGT && match(B, m_One()))
3838    return new ICmpInst(CmpInst::ICMP_UGE, A, Op1);
3839
3840  // icmp uge Op0, (C + 1) -> icmp ugt Op0, C
3841  if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_UGE && match(D, m_One()))
3842    return new ICmpInst(CmpInst::ICMP_UGT, Op0, C);
3843
3844  // icmp ult Op0, (C + 1) -> icmp ule Op0, C
3845  if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_ULT && match(D, m_One()))
3846    return new ICmpInst(CmpInst::ICMP_ULE, Op0, C);
3847
3848  // if C1 has greater magnitude than C2:
3849  //  icmp (A + C1), (C + C2) -> icmp (A + C3), C
3850  //  s.t. C3 = C1 - C2
3851  //
3852  // if C2 has greater magnitude than C1:
3853  //  icmp (A + C1), (C + C2) -> icmp A, (C + C3)
3854  //  s.t. C3 = C2 - C1
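  //
  // E.g. (illustrative, assuming nsw adds and a signed predicate):
  //   icmp slt (A + 9), (C + 5) -> icmp slt (A + 4), C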
3855  if (A && C && NoOp0WrapProblem && NoOp1WrapProblem &&
3856      (BO0->hasOneUse() || BO1->hasOneUse()) && !I.isUnsigned())
3857    if (ConstantInt *C1 = dyn_cast<ConstantInt>(B))
3858      if (ConstantInt *C2 = dyn_cast<ConstantInt>(D)) {
3859        const APInt &AP1 = C1->getValue();
3860        const APInt &AP2 = C2->getValue();
3861        if (AP1.isNegative() == AP2.isNegative()) {
3862          APInt AP1Abs = C1->getValue().abs();
3863          APInt AP2Abs = C2->getValue().abs();
3864          if (AP1Abs.uge(AP2Abs)) {
3865            ConstantInt *C3 = Builder.getInt(AP1 - AP2);
3866            Value *NewAdd = Builder.CreateNSWAdd(A, C3);
3867            return new ICmpInst(Pred, NewAdd, C);
3868          } else {
3869            ConstantInt *C3 = Builder.getInt(AP2 - AP1);
3870            Value *NewAdd = Builder.CreateNSWAdd(C, C3);
3871            return new ICmpInst(Pred, A, NewAdd);
3872          }
3873        }
3874      }
3875
3876  // Analyze the case when either Op0 or Op1 is a sub instruction.
3877  // Op0 = A - B (or A and B are null); Op1 = C - D (or C and D are null).
3878  A = nullptr;
3879  B = nullptr;
3880  C = nullptr;
3881  D = nullptr;
3882  if (BO0 && BO0->getOpcode() == Instruction::Sub) {
3883    A = BO0->getOperand(0);
3884    B = BO0->getOperand(1);
3885  }
3886  if (BO1 && BO1->getOpcode() == Instruction::Sub) {
3887    C = BO1->getOperand(0);
3888    D = BO1->getOperand(1);
3889  }
3890
3891  // icmp (A-B), A -> icmp 0, B for equalities or if there is no overflow.
3892  if (A == Op1 && NoOp0WrapProblem)
3893    return new ICmpInst(Pred, Constant::getNullValue(Op1->getType()), B);
3894  // icmp C, (C-D) -> icmp D, 0 for equalities or if there is no overflow.
3895  if (C == Op0 && NoOp1WrapProblem)
3896    return new ICmpInst(Pred, D, Constant::getNullValue(Op0->getType()));
3897
3898  // Convert sub-with-unsigned-overflow comparisons into a comparison of args.
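  // Sketch: A - B wraps around (unsigned) exactly when B u> A, so these
  // compares read the overflow condition directly off the operands.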
3899  // (A - B) u>/u<= A --> B u>/u<= A
3900  if (A == Op1 && (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE))
3901    return new ICmpInst(Pred, B, A);
3902  // C u</u>= (C - D) --> C u</u>= D
3903  if (C == Op0 && (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE))
3904    return new ICmpInst(Pred, C, D);
3905  // (A - B) u>=/u< A --> B u>/u<= A  iff B != 0
3906  if (A == Op1 && (Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_ULT) &&
3907      isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
3908    return new ICmpInst(CmpInst::getFlippedStrictnessPredicate(Pred), B, A);
3909  // C u<=/u> (C - D) --> C u</u>= D  iff D != 0
3910  if (C == Op0 && (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_UGT) &&
3911      isKnownNonZero(D, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
3912    return new ICmpInst(CmpInst::getFlippedStrictnessPredicate(Pred), C, D);
3913
3914  // icmp (A-B), (C-B) -> icmp A, C for equalities or if there is no overflow.
3915  if (B && D && B == D && NoOp0WrapProblem && NoOp1WrapProblem)
3916    return new ICmpInst(Pred, A, C);
3917
3918  // icmp (A-B), (A-D) -> icmp D, B for equalities or if there is no overflow.
3919  if (A && C && A == C && NoOp0WrapProblem && NoOp1WrapProblem)
3920    return new ICmpInst(Pred, D, B);
3921
3922  // icmp (0-X) < cst --> X > -cst
3923  if (NoOp0WrapProblem && ICmpInst::isSigned(Pred)) {
3924    Value *X;
3925    if (match(BO0, m_Neg(m_Value(X))))
3926      if (Constant *RHSC = dyn_cast<Constant>(Op1))
3927        if (RHSC->isNotMinSignedValue())
3928          return new ICmpInst(I.getSwappedPredicate(), X,
3929                              ConstantExpr::getNeg(RHSC));
3930  }
3931
3932  BinaryOperator *SRem = nullptr;
3933  // icmp (srem X, Y), Y
3934  if (BO0 && BO0->getOpcode() == Instruction::SRem && Op1 == BO0->getOperand(1))
3935    SRem = BO0;
3936  // icmp Y, (srem X, Y)
3937  else if (BO1 && BO1->getOpcode() == Instruction::SRem &&
3938           Op0 == BO1->getOperand(1))
3939    SRem = BO1;
3940  if (SRem) {
3941    // We don't check hasOneUse to avoid increasing register pressure because
3942    // the value we use is the same value this instruction was already using.
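    // Sketch of why the folds below hold: for Y != 0, |X srem Y| < |Y|, so
    // the remainder can never equal Y, and Y is greater (signed) than the
    // remainder exactly when Y is positive and smaller exactly when Y is
    // negative. (For Y == 0 the srem is undefined, so any result is fine.)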
3943    switch (SRem == BO0 ? ICmpInst::getSwappedPredicate(Pred) : Pred) {
3944    default:
3945      break;
3946    case ICmpInst::ICMP_EQ:
3947      return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
3948    case ICmpInst::ICMP_NE:
3949      return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
3950    case ICmpInst::ICMP_SGT:
3951    case ICmpInst::ICMP_SGE:
3952      return new ICmpInst(ICmpInst::ICMP_SGT, SRem->getOperand(1),
3953                          Constant::getAllOnesValue(SRem->getType()));
3954    case ICmpInst::ICMP_SLT:
3955    case ICmpInst::ICMP_SLE:
3956      return new ICmpInst(ICmpInst::ICMP_SLT, SRem->getOperand(1),
3957                          Constant::getNullValue(SRem->getType()));
3958    }
3959  }
3960
3961  if (BO0 && BO1 && BO0->getOpcode() == BO1->getOpcode() && BO0->hasOneUse() &&
3962      BO1->hasOneUse() && BO0->getOperand(1) == BO1->getOperand(1)) {
3963    switch (BO0->getOpcode()) {
3964    default:
3965      break;
3966    case Instruction::Add:
3967    case Instruction::Sub:
3968    case Instruction::Xor: {
3969      if (I.isEquality()) // a+x icmp eq/ne b+x --> a icmp b
3970        return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
3971
3972      const APInt *C;
3973      if (match(BO0->getOperand(1), m_APInt(C))) {
3974        // icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b
3975        if (C->isSignMask()) {
3976          ICmpInst::Predicate NewPred =
3977              I.isSigned() ? I.getUnsignedPredicate() : I.getSignedPredicate();
3978          return new ICmpInst(NewPred, BO0->getOperand(0), BO1->getOperand(0));
3979        }
3980
3981        // icmp u/s (a ^ maxsignval), (b ^ maxsignval) --> icmp s/u' a, b
3982        if (BO0->getOpcode() == Instruction::Xor && C->isMaxSignedValue()) {
3983          ICmpInst::Predicate NewPred =
3984              I.isSigned() ? I.getUnsignedPredicate() : I.getSignedPredicate();
3985          NewPred = I.getSwappedPredicate(NewPred);
3986          return new ICmpInst(NewPred, BO0->getOperand(0), BO1->getOperand(0));
3987        }
3988      }
3989      break;
3990    }
3991    case Instruction::Mul: {
3992      if (!I.isEquality())
3993        break;
3994
3995      const APInt *C;
3996      if (match(BO0->getOperand(1), m_APInt(C)) && !C->isNullValue() &&
3997          !C->isOneValue()) {
3998        // icmp eq/ne (X * C), (Y * C) --> icmp (X & Mask), (Y & Mask)
3999        // Mask = -1 >> count-trailing-zeros(C).
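        // Sketch: write C = OddC << TZs with OddC odd. Then X*C == Y*C
        // (mod 2^N) iff OddC*X == OddC*Y (mod 2^(N-TZs)), and since OddC is
        // invertible mod a power of 2, iff X == Y in their low N-TZs bits.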
4000        if (unsigned TZs = C->countTrailingZeros()) {
4001          Constant *Mask = ConstantInt::get(
4002              BO0->getType(),
4003              APInt::getLowBitsSet(C->getBitWidth(), C->getBitWidth() - TZs));
4004          Value *And1 = Builder.CreateAnd(BO0->getOperand(0), Mask);
4005          Value *And2 = Builder.CreateAnd(BO1->getOperand(0), Mask);
4006          return new ICmpInst(Pred, And1, And2);
4007        }
4008        // If there are no trailing zeros in the multiplier, just eliminate
4009        // the multiplies (no masking is needed):
4010        // icmp eq/ne (X * C), (Y * C) --> icmp eq/ne X, Y
4011        return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
4012      }
4013      break;
4014    }
4015    case Instruction::UDiv:
4016    case Instruction::LShr:
4017      if (I.isSigned() || !BO0->isExact() || !BO1->isExact())
4018        break;
4019      return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
4020
4021    case Instruction::SDiv:
4022      if (!I.isEquality() || !BO0->isExact() || !BO1->isExact())
4023        break;
4024      return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
4025
4026    case Instruction::AShr:
4027      if (!BO0->isExact() || !BO1->isExact())
4028        break;
4029      return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
4030
4031    case Instruction::Shl: {
4032      bool NUW = BO0->hasNoUnsignedWrap() && BO1->hasNoUnsignedWrap();
4033      bool NSW = BO0->hasNoSignedWrap() && BO1->hasNoSignedWrap();
4034      if (!NUW && !NSW)
4035        break;
4036      if (!NSW && I.isSigned())
4037        break;
4038      return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
4039    }
4040    }
4041  }
4042
4043  if (BO0) {
4044    // Transform  A & (L - 1) `ult` L --> L != 0
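    // Sketch: A & (L - 1) is always u<= L - 1, which is u< L whenever L != 0;
    // when L == 0 nothing is u< 0, so the compare is exactly a test of L != 0.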
4045    auto LSubOne = m_Add(m_Specific(Op1), m_AllOnes());
4046    auto BitwiseAnd = m_c_And(m_Value(), LSubOne);
4047
4048    if (match(BO0, BitwiseAnd) && Pred == ICmpInst::ICMP_ULT) {
4049      auto *Zero = Constant::getNullValue(BO0->getType());
4050      return new ICmpInst(ICmpInst::ICMP_NE, Op1, Zero);
4051    }
4052  }
4053
4054  if (Value *V = foldUnsignedMultiplicationOverflowCheck(I))
4055    return replaceInstUsesWith(I, V);
4056
4057  if (Value *V = foldICmpWithLowBitMaskedVal(I, Builder))
4058    return replaceInstUsesWith(I, V);
4059
4060  if (Value *V = foldICmpWithTruncSignExtendedVal(I, Builder))
4061    return replaceInstUsesWith(I, V);
4062
4063  if (Value *V = foldShiftIntoShiftInAnotherHandOfAndInICmp(I, SQ, Builder))
4064    return replaceInstUsesWith(I, V);
4065
4066  return nullptr;
4067}
4068
4069/// Fold icmp Pred min|max(X, Y), X.
4070static Instruction *foldICmpWithMinMax(ICmpInst &Cmp) {
4071  ICmpInst::Predicate Pred = Cmp.getPredicate();
4072  Value *Op0 = Cmp.getOperand(0);
4073  Value *X = Cmp.getOperand(1);
4074
4075  // Canonicalize minimum or maximum operand to LHS of the icmp.
4076  if (match(X, m_c_SMin(m_Specific(Op0), m_Value())) ||
4077      match(X, m_c_SMax(m_Specific(Op0), m_Value())) ||
4078      match(X, m_c_UMin(m_Specific(Op0), m_Value())) ||
4079      match(X, m_c_UMax(m_Specific(Op0), m_Value()))) {
4080    std::swap(Op0, X);
4081    Pred = Cmp.getSwappedPredicate();
4082  }
4083
4084  Value *Y;
4085  if (match(Op0, m_c_SMin(m_Specific(X), m_Value(Y)))) {
4086    // smin(X, Y)  == X --> X s<= Y
4087    // smin(X, Y) s>= X --> X s<= Y
4088    if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_SGE)
4089      return new ICmpInst(ICmpInst::ICMP_SLE, X, Y);
4090
4091    // smin(X, Y) != X --> X s> Y
4092    // smin(X, Y) s< X --> X s> Y
4093    if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_SLT)
4094      return new ICmpInst(ICmpInst::ICMP_SGT, X, Y);
4095
4096    // These cases should be handled in InstSimplify:
4097    // smin(X, Y) s<= X --> true
4098    // smin(X, Y) s> X --> false
4099    return nullptr;
4100  }
4101
4102  if (match(Op0, m_c_SMax(m_Specific(X), m_Value(Y)))) {
4103    // smax(X, Y)  == X --> X s>= Y
4104    // smax(X, Y) s<= X --> X s>= Y
4105    if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_SLE)
4106      return new ICmpInst(ICmpInst::ICMP_SGE, X, Y);
4107
4108    // smax(X, Y) != X --> X s< Y
4109    // smax(X, Y) s> X --> X s< Y
4110    if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_SGT)
4111      return new ICmpInst(ICmpInst::ICMP_SLT, X, Y);
4112
4113    // These cases should be handled in InstSimplify:
4114    // smax(X, Y) s>= X --> true
4115    // smax(X, Y) s< X --> false
4116    return nullptr;
4117  }
4118
4119  if (match(Op0, m_c_UMin(m_Specific(X), m_Value(Y)))) {
4120    // umin(X, Y)  == X --> X u<= Y
4121    // umin(X, Y) u>= X --> X u<= Y
4122    if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_UGE)
4123      return new ICmpInst(ICmpInst::ICMP_ULE, X, Y);
4124
4125    // umin(X, Y) != X --> X u> Y
4126    // umin(X, Y) u< X --> X u> Y
4127    if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_ULT)
4128      return new ICmpInst(ICmpInst::ICMP_UGT, X, Y);
4129
4130    // These cases should be handled in InstSimplify:
4131    // umin(X, Y) u<= X --> true
4132    // umin(X, Y) u> X --> false
4133    return nullptr;
4134  }
4135
4136  if (match(Op0, m_c_UMax(m_Specific(X), m_Value(Y)))) {
4137    // umax(X, Y)  == X --> X u>= Y
4138    // umax(X, Y) u<= X --> X u>= Y
4139    if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_ULE)
4140      return new ICmpInst(ICmpInst::ICMP_UGE, X, Y);
4141
4142    // umax(X, Y) != X --> X u< Y
4143    // umax(X, Y) u> X --> X u< Y
4144    if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_UGT)
4145      return new ICmpInst(ICmpInst::ICMP_ULT, X, Y);
4146
4147    // These cases should be handled in InstSimplify:
4148    // umax(X, Y) u>= X --> true
4149    // umax(X, Y) u< X --> false
4150    return nullptr;
4151  }
4152
4153  return nullptr;
4154}
4155
4156Instruction *InstCombiner::foldICmpEquality(ICmpInst &I) {
4157  if (!I.isEquality())
4158    return nullptr;
4159
4160  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
4161  const CmpInst::Predicate Pred = I.getPredicate();
4162  Value *A, *B, *C, *D;
4163  if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
4164    if (A == Op1 || B == Op1) { // (A^B) == A  ->  B == 0
4165      Value *OtherVal = A == Op1 ? B : A;
4166      return new ICmpInst(Pred, OtherVal, Constant::getNullValue(A->getType()));
4167    }
4168
4169    if (match(Op1, m_Xor(m_Value(C), m_Value(D)))) {
4170      // A^c1 == C^c2 --> A == C^(c1^c2)
4171      ConstantInt *C1, *C2;
4172      if (match(B, m_ConstantInt(C1)) && match(D, m_ConstantInt(C2)) &&
4173          Op1->hasOneUse()) {
4174        Constant *NC = Builder.getInt(C1->getValue() ^ C2->getValue());
4175        Value *Xor = Builder.CreateXor(C, NC);
4176        return new ICmpInst(Pred, A, Xor);
4177      }
4178
4179      // A^B == A^D -> B == D
4180      if (A == C)
4181        return new ICmpInst(Pred, B, D);
4182      if (A == D)
4183        return new ICmpInst(Pred, B, C);
4184      if (B == C)
4185        return new ICmpInst(Pred, A, D);
4186      if (B == D)
4187        return new ICmpInst(Pred, A, C);
4188    }
4189  }
4190
4191  if (match(Op1, m_Xor(m_Value(A), m_Value(B))) && (A == Op0 || B == Op0)) {
4192    // A == (A^B)  ->  B == 0
4193    Value *OtherVal = A == Op0 ? B : A;
4194    return new ICmpInst(Pred, OtherVal, Constant::getNullValue(A->getType()));
4195  }
4196
4197  // (X&Z) == (Y&Z) -> (X^Y) & Z == 0
4198  if (match(Op0, m_OneUse(m_And(m_Value(A), m_Value(B)))) &&
4199      match(Op1, m_OneUse(m_And(m_Value(C), m_Value(D))))) {
4200    Value *X = nullptr, *Y = nullptr, *Z = nullptr;
4201
4202    if (A == C) {
4203      X = B;
4204      Y = D;
4205      Z = A;
4206    } else if (A == D) {
4207      X = B;
4208      Y = C;
4209      Z = A;
4210    } else if (B == C) {
4211      X = A;
4212      Y = D;
4213      Z = B;
4214    } else if (B == D) {
4215      X = A;
4216      Y = C;
4217      Z = B;
4218    }
4219
4220    if (X) { // Build (X^Y) & Z
4221      Op1 = Builder.CreateXor(X, Y);
4222      Op1 = Builder.CreateAnd(Op1, Z);
4223      I.setOperand(0, Op1);
4224      I.setOperand(1, Constant::getNullValue(Op1->getType()));
4225      return &I;
4226    }
4227  }
4228
4229  // Transform (zext A) == (B & (1<<X)-1) --> A == (trunc B)
4230  // and       (B & (1<<X)-1) == (zext A) --> A == (trunc B)
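  // E.g. (illustrative): (zext i8 %a to i32) == (%b & 255)
  //                      --> %a == (trunc i32 %b to i8)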
4231  ConstantInt *Cst1;
4232  if ((Op0->hasOneUse() && match(Op0, m_ZExt(m_Value(A))) &&
4233       match(Op1, m_And(m_Value(B), m_ConstantInt(Cst1)))) ||
4234      (Op1->hasOneUse() && match(Op0, m_And(m_Value(B), m_ConstantInt(Cst1))) &&
4235       match(Op1, m_ZExt(m_Value(A))))) {
4236    APInt Pow2 = Cst1->getValue() + 1;
4237    if (Pow2.isPowerOf2() && isa<IntegerType>(A->getType()) &&
4238        Pow2.logBase2() == cast<IntegerType>(A->getType())->getBitWidth())
4239      return new ICmpInst(Pred, A, Builder.CreateTrunc(B, A->getType()));
4240  }
4241
4242  // (A >> C) == (B >> C) --> (A^B) u< (1 << C)
4243  // For lshr and ashr pairs.
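  // Sketch: the shifts discard the low C bits, so the shifted values are equal
  // iff A and B agree on every bit at position C and above, i.e. all set bits
  // of A ^ B lie below bit C.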
4244  if ((match(Op0, m_OneUse(m_LShr(m_Value(A), m_ConstantInt(Cst1)))) &&
4245       match(Op1, m_OneUse(m_LShr(m_Value(B), m_Specific(Cst1))))) ||
4246      (match(Op0, m_OneUse(m_AShr(m_Value(A), m_ConstantInt(Cst1)))) &&
4247       match(Op1, m_OneUse(m_AShr(m_Value(B), m_Specific(Cst1)))))) {
4248    unsigned TypeBits = Cst1->getBitWidth();
4249    unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits);
4250    if (ShAmt < TypeBits && ShAmt != 0) {
4251      ICmpInst::Predicate NewPred =
4252          Pred == ICmpInst::ICMP_NE ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
4253      Value *Xor = Builder.CreateXor(A, B, I.getName() + ".unshifted");
4254      APInt CmpVal = APInt::getOneBitSet(TypeBits, ShAmt);
4255      return new ICmpInst(NewPred, Xor, Builder.getInt(CmpVal));
4256    }
4257  }
4258
4259  // (A << C) == (B << C) --> ((A^B) & (~0U >> C)) == 0
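  // Sketch: shifting left by C discards the high C bits, so the results are
  // equal iff A and B agree in their low BitWidth - C bits.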
4260  if (match(Op0, m_OneUse(m_Shl(m_Value(A), m_ConstantInt(Cst1)))) &&
4261      match(Op1, m_OneUse(m_Shl(m_Value(B), m_Specific(Cst1))))) {
4262    unsigned TypeBits = Cst1->getBitWidth();
4263    unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits);
4264    if (ShAmt < TypeBits && ShAmt != 0) {
4265      Value *Xor = Builder.CreateXor(A, B, I.getName() + ".unshifted");
4266      APInt AndVal = APInt::getLowBitsSet(TypeBits, TypeBits - ShAmt);
4267      Value *And = Builder.CreateAnd(Xor, Builder.getInt(AndVal),
4268                                      I.getName() + ".mask");
4269      return new ICmpInst(Pred, And, Constant::getNullValue(Cst1->getType()));
4270    }
4271  }
4272
4273  // Transform "icmp eq (trunc (lshr X, cst1)), cst" to
4274  // "icmp (and X, mask), cst"
4275  uint64_t ShAmt = 0;
4276  if (Op0->hasOneUse() &&
4277      match(Op0, m_Trunc(m_OneUse(m_LShr(m_Value(A), m_ConstantInt(ShAmt))))) &&
4278      match(Op1, m_ConstantInt(Cst1)) &&
4279      // Only do this when A has multiple uses.  This is most important to do
4280      // when it exposes other optimizations.
4281      !A->hasOneUse()) {
4282    unsigned ASize = cast<IntegerType>(A->getType())->getPrimitiveSizeInBits();
4283
4284    if (ShAmt < ASize) {
4285      APInt MaskV =
4286          APInt::getLowBitsSet(ASize, Op0->getType()->getPrimitiveSizeInBits());
4287      MaskV <<= ShAmt;
4288
4289      APInt CmpV = Cst1->getValue().zext(ASize);
4290      CmpV <<= ShAmt;
4291
4292      Value *Mask = Builder.CreateAnd(A, Builder.getInt(MaskV));
4293      return new ICmpInst(Pred, Mask, Builder.getInt(CmpV));
4294    }
4295  }
4296
4297  // If both operands are byte-swapped or bit-reversed, just compare the
4298  // original values.
4299  // TODO: Move this to a function similar to foldICmpIntrinsicWithConstant()
4300  // and handle more intrinsics.
4301  if ((match(Op0, m_BSwap(m_Value(A))) && match(Op1, m_BSwap(m_Value(B)))) ||
4302      (match(Op0, m_BitReverse(m_Value(A))) &&
4303       match(Op1, m_BitReverse(m_Value(B)))))
4304    return new ICmpInst(Pred, A, B);
4305
4306  // Canonicalize checking for a power-of-2-or-zero value:
4307  // (A & (A-1)) == 0 --> ctpop(A) < 2 (two commuted variants)
4308  // ((A-1) & A) != 0 --> ctpop(A) > 1 (two commuted variants)
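  // Sketch: A & (A - 1) clears the lowest set bit of A, so it is zero exactly
  // when A has at most one bit set, i.e. when ctpop(A) u< 2.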
4309  if (!match(Op0, m_OneUse(m_c_And(m_Add(m_Value(A), m_AllOnes()),
4310                                   m_Deferred(A)))) ||
4311      !match(Op1, m_ZeroInt()))
4312    A = nullptr;
4313
4314  // (A & -A) == A --> ctpop(A) < 2 (four commuted variants)
4315  // (-A & A) != A --> ctpop(A) > 1 (four commuted variants)
4316  if (match(Op0, m_OneUse(m_c_And(m_Neg(m_Specific(Op1)), m_Specific(Op1)))))
4317    A = Op1;
4318  else if (match(Op1,
4319                 m_OneUse(m_c_And(m_Neg(m_Specific(Op0)), m_Specific(Op0)))))
4320    A = Op0;
4321
4322  if (A) {
4323    Type *Ty = A->getType();
4324    CallInst *CtPop = Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, A);
4325    return Pred == ICmpInst::ICMP_EQ
4326        ? new ICmpInst(ICmpInst::ICMP_ULT, CtPop, ConstantInt::get(Ty, 2))
4327        : new ICmpInst(ICmpInst::ICMP_UGT, CtPop, ConstantInt::get(Ty, 1));
4328  }
4329
4330  return nullptr;
4331}
4332
4333static Instruction *foldICmpWithZextOrSext(ICmpInst &ICmp,
4334                                           InstCombiner::BuilderTy &Builder) {
4335  assert(isa<CastInst>(ICmp.getOperand(0)) && "Expected cast for operand 0");
4336  auto *CastOp0 = cast<CastInst>(ICmp.getOperand(0));
4337  Value *X;
4338  if (!match(CastOp0, m_ZExtOrSExt(m_Value(X))))
4339    return nullptr;
4340
4341  bool IsSignedExt = CastOp0->getOpcode() == Instruction::SExt;
4342  bool IsSignedCmp = ICmp.isSigned();
4343  if (auto *CastOp1 = dyn_cast<CastInst>(ICmp.getOperand(1))) {
4344    // If the signedness of the two casts doesn't agree (i.e. one is a sext
4345    // and the other is a zext), then we can't handle this.
4346    // TODO: This is too strict. We can handle some predicates (equality?).
4347    if (CastOp0->getOpcode() != CastOp1->getOpcode())
4348      return nullptr;
4349
4350    // Not an extension from the same type?
4351    Value *Y = CastOp1->getOperand(0);
4352    Type *XTy = X->getType(), *YTy = Y->getType();
4353    if (XTy != YTy) {
4354      // One of the casts must have one use because we are creating a new cast.
4355      if (!CastOp0->hasOneUse() && !CastOp1->hasOneUse())
4356        return nullptr;
4357      // Extend the narrower operand to the type of the wider operand.
4358      if (XTy->getScalarSizeInBits() < YTy->getScalarSizeInBits())
4359        X = Builder.CreateCast(CastOp0->getOpcode(), X, YTy);
4360      else if (YTy->getScalarSizeInBits() < XTy->getScalarSizeInBits())
4361        Y = Builder.CreateCast(CastOp0->getOpcode(), Y, XTy);
4362      else
4363        return nullptr;
4364    }
4365
4366    // (zext X) == (zext Y) --> X == Y
4367    // (sext X) == (sext Y) --> X == Y
4368    if (ICmp.isEquality())
4369      return new ICmpInst(ICmp.getPredicate(), X, Y);
4370
4371    // A signed comparison of sign extended values simplifies into a
4372    // signed comparison.
4373    if (IsSignedCmp && IsSignedExt)
4374      return new ICmpInst(ICmp.getPredicate(), X, Y);
4375
4376    // The other three cases all fold into an unsigned comparison.
4377    return new ICmpInst(ICmp.getUnsignedPredicate(), X, Y);
4378  }
4379
4380  // Below here, we are only folding a compare with constant.
4381  auto *C = dyn_cast<Constant>(ICmp.getOperand(1));
4382  if (!C)
4383    return nullptr;
4384
4385  // Compute the constant that would happen if we truncated to SrcTy then
4386  // re-extended to DestTy.
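  // E.g. (illustrative): for (sext i8 %x to i32) and C = 128, truncation gives
  // -128 and sign-extension gives back -128 != 128, so C is not representable
  // in the narrow type.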
4387  Type *SrcTy = CastOp0->getSrcTy();
4388  Type *DestTy = CastOp0->getDestTy();
4389  Constant *Res1 = ConstantExpr::getTrunc(C, SrcTy);
4390  Constant *Res2 = ConstantExpr::getCast(CastOp0->getOpcode(), Res1, DestTy);
4391
4392  // If the re-extended constant didn't change...
4393  if (Res2 == C) {
4394    if (ICmp.isEquality())
4395      return new ICmpInst(ICmp.getPredicate(), X, Res1);
4396
4397    // A signed comparison of sign extended values simplifies into a
4398    // signed comparison.
4399    if (IsSignedExt && IsSignedCmp)
4400      return new ICmpInst(ICmp.getPredicate(), X, Res1);
4401
4402    // The other three cases all fold into an unsigned comparison.
4403    return new ICmpInst(ICmp.getUnsignedPredicate(), X, Res1);
4404  }
4405
4406  // The re-extended constant changed, partly changed (in the case of a vector),
4407  // or could not be determined to be equal (in the case of a constant
4408  // expression), so the constant cannot be represented in the shorter type.
4409  // All the cases that fold to true or false will have already been handled
4410  // by SimplifyICmpInst, so only deal with the tricky case.
4411  if (IsSignedCmp || !IsSignedExt || !isa<ConstantInt>(C))
4412    return nullptr;
4413
4414  // Is source op positive?
4415  // icmp ult (sext X), C --> icmp sgt X, -1
4416  if (ICmp.getPredicate() == ICmpInst::ICMP_ULT)
4417    return new ICmpInst(CmpInst::ICMP_SGT, X, Constant::getAllOnesValue(SrcTy));
4418
4419  // Is source op negative?
4420  // icmp ugt (sext X), C --> icmp slt X, 0
4421  assert(ICmp.getPredicate() == ICmpInst::ICMP_UGT && "ICmp should be folded!");
4422  return new ICmpInst(CmpInst::ICMP_SLT, X, Constant::getNullValue(SrcTy));
4423}
4424
4425/// Handle icmp (cast x), (cast or constant).
4426Instruction *InstCombiner::foldICmpWithCastOp(ICmpInst &ICmp) {
4427  auto *CastOp0 = dyn_cast<CastInst>(ICmp.getOperand(0));
4428  if (!CastOp0)
4429    return nullptr;
4430  if (!isa<Constant>(ICmp.getOperand(1)) && !isa<CastInst>(ICmp.getOperand(1)))
4431    return nullptr;
4432
4433  Value *Op0Src = CastOp0->getOperand(0);
4434  Type *SrcTy = CastOp0->getSrcTy();
4435  Type *DestTy = CastOp0->getDestTy();
4436
4437  // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
4438  // integer type is the same size as the pointer type.
4439  auto CompatibleSizes = [&](Type *SrcTy, Type *DestTy) {
4440    if (isa<VectorType>(SrcTy)) {
4441      SrcTy = cast<VectorType>(SrcTy)->getElementType();
4442      DestTy = cast<VectorType>(DestTy)->getElementType();
4443    }
4444    return DL.getPointerTypeSizeInBits(SrcTy) == DestTy->getIntegerBitWidth();
4445  };
4446  if (CastOp0->getOpcode() == Instruction::PtrToInt &&
4447      CompatibleSizes(SrcTy, DestTy)) {
4448    Value *NewOp1 = nullptr;
4449    if (auto *PtrToIntOp1 = dyn_cast<PtrToIntOperator>(ICmp.getOperand(1))) {
4450      Value *PtrSrc = PtrToIntOp1->getOperand(0);
4451      if (PtrSrc->getType()->getPointerAddressSpace() ==
4452          Op0Src->getType()->getPointerAddressSpace()) {
4453        NewOp1 = PtrToIntOp1->getOperand(0);
4454        // If the pointer types don't match, insert a bitcast.
4455        if (Op0Src->getType() != NewOp1->getType())
4456          NewOp1 = Builder.CreateBitCast(NewOp1, Op0Src->getType());
4457      }
4458    } else if (auto *RHSC = dyn_cast<Constant>(ICmp.getOperand(1))) {
4459      NewOp1 = ConstantExpr::getIntToPtr(RHSC, SrcTy);
4460    }
4461
4462    if (NewOp1)
4463      return new ICmpInst(ICmp.getPredicate(), Op0Src, NewOp1);
4464  }
4465
4466  return foldICmpWithZextOrSext(ICmp, Builder);
4467}
4468
4469static bool isNeutralValue(Instruction::BinaryOps BinaryOp, Value *RHS) {
4470  switch (BinaryOp) {
4471    default:
4472      llvm_unreachable("Unsupported binary op");
4473    case Instruction::Add:
4474    case Instruction::Sub:
4475      return match(RHS, m_Zero());
4476    case Instruction::Mul:
4477      return match(RHS, m_One());
4478  }
4479}
4480
4481OverflowResult InstCombiner::computeOverflow(
4482    Instruction::BinaryOps BinaryOp, bool IsSigned,
4483    Value *LHS, Value *RHS, Instruction *CxtI) const {
4484  switch (BinaryOp) {
4485    default:
4486      llvm_unreachable("Unsupported binary op");
4487    case Instruction::Add:
4488      if (IsSigned)
4489        return computeOverflowForSignedAdd(LHS, RHS, CxtI);
4490      else
4491        return computeOverflowForUnsignedAdd(LHS, RHS, CxtI);
4492    case Instruction::Sub:
4493      if (IsSigned)
4494        return computeOverflowForSignedSub(LHS, RHS, CxtI);
4495      else
4496        return computeOverflowForUnsignedSub(LHS, RHS, CxtI);
4497    case Instruction::Mul:
4498      if (IsSigned)
4499        return computeOverflowForSignedMul(LHS, RHS, CxtI);
4500      else
4501        return computeOverflowForUnsignedMul(LHS, RHS, CxtI);
4502  }
4503}
4504
4505bool InstCombiner::OptimizeOverflowCheck(
4506    Instruction::BinaryOps BinaryOp, bool IsSigned, Value *LHS, Value *RHS,
4507    Instruction &OrigI, Value *&Result, Constant *&Overflow) {
4508  if (OrigI.isCommutative() && isa<Constant>(LHS) && !isa<Constant>(RHS))
4509    std::swap(LHS, RHS);
4510
4511  // If the overflow check was an add followed by a compare, the insertion point
4512  // may be pointing to the compare.  We want to insert the new instructions
4513  // before the add in case there are uses of the add between the add and the
4514  // compare.
4515  Builder.SetInsertPoint(&OrigI);
4516
4517  if (isNeutralValue(BinaryOp, RHS)) {
4518    Result = LHS;
4519    Overflow = Builder.getFalse();
4520    return true;
4521  }
4522
4523  switch (computeOverflow(BinaryOp, IsSigned, LHS, RHS, &OrigI)) {
4524    case OverflowResult::MayOverflow:
4525      return false;
4526    case OverflowResult::AlwaysOverflowsLow:
4527    case OverflowResult::AlwaysOverflowsHigh:
4528      Result = Builder.CreateBinOp(BinaryOp, LHS, RHS);
4529      Result->takeName(&OrigI);
4530      Overflow = Builder.getTrue();
4531      return true;
4532    case OverflowResult::NeverOverflows:
4533      Result = Builder.CreateBinOp(BinaryOp, LHS, RHS);
4534      Result->takeName(&OrigI);
4535      Overflow = Builder.getFalse();
4536      if (auto *Inst = dyn_cast<Instruction>(Result)) {
4537        if (IsSigned)
4538          Inst->setHasNoSignedWrap();
4539        else
4540          Inst->setHasNoUnsignedWrap();
4541      }
4542      return true;
4543  }
4544
4545  llvm_unreachable("Unexpected overflow result");
4546}
4547
4548/// Recognize and process idiom involving test for multiplication
4549/// overflow.
4550///
4551/// The caller has matched a pattern of the form:
4552  ///   I = cmp u (mul(zext A, zext B), V)
4553/// The function checks if this is a test for overflow and if so replaces
4554/// multiplication with call to 'mul.with.overflow' intrinsic.
4555///
4556/// \param I Compare instruction.
4557  /// \param MulVal Result of 'mul' instruction.  It is one of the arguments of
4558///               the compare instruction.  Must be of integer type.
4559/// \param OtherVal The other argument of compare instruction.
4560/// \returns Instruction which must replace the compare instruction, NULL if no
4561///          replacement required.
4562static Instruction *processUMulZExtIdiom(ICmpInst &I, Value *MulVal,
4563                                         Value *OtherVal, InstCombiner &IC) {
4564  // Don't bother doing this transformation for pointers, don't do it for
4565  // vectors.
4566  if (!isa<IntegerType>(MulVal->getType()))
4567    return nullptr;
4568
4569  assert(I.getOperand(0) == MulVal || I.getOperand(1) == MulVal);
4570  assert(I.getOperand(0) == OtherVal || I.getOperand(1) == OtherVal);
4571  auto *MulInstr = dyn_cast<Instruction>(MulVal);
4572  if (!MulInstr)
4573    return nullptr;
4574  assert(MulInstr->getOpcode() == Instruction::Mul);
4575
4576  auto *LHS = cast<ZExtOperator>(MulInstr->getOperand(0)),
4577       *RHS = cast<ZExtOperator>(MulInstr->getOperand(1));
4578  assert(LHS->getOpcode() == Instruction::ZExt);
4579  assert(RHS->getOpcode() == Instruction::ZExt);
4580  Value *A = LHS->getOperand(0), *B = RHS->getOperand(0);
4581
4582  // Calculate type and width of the result produced by mul.with.overflow.
4583  Type *TyA = A->getType(), *TyB = B->getType();
4584  unsigned WidthA = TyA->getPrimitiveSizeInBits(),
4585           WidthB = TyB->getPrimitiveSizeInBits();
4586  unsigned MulWidth;
4587  Type *MulType;
4588  if (WidthB > WidthA) {
4589    MulWidth = WidthB;
4590    MulType = TyB;
4591  } else {
4592    MulWidth = WidthA;
4593    MulType = TyA;
4594  }
4595
4596  // In order to replace the original mul with a narrower mul.with.overflow,
4597  // all uses must ignore the upper bits of the product.  The number of used
4598  // low bits must not be greater than the width of mul.with.overflow.
4599  if (MulVal->hasNUsesOrMore(2))
4600    for (User *U : MulVal->users()) {
4601      if (U == &I)
4602        continue;
4603      if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
4604        // Check if truncation ignores bits above MulWidth.
4605        unsigned TruncWidth = TI->getType()->getPrimitiveSizeInBits();
4606        if (TruncWidth > MulWidth)
4607          return nullptr;
4608      } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
4609        // Check if AND ignores bits above MulWidth.
4610        if (BO->getOpcode() != Instruction::And)
4611          return nullptr;
4612        if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) {
4613          const APInt &CVal = CI->getValue();
4614          if (CVal.getBitWidth() - CVal.countLeadingZeros() > MulWidth)
4615            return nullptr;
4616        } else {
4617          // In this case we could have the operand of the binary operation
4618          // being defined in another block, and performing the replacement
4619          // could break the dominance relation.
4620          return nullptr;
4621        }
4622      } else {
4623        // Other uses prohibit this transformation.
4624        return nullptr;
4625      }
4626    }
4627
4628  // Recognize patterns
4629  switch (I.getPredicate()) {
4630  case ICmpInst::ICMP_EQ:
4631  case ICmpInst::ICMP_NE:
4632    // Recognize pattern:
4633    //   mulval = mul(zext A, zext B)
4634    //   cmp eq/neq mulval, zext trunc mulval
4635    if (ZExtInst *Zext = dyn_cast<ZExtInst>(OtherVal))
4636      if (Zext->hasOneUse()) {
4637        Value *ZextArg = Zext->getOperand(0);
4638        if (TruncInst *Trunc = dyn_cast<TruncInst>(ZextArg))
4639          if (Trunc->getType()->getPrimitiveSizeInBits() == MulWidth)
4640            break; // Recognized
4641      }
4642
4643    // Recognize pattern:
4644    //   mulval = mul(zext A, zext B)
4645    //   cmp eq/neq mulval, and(mulval, mask), mask selects low MulWidth bits.
4646    ConstantInt *CI;
4647    Value *ValToMask;
4648    if (match(OtherVal, m_And(m_Value(ValToMask), m_ConstantInt(CI)))) {
4649      if (ValToMask != MulVal)
4650        return nullptr;
4651      const APInt &CVal = CI->getValue() + 1;
4652      if (CVal.isPowerOf2()) {
4653        unsigned MaskWidth = CVal.logBase2();
4654        if (MaskWidth == MulWidth)
4655          break; // Recognized
4656      }
4657    }
4658    return nullptr;
4659
4660  case ICmpInst::ICMP_UGT:
4661    // Recognize pattern:
4662    //   mulval = mul(zext A, zext B)
4663    //   cmp ugt mulval, max
4664    if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
4665      APInt MaxVal = APInt::getMaxValue(MulWidth);
4666      MaxVal = MaxVal.zext(CI->getBitWidth());
4667      if (MaxVal.eq(CI->getValue()))
4668        break; // Recognized
4669    }
4670    return nullptr;
4671
4672  case ICmpInst::ICMP_UGE:
4673    // Recognize pattern:
4674    //   mulval = mul(zext A, zext B)
4675    //   cmp uge mulval, max+1
4676    if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
4677      APInt MaxVal = APInt::getOneBitSet(CI->getBitWidth(), MulWidth);
4678      if (MaxVal.eq(CI->getValue()))
4679        break; // Recognized
4680    }
4681    return nullptr;
4682
4683  case ICmpInst::ICMP_ULE:
4684    // Recognize pattern:
4685    //   mulval = mul(zext A, zext B)
4686    //   cmp ule mulval, max
4687    if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
4688      APInt MaxVal = APInt::getMaxValue(MulWidth);
4689      MaxVal = MaxVal.zext(CI->getBitWidth());
4690      if (MaxVal.eq(CI->getValue()))
4691        break; // Recognized
4692    }
4693    return nullptr;
4694
4695  case ICmpInst::ICMP_ULT:
4696    // Recognize pattern:
4697    //   mulval = mul(zext A, zext B)
4698    //   cmp ult mulval, max + 1
4699    if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
4700      APInt MaxVal = APInt::getOneBitSet(CI->getBitWidth(), MulWidth);
4701      if (MaxVal.eq(CI->getValue()))
4702        break; // Recognized
4703    }
4704    return nullptr;
4705
4706  default:
4707    return nullptr;
4708  }
4709
4710  InstCombiner::BuilderTy &Builder = IC.Builder;
4711  Builder.SetInsertPoint(MulInstr);
4712
4713  // Replace: mul(zext A, zext B) --> mul.with.overflow(A, B)
4714  Value *MulA = A, *MulB = B;
4715  if (WidthA < MulWidth)
4716    MulA = Builder.CreateZExt(A, MulType);
4717  if (WidthB < MulWidth)
4718    MulB = Builder.CreateZExt(B, MulType);
4719  Function *F = Intrinsic::getDeclaration(
4720      I.getModule(), Intrinsic::umul_with_overflow, MulType);
4721  CallInst *Call = Builder.CreateCall(F, {MulA, MulB}, "umul");
4722  IC.Worklist.Add(MulInstr);
4723
4724  // If there are uses of the mul result other than the comparison, we know
4725  // that they are truncation or binary AND. Change them to use the result of
4726  // mul.with.overflow and adjust the mask/size accordingly.
4727  if (MulVal->hasNUsesOrMore(2)) {
4728    Value *Mul = Builder.CreateExtractValue(Call, 0, "umul.value");
4729    for (auto UI = MulVal->user_begin(), UE = MulVal->user_end(); UI != UE;) {
4730      User *U = *UI++;
4731      if (U == &I || U == OtherVal)
4732        continue;
4733      if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
4734        if (TI->getType()->getPrimitiveSizeInBits() == MulWidth)
4735          IC.replaceInstUsesWith(*TI, Mul);
4736        else
4737          TI->setOperand(0, Mul);
4738      } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
4739        assert(BO->getOpcode() == Instruction::And);
4740        // Replace (mul & mask) --> zext (mul.with.overflow & short_mask)
4741        ConstantInt *CI = cast<ConstantInt>(BO->getOperand(1));
4742        APInt ShortMask = CI->getValue().trunc(MulWidth);
4743        Value *ShortAnd = Builder.CreateAnd(Mul, ShortMask);
4744        Instruction *Zext =
4745            cast<Instruction>(Builder.CreateZExt(ShortAnd, BO->getType()));
4746        IC.Worklist.Add(Zext);
4747        IC.replaceInstUsesWith(*BO, Zext);
4748      } else {
4749        llvm_unreachable("Unexpected Binary operation");
4750      }
4751      IC.Worklist.Add(cast<Instruction>(U));
4752    }
4753  }
4754  if (isa<Instruction>(OtherVal))
4755    IC.Worklist.Add(cast<Instruction>(OtherVal));
4756
4757  // The original icmp gets replaced with the overflow value, maybe inverted
4758  // depending on predicate.
4759  bool Inverse = false;
4760  switch (I.getPredicate()) {
4761  case ICmpInst::ICMP_NE:
4762    break;
4763  case ICmpInst::ICMP_EQ:
4764    Inverse = true;
4765    break;
4766  case ICmpInst::ICMP_UGT:
4767  case ICmpInst::ICMP_UGE:
4768    if (I.getOperand(0) == MulVal)
4769      break;
4770    Inverse = true;
4771    break;
4772  case ICmpInst::ICMP_ULT:
4773  case ICmpInst::ICMP_ULE:
4774    if (I.getOperand(1) == MulVal)
4775      break;
4776    Inverse = true;
4777    break;
4778  default:
4779    llvm_unreachable("Unexpected predicate");
4780  }
4781  if (Inverse) {
4782    Value *Res = Builder.CreateExtractValue(Call, 1);
4783    return BinaryOperator::CreateNot(Res);
4784  }
4785
4786  return ExtractValueInst::Create(Call, 1);
4787}
4788
4789/// When performing a comparison against a constant, it is possible that not all
4790/// the bits in the LHS are demanded. This helper method computes the mask that
4791/// IS demanded.
4792static APInt getDemandedBitsLHSMask(ICmpInst &I, unsigned BitWidth) {
4793  const APInt *RHS;
4794  if (!match(I.getOperand(1), m_APInt(RHS)))
4795    return APInt::getAllOnesValue(BitWidth);
4796
4797  // If this is a normal comparison, it demands all bits. If it is a sign bit
4798  // comparison, it only demands the sign bit.
4799  bool UnusedBit;
4800  if (isSignBitCheck(I.getPredicate(), *RHS, UnusedBit))
4801    return APInt::getSignMask(BitWidth);
4802
4803  switch (I.getPredicate()) {
4804  // For a UGT comparison, we don't care about any bits that
4805  // correspond to the trailing ones of the comparand.  The value of these
4806  // bits doesn't impact the outcome of the comparison, because any value
4807  // greater than the RHS must differ in a bit higher than these due to carry.
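  // E.g. (illustrative): for X u> 0b0111, any X that is greater must have a
  // bit set at position 3 or above, so bits 0..2 of X are not demanded.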
4808  case ICmpInst::ICMP_UGT:
4809    return APInt::getBitsSetFrom(BitWidth, RHS->countTrailingOnes());
4810
4811  // Similarly, for a ULT comparison, we don't care about the trailing zeros.
4812  // Any value less than the RHS must differ in a higher bit because of carries.
4813  case ICmpInst::ICMP_ULT:
4814    return APInt::getBitsSetFrom(BitWidth, RHS->countTrailingZeros());
4815
4816  default:
4817    return APInt::getAllOnesValue(BitWidth);
4818  }
4819}
4820
4821/// Check if the order of \p Op0 and \p Op1 as operands in an ICmpInst
4822/// should be swapped.
4823/// The decision is based on how many times these two operands are reused
4824/// as subtract operands and their positions in those instructions.
4825/// The rationale is that several architectures use the same instruction for
4826/// both subtract and cmp. Thus, it is better if the order of those operands
4827/// match.
4828/// \return true if Op0 and Op1 should be swapped.
4829static bool swapMayExposeCSEOpportunities(const Value *Op0, const Value *Op1) {
4830  // Filter out pointer values as those cannot appear directly in subtract.
4831  // FIXME: we may want to go through inttoptrs or bitcasts.
4832  if (Op0->getType()->isPointerTy())
4833    return false;
4834  // If a subtract already has the same operands as a compare, swapping would be
4835  // bad. If a subtract has the same operands as a compare but in reverse order,
4836  // then swapping is good.
4837  int GoodToSwap = 0;
4838  for (const User *U : Op0->users()) {
4839    if (match(U, m_Sub(m_Specific(Op1), m_Specific(Op0))))
4840      GoodToSwap++;
4841    else if (match(U, m_Sub(m_Specific(Op0), m_Specific(Op1))))
4842      GoodToSwap--;
4843  }
4844  return GoodToSwap > 0;
4845}
4846
4847/// Check that one use is in the same block as the definition and all
4848/// other uses are in blocks dominated by a given block.
4849///
4850/// \param DI Definition
4851/// \param UI Use
4852/// \param DB Block that must dominate all uses of \p DI outside
4853///           the parent block
4854/// \return true when \p UI is the only use of \p DI in the parent block
4855/// and all other uses of \p DI are in blocks dominated by \p DB.
4856///
4857bool InstCombiner::dominatesAllUses(const Instruction *DI,
4858                                    const Instruction *UI,
4859                                    const BasicBlock *DB) const {
4860  assert(DI && UI && "Instruction not defined\n");
4861  // Ignore incomplete definitions.
4862  if (!DI->getParent())
4863    return false;
4864  // DI and UI must be in the same block.
4865  if (DI->getParent() != UI->getParent())
4866    return false;
4867  // Protect from self-referencing blocks.
4868  if (DI->getParent() == DB)
4869    return false;
4870  for (const User *U : DI->users()) {
4871    auto *Usr = cast<Instruction>(U);
4872    if (Usr != UI && !DT.dominates(DB, Usr->getParent()))
4873      return false;
4874  }
4875  return true;
4876}
4877
4878/// Return true when the instruction sequence within a block is select-cmp-br.
4879static bool isChainSelectCmpBranch(const SelectInst *SI) {
4880  const BasicBlock *BB = SI->getParent();
4881  if (!BB)
4882    return false;
4883  auto *BI = dyn_cast_or_null<BranchInst>(BB->getTerminator());
4884  if (!BI || BI->getNumSuccessors() != 2)
4885    return false;
4886  auto *IC = dyn_cast<ICmpInst>(BI->getCondition());
4887  if (!IC || (IC->getOperand(0) != SI && IC->getOperand(1) != SI))
4888    return false;
4889  return true;
4890}
4891
4892/// True when a select result is replaced by one of its operands
4893/// in select-icmp sequence. This will eventually result in the elimination
4894/// of the select.
4895///
4896/// \param SI    Select instruction
4897/// \param Icmp  Compare instruction
4898/// \param SIOpd Operand that replaces the select
4899///
4900/// Notes:
4901/// - The replacement is global and requires dominator information
4902/// - The caller is responsible for the actual replacement
4903///
4904/// Example:
4905///
4906/// entry:
4907///  %4 = select i1 %3, %C* %0, %C* null
4908///  %5 = icmp eq %C* %4, null
4909///  br i1 %5, label %9, label %7
4910///  ...
4911///  ; <label>:7                                       ; preds = %entry
4912///  %8 = getelementptr inbounds %C* %4, i64 0, i32 0
4913///  ...
4914///
4915/// can be transformed to
4916///
4917///  %5 = icmp eq %C* %0, null
4918///  %6 = select i1 %3, i1 %5, i1 true
4919///  br i1 %6, label %9, label %7
4920///  ...
4921///  ; <label>:7                                       ; preds = %entry
4922///  %8 = getelementptr inbounds %C* %0, i64 0, i32 0  // replace by %0!
4923///
4924/// Similar when the first operand of the select is a constant or/and
4925/// the compare is for not equal rather than equal.
4926///
4927  /// NOTE: The function is only called when the select and compare constants
4928  /// are equal; the optimization can work only for EQ predicates. This is not
4929  /// a major restriction since a NE compare should be 'normalized' to an equal
4930  /// compare, which usually happens in the combiner; the test case
4931  /// select-cmp-br.ll checks for it.
4932bool InstCombiner::replacedSelectWithOperand(SelectInst *SI,
4933                                             const ICmpInst *Icmp,
4934                                             const unsigned SIOpd) {
4935  assert((SIOpd == 1 || SIOpd == 2) && "Invalid select operand!");
4936  if (isChainSelectCmpBranch(SI) && Icmp->getPredicate() == ICmpInst::ICMP_EQ) {
4937    BasicBlock *Succ = SI->getParent()->getTerminator()->getSuccessor(1);
4938    // The check for the single predecessor is not the best that can be
4939    // done. But it protects efficiently against cases like when SI's
4940    // home block has two successors, Succ and Succ1, and Succ1 is a
4941    // predecessor of Succ. Then SI can't be replaced by SIOpd because the use
4942    // that gets replaced can be reached on either path. So the uniqueness check
4943    // guarantees that the path that all uses of SI (outside SI's parent) are on
4944    // is disjoint from all other paths out of SI. But that information
4945    // is more expensive to compute, and the trade-off here is in favor
4946    // of compile-time. It should also be noticed that we check for a single
4947    // predecessor and not only uniqueness. This is to handle the situation when
4948    // Succ and Succ1 point to the same basic block.
4949    if (Succ->getSinglePredecessor() && dominatesAllUses(SI, Icmp, Succ)) {
4950      NumSel++;
4951      SI->replaceUsesOutsideBlock(SI->getOperand(SIOpd), SI->getParent());
4952      return true;
4953    }
4954  }
4955  return false;
4956}
4957
4958/// Try to fold the comparison based on range information we can get by checking
4959/// whether bits are known to be zero or one in the inputs.
4960Instruction *InstCombiner::foldICmpUsingKnownBits(ICmpInst &I) {
4961  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
4962  Type *Ty = Op0->getType();
4963  ICmpInst::Predicate Pred = I.getPredicate();
4964
4965  // Get scalar or pointer size.
4966  unsigned BitWidth = Ty->isIntOrIntVectorTy()
4967                          ? Ty->getScalarSizeInBits()
4968                          : DL.getPointerTypeSizeInBits(Ty->getScalarType());
4969
4970  if (!BitWidth)
4971    return nullptr;
4972
4973  KnownBits Op0Known(BitWidth);
4974  KnownBits Op1Known(BitWidth);
4975
4976  if (SimplifyDemandedBits(&I, 0,
4977                           getDemandedBitsLHSMask(I, BitWidth),
4978                           Op0Known, 0))
4979    return &I;
4980
4981  if (SimplifyDemandedBits(&I, 1, APInt::getAllOnesValue(BitWidth),
4982                           Op1Known, 0))
4983    return &I;
4984
4985  // Given the known and unknown bits, compute a range that the LHS could be
4986  // in.  Compute the Min, Max and RHS values based on the known bits. For the
4987  // EQ and NE we use unsigned values.
4988  APInt Op0Min(BitWidth, 0), Op0Max(BitWidth, 0);
4989  APInt Op1Min(BitWidth, 0), Op1Max(BitWidth, 0);
4990  if (I.isSigned()) {
4991    computeSignedMinMaxValuesFromKnownBits(Op0Known, Op0Min, Op0Max);
4992    computeSignedMinMaxValuesFromKnownBits(Op1Known, Op1Min, Op1Max);
4993  } else {
4994    computeUnsignedMinMaxValuesFromKnownBits(Op0Known, Op0Min, Op0Max);
4995    computeUnsignedMinMaxValuesFromKnownBits(Op1Known, Op1Min, Op1Max);
4996  }
4997
4998  // If Min and Max are known to be the same, then SimplifyDemandedBits figured
4999  // out that the LHS or RHS is a constant. Constant fold this now, so that
5000  // code below can assume that Min != Max.
5001  if (!isa<Constant>(Op0) && Op0Min == Op0Max)
5002    return new ICmpInst(Pred, ConstantExpr::getIntegerValue(Ty, Op0Min), Op1);
5003  if (!isa<Constant>(Op1) && Op1Min == Op1Max)
5004    return new ICmpInst(Pred, Op0, ConstantExpr::getIntegerValue(Ty, Op1Min));
5005
5006  // Based on the range information we know about the LHS, see if we can
5007  // simplify this comparison.  For example, (x&4) < 8 is always true.
5008  switch (Pred) {
5009  default:
5010    llvm_unreachable("Unknown icmp opcode!");
5011  case ICmpInst::ICMP_EQ:
5012  case ICmpInst::ICMP_NE: {
5013    if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max)) {
5014      return Pred == CmpInst::ICMP_EQ
5015                 ? replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()))
5016                 : replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5017    }
5018
5019    // If all bits are known zero except for one, then we know at most one bit
5020    // is set. If the comparison is against zero, then this is a check to see if
5021    // *that* bit is set.
5022    APInt Op0KnownZeroInverted = ~Op0Known.Zero;
5023    if (Op1Known.isZero()) {
5024      // If the LHS is an AND with the same constant, look through it.
5025      Value *LHS = nullptr;
5026      const APInt *LHSC;
5027      if (!match(Op0, m_And(m_Value(LHS), m_APInt(LHSC))) ||
5028          *LHSC != Op0KnownZeroInverted)
5029        LHS = Op0;
5030
5031      Value *X;
5032      if (match(LHS, m_Shl(m_One(), m_Value(X)))) {
5033        APInt ValToCheck = Op0KnownZeroInverted;
5034        Type *XTy = X->getType();
5035        if (ValToCheck.isPowerOf2()) {
5036          // ((1 << X) & 8) == 0 -> X != 3
5037          // ((1 << X) & 8) != 0 -> X == 3
5038          auto *CmpC = ConstantInt::get(XTy, ValToCheck.countTrailingZeros());
5039          auto NewPred = ICmpInst::getInversePredicate(Pred);
5040          return new ICmpInst(NewPred, X, CmpC);
5041        } else if ((++ValToCheck).isPowerOf2()) {
5042          // ((1 << X) & 7) == 0 -> X >= 3
5043          // ((1 << X) & 7) != 0 -> X  < 3
5044          auto *CmpC = ConstantInt::get(XTy, ValToCheck.countTrailingZeros());
5045          auto NewPred =
5046              Pred == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGE : CmpInst::ICMP_ULT;
5047          return new ICmpInst(NewPred, X, CmpC);
5048        }
5049      }
5050
5051      // Check if the LHS is (power-of-2 >>u X), e.g. 8 >>u X, and only bit 0 of Op0 can be set.
5052      const APInt *CI;
5053      if (Op0KnownZeroInverted.isOneValue() &&
5054          match(LHS, m_LShr(m_Power2(CI), m_Value(X)))) {
5055        // ((8 >>u X) & 1) == 0 -> X != 3
5056        // ((8 >>u X) & 1) != 0 -> X == 3
5057        unsigned CmpVal = CI->countTrailingZeros();
5058        auto NewPred = ICmpInst::getInversePredicate(Pred);
5059        return new ICmpInst(NewPred, X, ConstantInt::get(X->getType(), CmpVal));
5060      }
5061    }
5062    break;
5063  }
5064  case ICmpInst::ICMP_ULT: {
5065    if (Op0Max.ult(Op1Min)) // A <u B -> true if max(A) < min(B)
5066      return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5067    if (Op0Min.uge(Op1Max)) // A <u B -> false if min(A) >= max(B)
5068      return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5069    if (Op1Min == Op0Max) // A <u B -> A != B if max(A) == min(B)
5070      return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
5071
5072    const APInt *CmpC;
5073    if (match(Op1, m_APInt(CmpC))) {
5074      // A <u C -> A == C-1 if min(A)+1 == C
5075      if (*CmpC == Op0Min + 1)
5076        return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
5077                            ConstantInt::get(Op1->getType(), *CmpC - 1));
5078      // X <u C --> X == 0, if the number of zero bits in the bottom of X
5079      // exceeds the log2 of C.
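      // E.g. (illustrative): if X is known to be a multiple of 16 (at least
      // four trailing zero bits), then X u< 16 can only hold when X == 0.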
5080      if (Op0Known.countMinTrailingZeros() >= CmpC->ceilLogBase2())
5081        return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
5082                            Constant::getNullValue(Op1->getType()));
5083    }
5084    break;
5085  }
5086  case ICmpInst::ICMP_UGT: {
5087    if (Op0Min.ugt(Op1Max)) // A >u B -> true if min(A) > max(B)
5088      return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5089    if (Op0Max.ule(Op1Min)) // A >u B -> false if max(A) <= min(B)
5090      return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5091    if (Op1Max == Op0Min) // A >u B -> A != B if min(A) == max(B)
5092      return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
5093
5094    const APInt *CmpC;
5095    if (match(Op1, m_APInt(CmpC))) {
5096      // A >u C -> A == C+1 if max(A)-1 == C
5097      if (*CmpC == Op0Max - 1)
5098        return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
5099                            ConstantInt::get(Op1->getType(), *CmpC + 1));
5100      // X >u C --> X != 0, if the number of zero bits in the bottom of X
5101      // exceeds the log2 of C.
5102      if (Op0Known.countMinTrailingZeros() >= CmpC->getActiveBits())
5103        return new ICmpInst(ICmpInst::ICMP_NE, Op0,
5104                            Constant::getNullValue(Op1->getType()));
5105    }
5106    break;
5107  }
5108  case ICmpInst::ICMP_SLT: {
5109    if (Op0Max.slt(Op1Min)) // A <s B -> true if max(A) < min(B)
5110      return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5111    if (Op0Min.sge(Op1Max)) // A <s B -> false if min(A) >= max(B)
5112      return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5113    if (Op1Min == Op0Max) // A <s B -> A != B if max(A) == min(B)
5114      return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
5115    const APInt *CmpC;
5116    if (match(Op1, m_APInt(CmpC))) {
5117      if (*CmpC == Op0Min + 1) // A <s C -> A == C-1 if min(A)+1 == C
5118        return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
5119                            ConstantInt::get(Op1->getType(), *CmpC - 1));
5120    }
5121    break;
5122  }
5123  case ICmpInst::ICMP_SGT: {
5124    if (Op0Min.sgt(Op1Max)) // A >s B -> true if min(A) > max(B)
5125      return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5126    if (Op0Max.sle(Op1Min)) // A >s B -> false if max(A) <= min(B)
5127      return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5128    if (Op1Max == Op0Min) // A >s B -> A != B if min(A) == max(B)
5129      return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
5130    const APInt *CmpC;
5131    if (match(Op1, m_APInt(CmpC))) {
5132      if (*CmpC == Op0Max - 1) // A >s C -> A == C+1 if max(A)-1 == C
5133        return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
5134                            ConstantInt::get(Op1->getType(), *CmpC + 1));
5135    }
5136    break;
5137  }
5138  case ICmpInst::ICMP_SGE:
5139    assert(!isa<ConstantInt>(Op1) && "ICMP_SGE with ConstantInt not folded!");
5140    if (Op0Min.sge(Op1Max)) // A >=s B -> true if min(A) >= max(B)
5141      return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5142    if (Op0Max.slt(Op1Min)) // A >=s B -> false if max(A) < min(B)
5143      return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5144    if (Op1Min == Op0Max) // A >=s B -> A == B if max(A) == min(B)
5145      return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
5146    break;
5147  case ICmpInst::ICMP_SLE:
5148    assert(!isa<ConstantInt>(Op1) && "ICMP_SLE with ConstantInt not folded!");
5149    if (Op0Max.sle(Op1Min)) // A <=s B -> true if max(A) <= min(B)
5150      return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5151    if (Op0Min.sgt(Op1Max)) // A <=s B -> false if min(A) > max(B)
5152      return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5153    if (Op1Max == Op0Min) // A <=s B -> A == B if min(A) == max(B)
5154      return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
5155    break;
5156  case ICmpInst::ICMP_UGE:
5157    assert(!isa<ConstantInt>(Op1) && "ICMP_UGE with ConstantInt not folded!");
5158    if (Op0Min.uge(Op1Max)) // A >=u B -> true if min(A) >= max(B)
5159      return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5160    if (Op0Max.ult(Op1Min)) // A >=u B -> false if max(A) < min(B)
5161      return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5162    if (Op1Min == Op0Max) // A >=u B -> A == B if max(A) == min(B)
5163      return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
5164    break;
5165  case ICmpInst::ICMP_ULE:
5166    assert(!isa<ConstantInt>(Op1) && "ICMP_ULE with ConstantInt not folded!");
5167    if (Op0Max.ule(Op1Min)) // A <=u B -> true if max(A) <= min(B)
5168      return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5169    if (Op0Min.ugt(Op1Max)) // A <=u B -> false if min(A) > max(B)
5170      return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5171    if (Op1Max == Op0Min) // A <=u B -> A == B if min(A) == max(B)
5172      return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
5173    break;
5174  }
5175
5176  // Turn a signed comparison into an unsigned one if both operands are known to
5177  // have the same sign.
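  // For example, if the sign bits of both operands are known to be clear,
  // 'icmp slt %a, %b' can be rewritten as 'icmp ult %a, %b'.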
5178  if (I.isSigned() &&
5179      ((Op0Known.Zero.isNegative() && Op1Known.Zero.isNegative()) ||
5180       (Op0Known.One.isNegative() && Op1Known.One.isNegative())))
5181    return new ICmpInst(I.getUnsignedPredicate(), Op0, Op1);
5182
5183  return nullptr;
5184}
5185
5186llvm::Optional<std::pair<CmpInst::Predicate, Constant *>>
5187llvm::getFlippedStrictnessPredicateAndConstant(CmpInst::Predicate Pred,
5188                                               Constant *C) {
5189  assert(ICmpInst::isRelational(Pred) && ICmpInst::isIntPredicate(Pred) &&
5190         "Only for relational integer predicates.");
5191
5192  Type *Type = C->getType();
5193  bool IsSigned = ICmpInst::isSigned(Pred);
5194
5195  CmpInst::Predicate UnsignedPred = ICmpInst::getUnsignedPredicate(Pred);
5196  bool WillIncrement =
5197      UnsignedPred == ICmpInst::ICMP_ULE || UnsignedPred == ICmpInst::ICMP_UGT;
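  // For example (illustrative): 'icmp sgt X, C' maps to unsigned 'ugt', so the
  // strictness flip below yields 'icmp sge X, C+1' and C is incremented, while
  // 'icmp slt X, C' becomes 'icmp sle X, C-1' (assuming C can be adjusted
  // without wrapping, which is checked below).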
5198
5199  // Check if the constant operand can be safely incremented/decremented
5200  // without overflowing/underflowing.
5201  auto ConstantIsOk = [WillIncrement, IsSigned](ConstantInt *C) {
5202    return WillIncrement ? !C->isMaxValue(IsSigned) : !C->isMinValue(IsSigned);
5203  };
5204
5205  Constant *SafeReplacementConstant = nullptr;
5206  if (auto *CI = dyn_cast<ConstantInt>(C)) {
5207    // Bail out if the constant can't be safely incremented/decremented.
5208    if (!ConstantIsOk(CI))
5209      return llvm::None;
5210  } else if (Type->isVectorTy()) {
5211    unsigned NumElts = Type->getVectorNumElements();
5212    for (unsigned i = 0; i != NumElts; ++i) {
5213      Constant *Elt = C->getAggregateElement(i);
5214      if (!Elt)
5215        return llvm::None;
5216
5217      if (isa<UndefValue>(Elt))
5218        continue;
5219
5220      // Bail out if we can't determine if this constant is min/max or if we
5221      // know that this constant is min/max.
5222      auto *CI = dyn_cast<ConstantInt>(Elt);
5223      if (!CI || !ConstantIsOk(CI))
5224        return llvm::None;
5225
5226      if (!SafeReplacementConstant)
5227        SafeReplacementConstant = CI;
5228    }
5229  } else {
5230    // ConstantExpr?
5231    return llvm::None;
5232  }
5233
5234  // It may not be safe to change a compare predicate in the presence of
5235  // undefined elements, so replace those elements with the first safe constant
5236  // that we found.
5237  if (C->containsUndefElement()) {
5238    assert(SafeReplacementConstant && "Replacement constant not set");
5239    C = Constant::replaceUndefsWith(C, SafeReplacementConstant);
5240  }
5241
5242  CmpInst::Predicate NewPred = CmpInst::getFlippedStrictnessPredicate(Pred);
5243
5244  // Increment or decrement the constant.
5245  Constant *OneOrNegOne = ConstantInt::get(Type, WillIncrement ? 1 : -1, true);
5246  Constant *NewC = ConstantExpr::getAdd(C, OneOrNegOne);
5247
5248  return std::make_pair(NewPred, NewC);
5249}
5250
5251/// If we have an icmp le or icmp ge instruction with a constant operand, turn
5252/// it into the appropriate icmp lt or icmp gt instruction. This transform
5253/// allows them to be folded in visitICmpInst.
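/// For example, 'icmp sle %x, 7' becomes 'icmp slt %x, 8' (assuming 7 is not
/// the signed maximum of the type).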
5254static ICmpInst *canonicalizeCmpWithConstant(ICmpInst &I) {
5255  ICmpInst::Predicate Pred = I.getPredicate();
5256  if (ICmpInst::isEquality(Pred) || !ICmpInst::isIntPredicate(Pred) ||
5257      isCanonicalPredicate(Pred))
5258    return nullptr;
5259
5260  Value *Op0 = I.getOperand(0);
5261  Value *Op1 = I.getOperand(1);
5262  auto *Op1C = dyn_cast<Constant>(Op1);
5263  if (!Op1C)
5264    return nullptr;
5265
5266  auto FlippedStrictness = getFlippedStrictnessPredicateAndConstant(Pred, Op1C);
5267  if (!FlippedStrictness)
5268    return nullptr;
5269
5270  return new ICmpInst(FlippedStrictness->first, Op0, FlippedStrictness->second);
5271}
5272
5273/// Integer compare with boolean values can always be turned into bitwise ops.
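/// For example, 'icmp eq i1 %a, %b' becomes 'xor (xor %a, %b), true', and
/// 'icmp ult i1 %a, %b' becomes 'and (xor %a, true), %b'.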
5274static Instruction *canonicalizeICmpBool(ICmpInst &I,
5275                                         InstCombiner::BuilderTy &Builder) {
5276  Value *A = I.getOperand(0), *B = I.getOperand(1);
5277  assert(A->getType()->isIntOrIntVectorTy(1) && "Bools only");
5278
5279  // A boolean compared to true/false can be simplified to Op0/true/false in
5280  // 14 out of the 20 (10 predicates * 2 constants) possible combinations.
5281  // Cases not handled by InstSimplify are always 'not' of Op0.
5282  if (match(B, m_Zero())) {
5283    switch (I.getPredicate()) {
5284      case CmpInst::ICMP_EQ:  // A ==   0 -> !A
5285      case CmpInst::ICMP_ULE: // A <=u  0 -> !A
5286      case CmpInst::ICMP_SGE: // A >=s  0 -> !A
5287        return BinaryOperator::CreateNot(A);
5288      default:
5289        llvm_unreachable("ICmp i1 X, C not simplified as expected.");
5290    }
5291  } else if (match(B, m_One())) {
5292    switch (I.getPredicate()) {
5293      case CmpInst::ICMP_NE:  // A !=  1 -> !A
5294      case CmpInst::ICMP_ULT: // A <u  1 -> !A
5295      case CmpInst::ICMP_SGT: // A >s -1 -> !A
5296        return BinaryOperator::CreateNot(A);
5297      default:
5298        llvm_unreachable("ICmp i1 X, C not simplified as expected.");
5299    }
5300  }
5301
5302  switch (I.getPredicate()) {
5303  default:
5304    llvm_unreachable("Invalid icmp instruction!");
5305  case ICmpInst::ICMP_EQ:
5306    // icmp eq i1 A, B -> ~(A ^ B)
5307    return BinaryOperator::CreateNot(Builder.CreateXor(A, B));
5308
5309  case ICmpInst::ICMP_NE:
5310    // icmp ne i1 A, B -> A ^ B
5311    return BinaryOperator::CreateXor(A, B);
5312
5313  case ICmpInst::ICMP_UGT:
5314    // icmp ugt -> icmp ult
5315    std::swap(A, B);
5316    LLVM_FALLTHROUGH;
5317  case ICmpInst::ICMP_ULT:
5318    // icmp ult i1 A, B -> ~A & B
5319    return BinaryOperator::CreateAnd(Builder.CreateNot(A), B);
5320
5321  case ICmpInst::ICMP_SGT:
5322    // icmp sgt -> icmp slt
5323    std::swap(A, B);
5324    LLVM_FALLTHROUGH;
5325  case ICmpInst::ICMP_SLT:
5326    // icmp slt i1 A, B -> A & ~B
5327    return BinaryOperator::CreateAnd(Builder.CreateNot(B), A);
5328
5329  case ICmpInst::ICMP_UGE:
5330    // icmp uge -> icmp ule
5331    std::swap(A, B);
5332    LLVM_FALLTHROUGH;
5333  case ICmpInst::ICMP_ULE:
5334    // icmp ule i1 A, B -> ~A | B
5335    return BinaryOperator::CreateOr(Builder.CreateNot(A), B);
5336
5337  case ICmpInst::ICMP_SGE:
5338    // icmp sge -> icmp sle
5339    std::swap(A, B);
5340    LLVM_FALLTHROUGH;
5341  case ICmpInst::ICMP_SLE:
5342    // icmp sle i1 A, B -> A | ~B
5343    return BinaryOperator::CreateOr(Builder.CreateNot(B), A);
5344  }
5345}
5346
5347// Transform pattern like:
5348//   (1 << Y) u<= X  or  ~(-1 << Y) u<  X  or  ((1 << Y)+(-1)) u<  X
5349//   (1 << Y) u>  X  or  ~(-1 << Y) u>= X  or  ((1 << Y)+(-1)) u>= X
5350// Into:
5351//   (X l>> Y) != 0
5352//   (X l>> Y) == 0
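// Worked example: with Y == 3, (1 << Y) == 8, and '8 u<= X' holds exactly when
// X has a bit set at position 3 or higher, i.e. when '(X l>> 3) != 0'.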
5353static Instruction *foldICmpWithHighBitMask(ICmpInst &Cmp,
5354                                            InstCombiner::BuilderTy &Builder) {
5355  ICmpInst::Predicate Pred, NewPred;
5356  Value *X, *Y;
5357  if (match(&Cmp,
5358            m_c_ICmp(Pred, m_OneUse(m_Shl(m_One(), m_Value(Y))), m_Value(X)))) {
5359    // We want X to be the icmp's second operand, so swap predicate if it isn't.
5360    if (Cmp.getOperand(0) == X)
5361      Pred = Cmp.getSwappedPredicate();
5362
5363    switch (Pred) {
5364    case ICmpInst::ICMP_ULE:
5365      NewPred = ICmpInst::ICMP_NE;
5366      break;
5367    case ICmpInst::ICMP_UGT:
5368      NewPred = ICmpInst::ICMP_EQ;
5369      break;
5370    default:
5371      return nullptr;
5372    }
5373  } else if (match(&Cmp, m_c_ICmp(Pred,
5374                                  m_OneUse(m_CombineOr(
5375                                      m_Not(m_Shl(m_AllOnes(), m_Value(Y))),
5376                                      m_Add(m_Shl(m_One(), m_Value(Y)),
5377                                            m_AllOnes()))),
5378                                  m_Value(X)))) {
    // The variant with 'add' is not canonical (the variant with 'not' is); we
    // only see it here because it has extra uses and was not canonicalized.
5381
5382    // We want X to be the icmp's second operand, so swap predicate if it isn't.
5383    if (Cmp.getOperand(0) == X)
5384      Pred = Cmp.getSwappedPredicate();
5385
5386    switch (Pred) {
5387    case ICmpInst::ICMP_ULT:
5388      NewPred = ICmpInst::ICMP_NE;
5389      break;
5390    case ICmpInst::ICMP_UGE:
5391      NewPred = ICmpInst::ICMP_EQ;
5392      break;
5393    default:
5394      return nullptr;
5395    }
5396  } else
5397    return nullptr;
5398
5399  Value *NewX = Builder.CreateLShr(X, Y, X->getName() + ".highbits");
5400  Constant *Zero = Constant::getNullValue(NewX->getType());
5401  return CmpInst::Create(Instruction::ICmp, NewPred, NewX, Zero);
5402}
5403
5404static Instruction *foldVectorCmp(CmpInst &Cmp,
5405                                  InstCombiner::BuilderTy &Builder) {
5406  // If both arguments of the cmp are shuffles that use the same mask and
5407  // shuffle within a single vector, move the shuffle after the cmp.
5408  Value *LHS = Cmp.getOperand(0), *RHS = Cmp.getOperand(1);
5409  Value *V1, *V2;
5410  Constant *M;
5411  if (match(LHS, m_ShuffleVector(m_Value(V1), m_Undef(), m_Constant(M))) &&
5412      match(RHS, m_ShuffleVector(m_Value(V2), m_Undef(), m_Specific(M))) &&
5413      V1->getType() == V2->getType() &&
5414      (LHS->hasOneUse() || RHS->hasOneUse())) {
5415    // cmp (shuffle V1, M), (shuffle V2, M) --> shuffle (cmp V1, V2), M
5416    CmpInst::Predicate P = Cmp.getPredicate();
5417    Value *NewCmp = isa<ICmpInst>(Cmp) ? Builder.CreateICmp(P, V1, V2)
5418                                       : Builder.CreateFCmp(P, V1, V2);
5419    return new ShuffleVectorInst(NewCmp, UndefValue::get(NewCmp->getType()), M);
5420  }
5421  return nullptr;
5422}
5423
5424// extract(uadd.with.overflow(A, B), 0) ult A
5425//  -> extract(uadd.with.overflow(A, B), 1)
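// The low half of an unsigned add wraps (becomes u< either addend) exactly
// when the add overflows, so the comparison is equivalent to the intrinsic's
// overflow bit.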
5426static Instruction *foldICmpOfUAddOv(ICmpInst &I) {
5427  CmpInst::Predicate Pred = I.getPredicate();
5428  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5429
5430  Value *UAddOv;
5431  Value *A, *B;
5432  auto UAddOvResultPat = m_ExtractValue<0>(
5433      m_Intrinsic<Intrinsic::uadd_with_overflow>(m_Value(A), m_Value(B)));
5434  if (match(Op0, UAddOvResultPat) &&
5435      ((Pred == ICmpInst::ICMP_ULT && (Op1 == A || Op1 == B)) ||
5436       (Pred == ICmpInst::ICMP_EQ && match(Op1, m_ZeroInt()) &&
5437        (match(A, m_One()) || match(B, m_One()))) ||
5438       (Pred == ICmpInst::ICMP_NE && match(Op1, m_AllOnes()) &&
5439        (match(A, m_AllOnes()) || match(B, m_AllOnes())))))
5440    // extract(uadd.with.overflow(A, B), 0) < A
5441    // extract(uadd.with.overflow(A, 1), 0) == 0
5442    // extract(uadd.with.overflow(A, -1), 0) != -1
5443    UAddOv = cast<ExtractValueInst>(Op0)->getAggregateOperand();
5444  else if (match(Op1, UAddOvResultPat) &&
5445           Pred == ICmpInst::ICMP_UGT && (Op0 == A || Op0 == B))
5446    // A > extract(uadd.with.overflow(A, B), 0)
5447    UAddOv = cast<ExtractValueInst>(Op1)->getAggregateOperand();
5448  else
5449    return nullptr;
5450
5451  return ExtractValueInst::Create(UAddOv, 1);
5452}
5453
5454Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
5455  bool Changed = false;
5456  const SimplifyQuery Q = SQ.getWithInstruction(&I);
5457  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5458  unsigned Op0Cplxity = getComplexity(Op0);
5459  unsigned Op1Cplxity = getComplexity(Op1);
5460
  /// Orders the operands of the compare so that they are listed from most
  /// complex to least complex.  This puts binary operators before unary
  /// operators, and those before constants.
5464  if (Op0Cplxity < Op1Cplxity ||
5465      (Op0Cplxity == Op1Cplxity && swapMayExposeCSEOpportunities(Op0, Op1))) {
5466    I.swapOperands();
5467    std::swap(Op0, Op1);
5468    Changed = true;
5469  }
5470
5471  if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1, Q))
5472    return replaceInstUsesWith(I, V);
5473
  // Comparing an abs-like select of val and -val against zero with 'ne' is the
  // same as comparing val itself, i.e. abs(val) != 0 -> val != 0.
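  // For example (illustrative IR):
  //   %neg = sub i32 0, %val
  //   %abs = select i1 %c, i32 %val, i32 %neg
  //   %cmp = icmp ne i32 %abs, 0
  // --> %cmp = icmp ne i32 %val, 0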
5476  if (I.getPredicate() == ICmpInst::ICMP_NE && match(Op1, m_Zero())) {
5477    Value *Cond, *SelectTrue, *SelectFalse;
5478    if (match(Op0, m_Select(m_Value(Cond), m_Value(SelectTrue),
5479                            m_Value(SelectFalse)))) {
5480      if (Value *V = dyn_castNegVal(SelectTrue)) {
5481        if (V == SelectFalse)
5482          return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
5483      }
5484      else if (Value *V = dyn_castNegVal(SelectFalse)) {
5485        if (V == SelectTrue)
5486          return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
5487      }
5488    }
5489  }
5490
5491  if (Op0->getType()->isIntOrIntVectorTy(1))
5492    if (Instruction *Res = canonicalizeICmpBool(I, Builder))
5493      return Res;
5494
5495  if (ICmpInst *NewICmp = canonicalizeCmpWithConstant(I))
5496    return NewICmp;
5497
5498  if (Instruction *Res = foldICmpWithConstant(I))
5499    return Res;
5500
5501  if (Instruction *Res = foldICmpWithDominatingICmp(I))
5502    return Res;
5503
5504  if (Instruction *Res = foldICmpBinOp(I, Q))
5505    return Res;
5506
5507  if (Instruction *Res = foldICmpUsingKnownBits(I))
5508    return Res;
5509
5510  // Test if the ICmpInst instruction is used exclusively by a select as
5511  // part of a minimum or maximum operation. If so, refrain from doing
5512  // any other folding. This helps out other analyses which understand
5513  // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
5514  // and CodeGen. And in this case, at least one of the comparison
5515  // operands has at least one user besides the compare (the select),
5516  // which would often largely negate the benefit of folding anyway.
5517  //
5518  // Do the same for the other patterns recognized by matchSelectPattern.
5519  if (I.hasOneUse())
5520    if (SelectInst *SI = dyn_cast<SelectInst>(I.user_back())) {
5521      Value *A, *B;
5522      SelectPatternResult SPR = matchSelectPattern(SI, A, B);
5523      if (SPR.Flavor != SPF_UNKNOWN)
5524        return nullptr;
5525    }
5526
5527  // Do this after checking for min/max to prevent infinite looping.
5528  if (Instruction *Res = foldICmpWithZero(I))
5529    return Res;
5530
5531  // FIXME: We only do this after checking for min/max to prevent infinite
5532  // looping caused by a reverse canonicalization of these patterns for min/max.
5533  // FIXME: The organization of folds is a mess. These would naturally go into
5534  // canonicalizeCmpWithConstant(), but we can't move all of the above folds
5535  // down here after the min/max restriction.
5536  ICmpInst::Predicate Pred = I.getPredicate();
5537  const APInt *C;
5538  if (match(Op1, m_APInt(C))) {
5539    // For i32: x >u 2147483647 -> x <s 0  -> true if sign bit set
5540    if (Pred == ICmpInst::ICMP_UGT && C->isMaxSignedValue()) {
5541      Constant *Zero = Constant::getNullValue(Op0->getType());
5542      return new ICmpInst(ICmpInst::ICMP_SLT, Op0, Zero);
5543    }
5544
5545    // For i32: x <u 2147483648 -> x >s -1  -> true if sign bit clear
5546    if (Pred == ICmpInst::ICMP_ULT && C->isMinSignedValue()) {
5547      Constant *AllOnes = Constant::getAllOnesValue(Op0->getType());
5548      return new ICmpInst(ICmpInst::ICMP_SGT, Op0, AllOnes);
5549    }
5550  }
5551
5552  if (Instruction *Res = foldICmpInstWithConstant(I))
5553    return Res;
5554
  // Try to match comparison as a sign bit test. Intentionally do this after
  // foldICmpInstWithConstant() to give other folds a chance to happen first.
5557  if (Instruction *New = foldSignBitTest(I))
5558    return New;
5559
5560  if (Instruction *Res = foldICmpInstWithConstantNotInt(I))
5561    return Res;
5562
5563  // If we can optimize a 'icmp GEP, P' or 'icmp P, GEP', do so now.
5564  if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op0))
5565    if (Instruction *NI = foldGEPICmp(GEP, Op1, I.getPredicate(), I))
5566      return NI;
5567  if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op1))
5568    if (Instruction *NI = foldGEPICmp(GEP, Op0,
5569                           ICmpInst::getSwappedPredicate(I.getPredicate()), I))
5570      return NI;
5571
5572  // Try to optimize equality comparisons against alloca-based pointers.
5573  if (Op0->getType()->isPointerTy() && I.isEquality()) {
5574    assert(Op1->getType()->isPointerTy() && "Comparing pointer with non-pointer?");
5575    if (auto *Alloca = dyn_cast<AllocaInst>(GetUnderlyingObject(Op0, DL)))
5576      if (Instruction *New = foldAllocaCmp(I, Alloca, Op1))
5577        return New;
5578    if (auto *Alloca = dyn_cast<AllocaInst>(GetUnderlyingObject(Op1, DL)))
5579      if (Instruction *New = foldAllocaCmp(I, Alloca, Op0))
5580        return New;
5581  }
5582
5583  if (Instruction *Res = foldICmpBitCast(I, Builder))
5584    return Res;
5585
5586  if (Instruction *R = foldICmpWithCastOp(I))
5587    return R;
5588
5589  if (Instruction *Res = foldICmpWithMinMax(I))
5590    return Res;
5591
5592  {
5593    Value *A, *B;
5594    // Transform (A & ~B) == 0 --> (A & B) != 0
5595    // and       (A & ~B) != 0 --> (A & B) == 0
5596    // if A is a power of 2.
5597    if (match(Op0, m_And(m_Value(A), m_Not(m_Value(B)))) &&
5598        match(Op1, m_Zero()) &&
5599        isKnownToBeAPowerOfTwo(A, false, 0, &I) && I.isEquality())
5600      return new ICmpInst(I.getInversePredicate(), Builder.CreateAnd(A, B),
5601                          Op1);
5602
5603    // ~X < ~Y --> Y < X
5604    // ~X < C -->  X > ~C
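    // e.g. '~x s< 5' becomes 'x s> -6', since ~5 == -6.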
5605    if (match(Op0, m_Not(m_Value(A)))) {
5606      if (match(Op1, m_Not(m_Value(B))))
5607        return new ICmpInst(I.getPredicate(), B, A);
5608
5609      const APInt *C;
5610      if (match(Op1, m_APInt(C)))
5611        return new ICmpInst(I.getSwappedPredicate(), A,
5612                            ConstantInt::get(Op1->getType(), ~(*C)));
5613    }
5614
5615    Instruction *AddI = nullptr;
5616    if (match(&I, m_UAddWithOverflow(m_Value(A), m_Value(B),
5617                                     m_Instruction(AddI))) &&
5618        isa<IntegerType>(A->getType())) {
5619      Value *Result;
5620      Constant *Overflow;
5621      if (OptimizeOverflowCheck(Instruction::Add, /*Signed*/false, A, B,
5622                                *AddI, Result, Overflow)) {
5623        replaceInstUsesWith(*AddI, Result);
5624        return replaceInstUsesWith(I, Overflow);
5625      }
5626    }
5627
5628    // (zext a) * (zext b)  --> llvm.umul.with.overflow.
5629    if (match(Op0, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
5630      if (Instruction *R = processUMulZExtIdiom(I, Op0, Op1, *this))
5631        return R;
5632    }
5633    if (match(Op1, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
5634      if (Instruction *R = processUMulZExtIdiom(I, Op1, Op0, *this))
5635        return R;
5636    }
5637  }
5638
5639  if (Instruction *Res = foldICmpEquality(I))
5640    return Res;
5641
5642  if (Instruction *Res = foldICmpOfUAddOv(I))
5643    return Res;
5644
5645  // The 'cmpxchg' instruction returns an aggregate containing the old value and
5646  // an i1 which indicates whether or not we successfully did the swap.
5647  //
5648  // Replace comparisons between the old value and the expected value with the
5649  // indicator that 'cmpxchg' returns.
5650  //
5651  // N.B.  This transform is only valid when the 'cmpxchg' is not permitted to
5652  // spuriously fail.  In those cases, the old value may equal the expected
5653  // value but it is possible for the swap to not occur.
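  // Illustrative IR (strong cmpxchg only):
  //   %pair = cmpxchg i32* %ptr, i32 %expected, i32 %new seq_cst seq_cst
  //   %old  = extractvalue { i32, i1 } %pair, 0
  //   %cmp  = icmp eq i32 %old, %expected
  // --> %cmp = extractvalue { i32, i1 } %pair, 1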
5654  if (I.getPredicate() == ICmpInst::ICMP_EQ)
5655    if (auto *EVI = dyn_cast<ExtractValueInst>(Op0))
5656      if (auto *ACXI = dyn_cast<AtomicCmpXchgInst>(EVI->getAggregateOperand()))
5657        if (EVI->getIndices()[0] == 0 && ACXI->getCompareOperand() == Op1 &&
5658            !ACXI->isWeak())
5659          return ExtractValueInst::Create(ACXI, 1);
5660
5661  {
5662    Value *X;
5663    const APInt *C;
5664    // icmp X+Cst, X
5665    if (match(Op0, m_Add(m_Value(X), m_APInt(C))) && Op1 == X)
5666      return foldICmpAddOpConst(X, *C, I.getPredicate());
5667
5668    // icmp X, X+Cst
5669    if (match(Op1, m_Add(m_Value(X), m_APInt(C))) && Op0 == X)
5670      return foldICmpAddOpConst(X, *C, I.getSwappedPredicate());
5671  }
5672
5673  if (Instruction *Res = foldICmpWithHighBitMask(I, Builder))
5674    return Res;
5675
5676  if (I.getType()->isVectorTy())
5677    if (Instruction *Res = foldVectorCmp(I, Builder))
5678      return Res;
5679
5680  return Changed ? &I : nullptr;
5681}
5682
5683/// Fold fcmp ([us]itofp x, cst) if possible.
5684Instruction *InstCombiner::foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
5685                                                Constant *RHSC) {
5686  if (!isa<ConstantFP>(RHSC)) return nullptr;
5687  const APFloat &RHS = cast<ConstantFP>(RHSC)->getValueAPF();
5688
5689  // Get the width of the mantissa.  We don't want to hack on conversions that
5690  // might lose information from the integer, e.g. "i64 -> float"
5691  int MantissaWidth = LHSI->getType()->getFPMantissaWidth();
5692  if (MantissaWidth == -1) return nullptr;  // Unknown.
5693
5694  IntegerType *IntTy = cast<IntegerType>(LHSI->getOperand(0)->getType());
5695
5696  bool LHSUnsigned = isa<UIToFPInst>(LHSI);
5697
5698  if (I.isEquality()) {
5699    FCmpInst::Predicate P = I.getPredicate();
5700    bool IsExact = false;
5701    APSInt RHSCvt(IntTy->getBitWidth(), LHSUnsigned);
5702    RHS.convertToInteger(RHSCvt, APFloat::rmNearestTiesToEven, &IsExact);
5703
    // If the floating point constant isn't an integer value, then we know the
    // result of any equality comparison against it: the operands can never be
    // equal.
5706    if (!IsExact) {
5707      // TODO: Can never be -0.0 and other non-representable values
5708      APFloat RHSRoundInt(RHS);
5709      RHSRoundInt.roundToIntegral(APFloat::rmNearestTiesToEven);
5710      if (RHS.compare(RHSRoundInt) != APFloat::cmpEqual) {
5711        if (P == FCmpInst::FCMP_OEQ || P == FCmpInst::FCMP_UEQ)
5712          return replaceInstUsesWith(I, Builder.getFalse());
5713
5714        assert(P == FCmpInst::FCMP_ONE || P == FCmpInst::FCMP_UNE);
5715        return replaceInstUsesWith(I, Builder.getTrue());
5716      }
5717    }
5718
5719    // TODO: If the constant is exactly representable, is it always OK to do
5720    // equality compares as integer?
5721  }
5722
  // Check to see that the input is converted from an integer type that is
  // small enough to preserve all bits.  TODO: check here for "known" sign bits.
5725  // This would allow us to handle (fptosi (x >>s 62) to float) if x is i64 f.e.
5726  unsigned InputSize = IntTy->getScalarSizeInBits();
5727
5728  // Following test does NOT adjust InputSize downwards for signed inputs,
5729  // because the most negative value still requires all the mantissa bits
5730  // to distinguish it from one less than that value.
5731  if ((int)InputSize > MantissaWidth) {
5732    // Conversion would lose accuracy. Check if loss can impact comparison.
5733    int Exp = ilogb(RHS);
5734    if (Exp == APFloat::IEK_Inf) {
5735      int MaxExponent = ilogb(APFloat::getLargest(RHS.getSemantics()));
5736      if (MaxExponent < (int)InputSize - !LHSUnsigned)
5737        // Conversion could create infinity.
5738        return nullptr;
5739    } else {
5740      // Note that if RHS is zero or NaN, then Exp is negative
5741      // and first condition is trivially false.
5742      if (MantissaWidth <= Exp && Exp <= (int)InputSize - !LHSUnsigned)
5743        // Conversion could affect comparison.
5744        return nullptr;
5745    }
5746  }
5747
5748  // Otherwise, we can potentially simplify the comparison.  We know that it
5749  // will always come through as an integer value and we know the constant is
5750  // not a NAN (it would have been previously simplified).
5751  assert(!RHS.isNaN() && "NaN comparison not already folded!");
5752
5753  ICmpInst::Predicate Pred;
5754  switch (I.getPredicate()) {
5755  default: llvm_unreachable("Unexpected predicate!");
5756  case FCmpInst::FCMP_UEQ:
5757  case FCmpInst::FCMP_OEQ:
5758    Pred = ICmpInst::ICMP_EQ;
5759    break;
5760  case FCmpInst::FCMP_UGT:
5761  case FCmpInst::FCMP_OGT:
5762    Pred = LHSUnsigned ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_SGT;
5763    break;
5764  case FCmpInst::FCMP_UGE:
5765  case FCmpInst::FCMP_OGE:
5766    Pred = LHSUnsigned ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_SGE;
5767    break;
5768  case FCmpInst::FCMP_ULT:
5769  case FCmpInst::FCMP_OLT:
5770    Pred = LHSUnsigned ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_SLT;
5771    break;
5772  case FCmpInst::FCMP_ULE:
5773  case FCmpInst::FCMP_OLE:
5774    Pred = LHSUnsigned ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_SLE;
5775    break;
5776  case FCmpInst::FCMP_UNE:
5777  case FCmpInst::FCMP_ONE:
5778    Pred = ICmpInst::ICMP_NE;
5779    break;
5780  case FCmpInst::FCMP_ORD:
5781    return replaceInstUsesWith(I, Builder.getTrue());
5782  case FCmpInst::FCMP_UNO:
5783    return replaceInstUsesWith(I, Builder.getFalse());
5784  }
5785
5786  // Now we know that the APFloat is a normal number, zero or inf.
5787
5788  // See if the FP constant is too large for the integer.  For example,
5789  // comparing an i8 to 300.0.
5790  unsigned IntWidth = IntTy->getScalarSizeInBits();
5791
5792  if (!LHSUnsigned) {
5793    // If the RHS value is > SignedMax, fold the comparison.  This handles +INF
5794    // and large values.
5795    APFloat SMax(RHS.getSemantics());
5796    SMax.convertFromAPInt(APInt::getSignedMaxValue(IntWidth), true,
5797                          APFloat::rmNearestTiesToEven);
5798    if (SMax.compare(RHS) == APFloat::cmpLessThan) {  // smax < 13123.0
5799      if (Pred == ICmpInst::ICMP_NE  || Pred == ICmpInst::ICMP_SLT ||
5800          Pred == ICmpInst::ICMP_SLE)
5801        return replaceInstUsesWith(I, Builder.getTrue());
5802      return replaceInstUsesWith(I, Builder.getFalse());
5803    }
5804  } else {
5805    // If the RHS value is > UnsignedMax, fold the comparison. This handles
5806    // +INF and large values.
5807    APFloat UMax(RHS.getSemantics());
5808    UMax.convertFromAPInt(APInt::getMaxValue(IntWidth), false,
5809                          APFloat::rmNearestTiesToEven);
5810    if (UMax.compare(RHS) == APFloat::cmpLessThan) {  // umax < 13123.0
5811      if (Pred == ICmpInst::ICMP_NE  || Pred == ICmpInst::ICMP_ULT ||
5812          Pred == ICmpInst::ICMP_ULE)
5813        return replaceInstUsesWith(I, Builder.getTrue());
5814      return replaceInstUsesWith(I, Builder.getFalse());
5815    }
5816  }
5817
5818  if (!LHSUnsigned) {
5819    // See if the RHS value is < SignedMin.
5820    APFloat SMin(RHS.getSemantics());
5821    SMin.convertFromAPInt(APInt::getSignedMinValue(IntWidth), true,
5822                          APFloat::rmNearestTiesToEven);
5823    if (SMin.compare(RHS) == APFloat::cmpGreaterThan) { // smin > 12312.0
5824      if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT ||
5825          Pred == ICmpInst::ICMP_SGE)
5826        return replaceInstUsesWith(I, Builder.getTrue());
5827      return replaceInstUsesWith(I, Builder.getFalse());
5828    }
5829  } else {
5830    // See if the RHS value is < UnsignedMin.
    APFloat UMin(RHS.getSemantics());
    UMin.convertFromAPInt(APInt::getMinValue(IntWidth), false,
                          APFloat::rmNearestTiesToEven);
    if (UMin.compare(RHS) == APFloat::cmpGreaterThan) { // umin > 12312.0
5835      if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_UGT ||
5836          Pred == ICmpInst::ICMP_UGE)
5837        return replaceInstUsesWith(I, Builder.getTrue());
5838      return replaceInstUsesWith(I, Builder.getFalse());
5839    }
5840  }
5841
5842  // Okay, now we know that the FP constant fits in the range [SMIN, SMAX] or
5843  // [0, UMAX], but it may still be fractional.  See if it is fractional by
5844  // casting the FP value to the integer value and back, checking for equality.
5845  // Don't do this for zero, because -0.0 is not fractional.
5846  Constant *RHSInt = LHSUnsigned
5847    ? ConstantExpr::getFPToUI(RHSC, IntTy)
5848    : ConstantExpr::getFPToSI(RHSC, IntTy);
5849  if (!RHS.isZero()) {
5850    bool Equal = LHSUnsigned
5851      ? ConstantExpr::getUIToFP(RHSInt, RHSC->getType()) == RHSC
5852      : ConstantExpr::getSIToFP(RHSInt, RHSC->getType()) == RHSC;
5853    if (!Equal) {
5854      // If we had a comparison against a fractional value, we have to adjust
5855      // the compare predicate and sometimes the value.  RHSC is rounded towards
5856      // zero at this point.
5857      switch (Pred) {
5858      default: llvm_unreachable("Unexpected integer comparison!");
5859      case ICmpInst::ICMP_NE:  // (float)int != 4.4   --> true
5860        return replaceInstUsesWith(I, Builder.getTrue());
5861      case ICmpInst::ICMP_EQ:  // (float)int == 4.4   --> false
5862        return replaceInstUsesWith(I, Builder.getFalse());
5863      case ICmpInst::ICMP_ULE:
5864        // (float)int <= 4.4   --> int <= 4
5865        // (float)int <= -4.4  --> false
5866        if (RHS.isNegative())
5867          return replaceInstUsesWith(I, Builder.getFalse());
5868        break;
5869      case ICmpInst::ICMP_SLE:
5870        // (float)int <= 4.4   --> int <= 4
5871        // (float)int <= -4.4  --> int < -4
5872        if (RHS.isNegative())
5873          Pred = ICmpInst::ICMP_SLT;
5874        break;
5875      case ICmpInst::ICMP_ULT:
5876        // (float)int < -4.4   --> false
5877        // (float)int < 4.4    --> int <= 4
5878        if (RHS.isNegative())
5879          return replaceInstUsesWith(I, Builder.getFalse());
5880        Pred = ICmpInst::ICMP_ULE;
5881        break;
5882      case ICmpInst::ICMP_SLT:
5883        // (float)int < -4.4   --> int < -4
5884        // (float)int < 4.4    --> int <= 4
5885        if (!RHS.isNegative())
5886          Pred = ICmpInst::ICMP_SLE;
5887        break;
5888      case ICmpInst::ICMP_UGT:
5889        // (float)int > 4.4    --> int > 4
5890        // (float)int > -4.4   --> true
5891        if (RHS.isNegative())
5892          return replaceInstUsesWith(I, Builder.getTrue());
5893        break;
5894      case ICmpInst::ICMP_SGT:
5895        // (float)int > 4.4    --> int > 4
5896        // (float)int > -4.4   --> int >= -4
5897        if (RHS.isNegative())
5898          Pred = ICmpInst::ICMP_SGE;
5899        break;
5900      case ICmpInst::ICMP_UGE:
5901        // (float)int >= -4.4   --> true
5902        // (float)int >= 4.4    --> int > 4
5903        if (RHS.isNegative())
5904          return replaceInstUsesWith(I, Builder.getTrue());
5905        Pred = ICmpInst::ICMP_UGT;
5906        break;
5907      case ICmpInst::ICMP_SGE:
5908        // (float)int >= -4.4   --> int >= -4
5909        // (float)int >= 4.4    --> int > 4
5910        if (!RHS.isNegative())
5911          Pred = ICmpInst::ICMP_SGT;
5912        break;
5913      }
5914    }
5915  }
5916
5917  // Lower this FP comparison into an appropriate integer version of the
5918  // comparison.
5919  return new ICmpInst(Pred, LHSI->getOperand(0), RHSInt);
5920}
5921
5922/// Fold (C / X) < 0.0 --> X < 0.0 if possible. Swap predicate if necessary.
5923static Instruction *foldFCmpReciprocalAndZero(FCmpInst &I, Instruction *LHSI,
5924                                              Constant *RHSC) {
5925  // When C is not 0.0 and infinities are not allowed:
5926  // (C / X) < 0.0 is a sign-bit test of X
5927  // (C / X) < 0.0 --> X < 0.0 (if C is positive)
5928  // (C / X) < 0.0 --> X > 0.0 (if C is negative, swap the predicate)
5929  //
5930  // Proof:
5931  // Multiply (C / X) < 0.0 by X * X / C.
  // - X is non-zero; if it were zero, the 'ninf' flag would be violated.
  // - C determines the sign of X * X / C, and thus whether the predicate needs
  //   to be swapped. C is also non-zero by definition.
  //
  // Thus X * X / C is non-zero and the transformation is valid. [qed]
5937
5938  FCmpInst::Predicate Pred = I.getPredicate();
5939
5940  // Check that predicates are valid.
5941  if ((Pred != FCmpInst::FCMP_OGT) && (Pred != FCmpInst::FCMP_OLT) &&
5942      (Pred != FCmpInst::FCMP_OGE) && (Pred != FCmpInst::FCMP_OLE))
5943    return nullptr;
5944
5945  // Check that RHS operand is zero.
5946  if (!match(RHSC, m_AnyZeroFP()))
5947    return nullptr;
5948
5949  // Check fastmath flags ('ninf').
5950  if (!LHSI->hasNoInfs() || !I.hasNoInfs())
5951    return nullptr;
5952
5953  // Check the properties of the dividend. It must not be zero to avoid a
5954  // division by zero (see Proof).
5955  const APFloat *C;
5956  if (!match(LHSI->getOperand(0), m_APFloat(C)))
5957    return nullptr;
5958
5959  if (C->isZero())
5960    return nullptr;
5961
5962  // Get swapped predicate if necessary.
5963  if (C->isNegative())
5964    Pred = I.getSwappedPredicate();
5965
5966  return new FCmpInst(Pred, LHSI->getOperand(1), RHSC, "", &I);
5967}
5968
5969/// Optimize fabs(X) compared with zero.
5970static Instruction *foldFabsWithFcmpZero(FCmpInst &I) {
5971  Value *X;
5972  if (!match(I.getOperand(0), m_Intrinsic<Intrinsic::fabs>(m_Value(X))) ||
5973      !match(I.getOperand(1), m_PosZeroFP()))
5974    return nullptr;
5975
5976  auto replacePredAndOp0 = [](FCmpInst *I, FCmpInst::Predicate P, Value *X) {
5977    I->setPredicate(P);
5978    I->setOperand(0, X);
5979    return I;
5980  };
5981
5982  switch (I.getPredicate()) {
5983  case FCmpInst::FCMP_UGE:
5984  case FCmpInst::FCMP_OLT:
5985    // fabs(X) >= 0.0 --> true
5986    // fabs(X) <  0.0 --> false
5987    llvm_unreachable("fcmp should have simplified");
5988
5989  case FCmpInst::FCMP_OGT:
5990    // fabs(X) > 0.0 --> X != 0.0
5991    return replacePredAndOp0(&I, FCmpInst::FCMP_ONE, X);
5992
5993  case FCmpInst::FCMP_UGT:
5994    // fabs(X) u> 0.0 --> X u!= 0.0
5995    return replacePredAndOp0(&I, FCmpInst::FCMP_UNE, X);
5996
5997  case FCmpInst::FCMP_OLE:
5998    // fabs(X) <= 0.0 --> X == 0.0
5999    return replacePredAndOp0(&I, FCmpInst::FCMP_OEQ, X);
6000
6001  case FCmpInst::FCMP_ULE:
6002    // fabs(X) u<= 0.0 --> X u== 0.0
6003    return replacePredAndOp0(&I, FCmpInst::FCMP_UEQ, X);
6004
6005  case FCmpInst::FCMP_OGE:
6006    // fabs(X) >= 0.0 --> !isnan(X)
6007    assert(!I.hasNoNaNs() && "fcmp should have simplified");
6008    return replacePredAndOp0(&I, FCmpInst::FCMP_ORD, X);
6009
6010  case FCmpInst::FCMP_ULT:
6011    // fabs(X) u< 0.0 --> isnan(X)
6012    assert(!I.hasNoNaNs() && "fcmp should have simplified");
6013    return replacePredAndOp0(&I, FCmpInst::FCMP_UNO, X);
6014
6015  case FCmpInst::FCMP_OEQ:
6016  case FCmpInst::FCMP_UEQ:
6017  case FCmpInst::FCMP_ONE:
6018  case FCmpInst::FCMP_UNE:
6019  case FCmpInst::FCMP_ORD:
6020  case FCmpInst::FCMP_UNO:
6021    // Look through the fabs() because it doesn't change anything but the sign.
    // fabs(X) == 0.0 --> X == 0.0
    // fabs(X) != 0.0 --> X != 0.0
    // isnan(fabs(X)) --> isnan(X)
    // !isnan(fabs(X)) --> !isnan(X)
6026    return replacePredAndOp0(&I, I.getPredicate(), X);
6027
6028  default:
6029    return nullptr;
6030  }
6031}
6032
6033Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
6034  bool Changed = false;
6035
  /// Orders the operands of the compare so that they are listed from most
  /// complex to least complex.  This puts binary operators before unary
  /// operators, and those before constants.
6039  if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1))) {
6040    I.swapOperands();
6041    Changed = true;
6042  }
6043
6044  const CmpInst::Predicate Pred = I.getPredicate();
6045  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
6046  if (Value *V = SimplifyFCmpInst(Pred, Op0, Op1, I.getFastMathFlags(),
6047                                  SQ.getWithInstruction(&I)))
6048    return replaceInstUsesWith(I, V);
6049
6050  // Simplify 'fcmp pred X, X'
6051  Type *OpType = Op0->getType();
6052  assert(OpType == Op1->getType() && "fcmp with different-typed operands?");
6053  if (Op0 == Op1) {
6054    switch (Pred) {
6055      default: break;
6056    case FCmpInst::FCMP_UNO:    // True if unordered: isnan(X) | isnan(Y)
6057    case FCmpInst::FCMP_ULT:    // True if unordered or less than
6058    case FCmpInst::FCMP_UGT:    // True if unordered or greater than
6059    case FCmpInst::FCMP_UNE:    // True if unordered or not equal
6060      // Canonicalize these to be 'fcmp uno %X, 0.0'.
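      // e.g. 'fcmp ult %x, %x' is true exactly when %x is NaN.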
6061      I.setPredicate(FCmpInst::FCMP_UNO);
6062      I.setOperand(1, Constant::getNullValue(OpType));
6063      return &I;
6064
6065    case FCmpInst::FCMP_ORD:    // True if ordered (no nans)
6066    case FCmpInst::FCMP_OEQ:    // True if ordered and equal
6067    case FCmpInst::FCMP_OGE:    // True if ordered and greater than or equal
6068    case FCmpInst::FCMP_OLE:    // True if ordered and less than or equal
6069      // Canonicalize these to be 'fcmp ord %X, 0.0'.
6070      I.setPredicate(FCmpInst::FCMP_ORD);
6071      I.setOperand(1, Constant::getNullValue(OpType));
6072      return &I;
6073    }
6074  }
6075
6076  // If we're just checking for a NaN (ORD/UNO) and have a non-NaN operand,
6077  // then canonicalize the operand to 0.0.
6078  if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
6079    if (!match(Op0, m_PosZeroFP()) && isKnownNeverNaN(Op0, &TLI)) {
6080      I.setOperand(0, ConstantFP::getNullValue(OpType));
6081      return &I;
6082    }
6083    if (!match(Op1, m_PosZeroFP()) && isKnownNeverNaN(Op1, &TLI)) {
6084      I.setOperand(1, ConstantFP::getNullValue(OpType));
6085      return &I;
6086    }
6087  }
6088
6089  // fcmp pred (fneg X), (fneg Y) -> fcmp swap(pred) X, Y
6090  Value *X, *Y;
6091  if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y))))
6092    return new FCmpInst(I.getSwappedPredicate(), X, Y, "", &I);
6093
6094  // Test if the FCmpInst instruction is used exclusively by a select as
6095  // part of a minimum or maximum operation. If so, refrain from doing
6096  // any other folding. This helps out other analyses which understand
6097  // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
6098  // and CodeGen. And in this case, at least one of the comparison
6099  // operands has at least one user besides the compare (the select),
6100  // which would often largely negate the benefit of folding anyway.
6101  if (I.hasOneUse())
6102    if (SelectInst *SI = dyn_cast<SelectInst>(I.user_back())) {
6103      Value *A, *B;
6104      SelectPatternResult SPR = matchSelectPattern(SI, A, B);
6105      if (SPR.Flavor != SPF_UNKNOWN)
6106        return nullptr;
6107    }
6108
6109  // The sign of 0.0 is ignored by fcmp, so canonicalize to +0.0:
6110  // fcmp Pred X, -0.0 --> fcmp Pred X, 0.0
6111  if (match(Op1, m_AnyZeroFP()) && !match(Op1, m_PosZeroFP())) {
6112    I.setOperand(1, ConstantFP::getNullValue(OpType));
6113    return &I;
6114  }
6115
6116  // Handle fcmp with instruction LHS and constant RHS.
6117  Instruction *LHSI;
6118  Constant *RHSC;
6119  if (match(Op0, m_Instruction(LHSI)) && match(Op1, m_Constant(RHSC))) {
6120    switch (LHSI->getOpcode()) {
6121    case Instruction::PHI:
6122      // Only fold fcmp into the PHI if the phi and fcmp are in the same
6123      // block.  If in the same block, we're encouraging jump threading.  If
6124      // not, we are just pessimizing the code by making an i1 phi.
6125      if (LHSI->getParent() == I.getParent())
6126        if (Instruction *NV = foldOpIntoPhi(I, cast<PHINode>(LHSI)))
6127          return NV;
6128      break;
6129    case Instruction::SIToFP:
6130    case Instruction::UIToFP:
6131      if (Instruction *NV = foldFCmpIntToFPConst(I, LHSI, RHSC))
6132        return NV;
6133      break;
6134    case Instruction::FDiv:
6135      if (Instruction *NV = foldFCmpReciprocalAndZero(I, LHSI, RHSC))
6136        return NV;
6137      break;
6138    case Instruction::Load:
6139      if (auto *GEP = dyn_cast<GetElementPtrInst>(LHSI->getOperand(0)))
6140        if (auto *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
6141          if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
6142              !cast<LoadInst>(LHSI)->isVolatile())
6143            if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, I))
6144              return Res;
6145      break;
    }
6147  }
6148
6149  if (Instruction *R = foldFabsWithFcmpZero(I))
6150    return R;
6151
6152  if (match(Op0, m_FNeg(m_Value(X)))) {
6153    // fcmp pred (fneg X), C --> fcmp swap(pred) X, -C
6154    Constant *C;
6155    if (match(Op1, m_Constant(C))) {
6156      Constant *NegC = ConstantExpr::getFNeg(C);
6157      return new FCmpInst(I.getSwappedPredicate(), X, NegC, "", &I);
6158    }
6159  }
6160
6161  if (match(Op0, m_FPExt(m_Value(X)))) {
6162    // fcmp (fpext X), (fpext Y) -> fcmp X, Y
6163    if (match(Op1, m_FPExt(m_Value(Y))) && X->getType() == Y->getType())
6164      return new FCmpInst(Pred, X, Y, "", &I);
6165
6166    // fcmp (fpext X), C -> fcmp X, (fptrunc C) if fptrunc is lossless
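    // e.g. 'fcmp olt (fpext float %x to double), 1.0' can become
    // 'fcmp olt float %x, 1.0', since 1.0 round-trips through float exactly.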
6167    const APFloat *C;
6168    if (match(Op1, m_APFloat(C))) {
6169      const fltSemantics &FPSem =
6170          X->getType()->getScalarType()->getFltSemantics();
6171      bool Lossy;
6172      APFloat TruncC = *C;
6173      TruncC.convert(FPSem, APFloat::rmNearestTiesToEven, &Lossy);
6174
6175      // Avoid lossy conversions and denormals.
6176      // Zero is a special case that's OK to convert.
6177      APFloat Fabs = TruncC;
6178      Fabs.clearSign();
6179      if (!Lossy &&
6180          ((Fabs.compare(APFloat::getSmallestNormalized(FPSem)) !=
6181            APFloat::cmpLessThan) || Fabs.isZero())) {
6182        Constant *NewC = ConstantFP::get(X->getType(), TruncC);
6183        return new FCmpInst(Pred, X, NewC, "", &I);
6184      }
6185    }
6186  }
6187
6188  if (I.getType()->isVectorTy())
6189    if (Instruction *Res = foldVectorCmp(I, Builder))
6190      return Res;
6191
6192  return Changed ? &I : nullptr;
6193}
6194