1//===- ConstantFold.cpp - LLVM constant folder ----------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements folding of constants for LLVM.  This implements the
10// (internal) ConstantFold.h interface, which is used by the
11// ConstantExpr::get* methods to automatically fold constants when possible.
12//
13// Constant folding is currently implemented in two pieces: the pieces that
14// don't need DataLayout, and the pieces that do. This split avoids a
15// dependence of IR on Target.
16//
17//===----------------------------------------------------------------------===//
18
19#include "ConstantFold.h"
20#include "llvm/ADT/APSInt.h"
21#include "llvm/ADT/SmallVector.h"
22#include "llvm/IR/Constants.h"
23#include "llvm/IR/DerivedTypes.h"
24#include "llvm/IR/Function.h"
25#include "llvm/IR/GetElementPtrTypeIterator.h"
26#include "llvm/IR/GlobalAlias.h"
27#include "llvm/IR/GlobalVariable.h"
28#include "llvm/IR/Instructions.h"
29#include "llvm/IR/Module.h"
30#include "llvm/IR/Operator.h"
31#include "llvm/IR/PatternMatch.h"
32#include "llvm/Support/ErrorHandling.h"
33#include "llvm/Support/ManagedStatic.h"
34#include "llvm/Support/MathExtras.h"
35using namespace llvm;
36using namespace llvm::PatternMatch;
37
38//===----------------------------------------------------------------------===//
39//                ConstantFold*Instruction Implementations
40//===----------------------------------------------------------------------===//
41
42/// Convert the specified vector Constant node to the specified vector type.
43/// At this point, we know that the elements of the input vector constant are
44/// all simple integer or FP values.
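/// For example, bitcasting <2 x i64> to <2 x double> is folded by bitcasting
/// each element individually.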
45static Constant *BitCastConstantVector(Constant *CV, VectorType *DstTy) {
46
47  if (CV->isAllOnesValue()) return Constant::getAllOnesValue(DstTy);
48  if (CV->isNullValue()) return Constant::getNullValue(DstTy);
49
50  // Do not iterate on scalable vectors. The number of elements is unknown at
51  // compile time.
52  if (isa<ScalableVectorType>(DstTy))
53    return nullptr;
54
55  // If this cast changes the element count, then we can't handle it here:
56  // doing so requires endianness information.  This should be handled by
57  // Analysis/ConstantFolding.cpp.
58  unsigned NumElts = cast<FixedVectorType>(DstTy)->getNumElements();
59  if (NumElts != cast<FixedVectorType>(CV->getType())->getNumElements())
60    return nullptr;
61
62  Type *DstEltTy = DstTy->getElementType();
63  // Fast path for splatted constants.
64  if (Constant *Splat = CV->getSplatValue()) {
65    return ConstantVector::getSplat(DstTy->getElementCount(),
66                                    ConstantExpr::getBitCast(Splat, DstEltTy));
67  }
68
69  SmallVector<Constant*, 16> Result;
70  Type *Ty = IntegerType::get(CV->getContext(), 32);
71  for (unsigned i = 0; i != NumElts; ++i) {
72    Constant *C =
73      ConstantExpr::getExtractElement(CV, ConstantInt::get(Ty, i));
74    C = ConstantExpr::getBitCast(C, DstEltTy);
75    Result.push_back(C);
76  }
77
78  return ConstantVector::get(Result);
79}
80
81/// This function determines which opcode to use to fold two constant cast
82/// expressions together. It uses CastInst::isEliminableCastPair to determine
83/// the opcode. Consequently, it is just a wrapper around that function,
84/// determining whether it is valid to fold a cast of a cast.
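/// For example, a bitcast of a bitcast is foldable to a single bitcast, while
/// a sext of a trunc is in general not eliminable.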
85static unsigned
86foldConstantCastPair(
87  unsigned opc,          ///< opcode of the second cast constant expression
88  ConstantExpr *Op,      ///< the first cast constant expression
89  Type *DstTy            ///< destination type of the second cast
90) {
91  assert(Op && Op->isCast() && "Can't fold cast of cast without a cast!");
92  assert(DstTy && DstTy->isFirstClassType() && "Invalid cast destination type");
93  assert(CastInst::isCast(opc) && "Invalid cast opcode");
94
95  // The types and opcodes for the two Cast constant expressions
96  Type *SrcTy = Op->getOperand(0)->getType();
97  Type *MidTy = Op->getType();
98  Instruction::CastOps firstOp = Instruction::CastOps(Op->getOpcode());
99  Instruction::CastOps secondOp = Instruction::CastOps(opc);
100
101  // Assume that pointers are never more than 64 bits wide, and only use this
102  // for the middle type. Otherwise we could end up folding away illegal
103  // bitcasts between address spaces with different sizes.
104  IntegerType *FakeIntPtrTy = Type::getInt64Ty(DstTy->getContext());
105
106  // Let CastInst::isEliminableCastPair do the heavy lifting.
107  return CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy, DstTy,
108                                        nullptr, FakeIntPtrTy, nullptr);
109}
110
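/// Try to fold a bitcast of constant V to DestTy without using DataLayout.
/// Returns null if the bitcast cannot be folded here; DataLayout-dependent
/// cases are typically handled by Analysis/ConstantFolding.cpp instead.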
111static Constant *FoldBitCast(Constant *V, Type *DestTy) {
112  Type *SrcTy = V->getType();
113  if (SrcTy == DestTy)
114    return V; // no-op cast
115
116  // Check to see if we are casting a pointer to an aggregate to a pointer to
117  // the first element.  If so, return the appropriate GEP instruction.
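  // For example, bitcasting {i32, i32}* to i32* is folded to
  // getelementptr inbounds ({i32, i32}, {i32, i32}* V, i32 0, i32 0).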
118  if (PointerType *PTy = dyn_cast<PointerType>(V->getType()))
119    if (PointerType *DPTy = dyn_cast<PointerType>(DestTy))
120      if (PTy->getAddressSpace() == DPTy->getAddressSpace()
121          && PTy->getElementType()->isSized()) {
122        SmallVector<Value*, 8> IdxList;
123        Value *Zero =
124          Constant::getNullValue(Type::getInt32Ty(DPTy->getContext()));
125        IdxList.push_back(Zero);
126        Type *ElTy = PTy->getElementType();
127        while (ElTy && ElTy != DPTy->getElementType()) {
128          ElTy = GetElementPtrInst::getTypeAtIndex(ElTy, (uint64_t)0);
129          IdxList.push_back(Zero);
130        }
131
132        if (ElTy == DPTy->getElementType())
133          // This GEP is inbounds because all indices are zero.
134          return ConstantExpr::getInBoundsGetElementPtr(PTy->getElementType(),
135                                                        V, IdxList);
136      }
137
138  // Handle casts from one vector constant to another.  We know that the src
139  // and dest types have the same size (otherwise it's an illegal cast).
140  if (VectorType *DestPTy = dyn_cast<VectorType>(DestTy)) {
141    if (VectorType *SrcTy = dyn_cast<VectorType>(V->getType())) {
142      assert(DestPTy->getPrimitiveSizeInBits() ==
143                 SrcTy->getPrimitiveSizeInBits() &&
144             "Not cast between same sized vectors!");
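      // The inner SrcTy shadows the outer SrcTy and is only needed for the
      // assert above; clear it so it is not accidentally used below.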
145      SrcTy = nullptr;
146      // First, check for null.  Undef is already handled.
147      if (isa<ConstantAggregateZero>(V))
148        return Constant::getNullValue(DestTy);
149
150      // Handle ConstantVector and ConstantDataVector.
151      return BitCastConstantVector(V, DestPTy);
152    }
153
154    // Canonicalize scalar-to-vector bitcasts into vector-to-vector bitcasts
155    // This allows for other simplifications (although some of them
156    // can only be handled by Analysis/ConstantFolding.cpp).
157    if (isa<ConstantInt>(V) || isa<ConstantFP>(V))
158      return ConstantExpr::getBitCast(ConstantVector::get(V), DestPTy);
159  }
160
161  // Finally, handle bitcasts of simple scalar constants (null pointers,
162  // integers and FP values).  Anything else is not folded here.
163  if (isa<ConstantPointerNull>(V))  // ptr->ptr cast.
164    return ConstantPointerNull::get(cast<PointerType>(DestTy));
165
166  // Handle integral constant input.
167  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
168    if (DestTy->isIntegerTy())
169      // Integral -> Integral. This is a no-op because the bit widths must
170      // be the same. Consequently, we just fold to V.
171      return V;
172
173    // See note below regarding the PPC_FP128 restriction.
174    if (DestTy->isFloatingPointTy() && !DestTy->isPPC_FP128Ty())
175      return ConstantFP::get(DestTy->getContext(),
176                             APFloat(DestTy->getFltSemantics(),
177                                     CI->getValue()));
178
179    // Otherwise, we can't fold this bitcast here (e.g. to ppc_fp128 or x86_mmx).
180    return nullptr;
181  }
182
183  // Handle ConstantFP input: FP -> Integral.
184  if (ConstantFP *FP = dyn_cast<ConstantFP>(V)) {
185    // PPC_FP128 is really the sum of two consecutive doubles, where the first
186    // double is always stored first in memory, regardless of the target
187    // endianness. The memory layout of i128, however, depends on the target
188    // endianness, and so we can't fold this without target endianness
189    // information. This should instead be handled by
190    // Analysis/ConstantFolding.cpp
191    if (FP->getType()->isPPC_FP128Ty())
192      return nullptr;
193
194    // Make sure dest type is compatible with the folded integer constant.
195    if (!DestTy->isIntegerTy())
196      return nullptr;
197
198    return ConstantInt::get(FP->getContext(),
199                            FP->getValueAPF().bitcastToAPInt());
200  }
201
202  return nullptr;
203}
204
205
206/// C is an integer constant that only has a subset of its bytes used.
207/// The bytes used are indicated by ByteStart (which is the first byte used,
208/// counting from the least significant byte) and ByteSize, which is the number
209/// of bytes used.
210///
211/// This function analyzes the specified constant to see if the specified byte
212/// range can be returned as a simplified constant.  If so, the constant is
213/// returned, otherwise null is returned.
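/// For example, extracting ByteStart = 1, ByteSize = 2 from the i32 constant
/// 0x11223344 yields the i16 constant 0x2233.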
214static Constant *ExtractConstantBytes(Constant *C, unsigned ByteStart,
215                                      unsigned ByteSize) {
216  assert(C->getType()->isIntegerTy() &&
217         (cast<IntegerType>(C->getType())->getBitWidth() & 7) == 0 &&
218         "Non-byte sized integer input");
219  unsigned CSize = cast<IntegerType>(C->getType())->getBitWidth()/8;
220  assert(ByteSize && "Must be accessing some piece");
221  assert(ByteStart+ByteSize <= CSize && "Extracting invalid piece from input");
222  assert(ByteSize != CSize && "Should not extract everything");
223
224  // Constant Integers are simple.
225  if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
226    APInt V = CI->getValue();
227    if (ByteStart)
228      V.lshrInPlace(ByteStart*8);
229    V = V.trunc(ByteSize*8);
230    return ConstantInt::get(CI->getContext(), V);
231  }
232
233  // If the input is a constant expr, we might be able to recursively simplify.
234  // If not, we definitely can't do anything.
235  ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
236  if (!CE) return nullptr;
237
238  switch (CE->getOpcode()) {
239  default: return nullptr;
240  case Instruction::Or: {
241    Constant *RHS = ExtractConstantBytes(CE->getOperand(1), ByteStart,ByteSize);
242    if (!RHS)
243      return nullptr;
244
245    // X | -1 -> -1.
246    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS))
247      if (RHSC->isMinusOne())
248        return RHSC;
249
250    Constant *LHS = ExtractConstantBytes(CE->getOperand(0), ByteStart,ByteSize);
251    if (!LHS)
252      return nullptr;
253    return ConstantExpr::getOr(LHS, RHS);
254  }
255  case Instruction::And: {
256    Constant *RHS = ExtractConstantBytes(CE->getOperand(1), ByteStart,ByteSize);
257    if (!RHS)
258      return nullptr;
259
260    // X & 0 -> 0.
261    if (RHS->isNullValue())
262      return RHS;
263
264    Constant *LHS = ExtractConstantBytes(CE->getOperand(0), ByteStart,ByteSize);
265    if (!LHS)
266      return nullptr;
267    return ConstantExpr::getAnd(LHS, RHS);
268  }
269  case Instruction::LShr: {
270    ConstantInt *Amt = dyn_cast<ConstantInt>(CE->getOperand(1));
271    if (!Amt)
272      return nullptr;
273    APInt ShAmt = Amt->getValue();
274    // Cannot analyze non-byte shifts.
275    if ((ShAmt & 7) != 0)
276      return nullptr;
277    ShAmt.lshrInPlace(3);
278
279    // If the extract is known to be all zeros, return zero.
280    if (ShAmt.uge(CSize - ByteStart))
281      return Constant::getNullValue(
282          IntegerType::get(CE->getContext(), ByteSize * 8));
283    // If the extract is known to be fully in the input, extract it.
284    if (ShAmt.ule(CSize - (ByteStart + ByteSize)))
285      return ExtractConstantBytes(CE->getOperand(0),
286                                  ByteStart + ShAmt.getZExtValue(), ByteSize);
287
288    // TODO: Handle the 'partially zero' case.
289    return nullptr;
290  }
291
292  case Instruction::Shl: {
293    ConstantInt *Amt = dyn_cast<ConstantInt>(CE->getOperand(1));
294    if (!Amt)
295      return nullptr;
296    APInt ShAmt = Amt->getValue();
297    // Cannot analyze non-byte shifts.
298    if ((ShAmt & 7) != 0)
299      return nullptr;
300    ShAmt.lshrInPlace(3);
301
302    // If the extract is known to be all zeros, return zero.
303    if (ShAmt.uge(ByteStart + ByteSize))
304      return Constant::getNullValue(
305          IntegerType::get(CE->getContext(), ByteSize * 8));
306    // If the extract is known to be fully in the input, extract it.
307    if (ShAmt.ule(ByteStart))
308      return ExtractConstantBytes(CE->getOperand(0),
309                                  ByteStart - ShAmt.getZExtValue(), ByteSize);
310
311    // TODO: Handle the 'partially zero' case.
312    return nullptr;
313  }
314
315  case Instruction::ZExt: {
316    unsigned SrcBitSize =
317      cast<IntegerType>(CE->getOperand(0)->getType())->getBitWidth();
318
319    // If extracting something that is completely zero, return 0.
320    if (ByteStart*8 >= SrcBitSize)
321      return Constant::getNullValue(IntegerType::get(CE->getContext(),
322                                                     ByteSize*8));
323
324    // If exactly extracting the input, return it.
325    if (ByteStart == 0 && ByteSize*8 == SrcBitSize)
326      return CE->getOperand(0);
327
328    // If extracting something completely in the input, if the input is a
329    // multiple of 8 bits, recurse.
330    if ((SrcBitSize&7) == 0 && (ByteStart+ByteSize)*8 <= SrcBitSize)
331      return ExtractConstantBytes(CE->getOperand(0), ByteStart, ByteSize);
332
333    // Otherwise, if extracting a subset of an input whose width is not a
334    // multiple of 8 bits, do a shift and trunc to get the bits.
335    if ((ByteStart+ByteSize)*8 < SrcBitSize) {
336      assert((SrcBitSize&7) && "Shouldn't get byte sized case here");
337      Constant *Res = CE->getOperand(0);
338      if (ByteStart)
339        Res = ConstantExpr::getLShr(Res,
340                                 ConstantInt::get(Res->getType(), ByteStart*8));
341      return ConstantExpr::getTrunc(Res, IntegerType::get(C->getContext(),
342                                                          ByteSize*8));
343    }
344
345    // TODO: Handle the 'partially zero' case.
346    return nullptr;
347  }
348  }
349}
350
351/// Return a ConstantExpr with type DestTy for sizeof on Ty, with any known
352/// factors factored out. If Folded is false, return null if no factoring was
353/// possible, to avoid endlessly bouncing an unfoldable expression back into the
354/// top-level folder.
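/// For example, sizeof([4 x i32]) is returned as 4 * sizeof(i32), using a
/// no-unsigned-wrap multiply.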
355static Constant *getFoldedSizeOf(Type *Ty, Type *DestTy, bool Folded) {
356  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
357    Constant *N = ConstantInt::get(DestTy, ATy->getNumElements());
358    Constant *E = getFoldedSizeOf(ATy->getElementType(), DestTy, true);
359    return ConstantExpr::getNUWMul(E, N);
360  }
361
362  if (StructType *STy = dyn_cast<StructType>(Ty))
363    if (!STy->isPacked()) {
364      unsigned NumElems = STy->getNumElements();
365      // An empty struct has size zero.
366      if (NumElems == 0)
367        return ConstantExpr::getNullValue(DestTy);
368      // Check for a struct with all members having the same size.
369      Constant *MemberSize =
370        getFoldedSizeOf(STy->getElementType(0), DestTy, true);
371      bool AllSame = true;
372      for (unsigned i = 1; i != NumElems; ++i)
373        if (MemberSize !=
374            getFoldedSizeOf(STy->getElementType(i), DestTy, true)) {
375          AllSame = false;
376          break;
377        }
378      if (AllSame) {
379        Constant *N = ConstantInt::get(DestTy, NumElems);
380        return ConstantExpr::getNUWMul(MemberSize, N);
381      }
382    }
383
384  // Pointer size doesn't depend on the pointee type, so canonicalize the
385  // pointer to an arbitrary pointee type.
386  if (PointerType *PTy = dyn_cast<PointerType>(Ty))
387    if (!PTy->getElementType()->isIntegerTy(1))
388      return
389        getFoldedSizeOf(PointerType::get(IntegerType::get(PTy->getContext(), 1),
390                                         PTy->getAddressSpace()),
391                        DestTy, true);
392
393  // If there's no interesting folding happening, bail so that we don't create
394  // a constant that looks like it needs folding but really doesn't.
395  if (!Folded)
396    return nullptr;
397
398  // Base case: Get a regular sizeof expression.
399  Constant *C = ConstantExpr::getSizeOf(Ty);
400  C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
401                                                    DestTy, false),
402                            C, DestTy);
403  return C;
404}
405
406/// Return a ConstantExpr with type DestTy for alignof on Ty, with any known
407/// factors factored out. If Folded is false, return null if no factoring was
408/// possible, to avoid endlessly bouncing an unfoldable expression back into the
409/// top-level folder.
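/// For example, alignof([8 x double]) folds to alignof(double), and alignof of
/// any packed struct folds to 1.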
410static Constant *getFoldedAlignOf(Type *Ty, Type *DestTy, bool Folded) {
411  // The alignment of an array is equal to the alignment of the
412  // array element. Note that this is not always true for vectors.
413  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
414    Constant *C = ConstantExpr::getAlignOf(ATy->getElementType());
415    C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
416                                                      DestTy,
417                                                      false),
418                              C, DestTy);
419    return C;
420  }
421
422  if (StructType *STy = dyn_cast<StructType>(Ty)) {
423    // Packed structs always have an alignment of 1.
424    if (STy->isPacked())
425      return ConstantInt::get(DestTy, 1);
426
427    // Otherwise, struct alignment is the maximum alignment of any member.
428    // Without target data, we can't compare much, but we can check to see
429    // if all the members have the same alignment.
430    unsigned NumElems = STy->getNumElements();
431    // An empty struct has minimal alignment.
432    if (NumElems == 0)
433      return ConstantInt::get(DestTy, 1);
434    // Check for a struct with all members having the same alignment.
435    Constant *MemberAlign =
436      getFoldedAlignOf(STy->getElementType(0), DestTy, true);
437    bool AllSame = true;
438    for (unsigned i = 1; i != NumElems; ++i)
439      if (MemberAlign != getFoldedAlignOf(STy->getElementType(i), DestTy, true)) {
440        AllSame = false;
441        break;
442      }
443    if (AllSame)
444      return MemberAlign;
445  }
446
447  // Pointer alignment doesn't depend on the pointee type, so canonicalize the
448  // pointer to an arbitrary pointee type.
449  if (PointerType *PTy = dyn_cast<PointerType>(Ty))
450    if (!PTy->getElementType()->isIntegerTy(1))
451      return
452        getFoldedAlignOf(PointerType::get(IntegerType::get(PTy->getContext(),
453                                                           1),
454                                          PTy->getAddressSpace()),
455                         DestTy, true);
456
457  // If there's no interesting folding happening, bail so that we don't create
458  // a constant that looks like it needs folding but really doesn't.
459  if (!Folded)
460    return nullptr;
461
462  // Base case: Get a regular alignof expression.
463  Constant *C = ConstantExpr::getAlignOf(Ty);
464  C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
465                                                    DestTy, false),
466                            C, DestTy);
467  return C;
468}
469
470/// Return a ConstantExpr with type DestTy for offsetof on Ty and FieldNo, with
471/// any known factors factored out. If Folded is false, return null if no
472/// factoring was possible, to avoid endlessly bouncing an unfoldable expression
473/// back into the top-level folder.
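/// For example, offsetof([n x i32], Field) folds to Field * sizeof(i32), using
/// a no-unsigned-wrap multiply.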
474static Constant *getFoldedOffsetOf(Type *Ty, Constant *FieldNo, Type *DestTy,
475                                   bool Folded) {
476  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
477    Constant *N = ConstantExpr::getCast(CastInst::getCastOpcode(FieldNo, false,
478                                                                DestTy, false),
479                                        FieldNo, DestTy);
480    Constant *E = getFoldedSizeOf(ATy->getElementType(), DestTy, true);
481    return ConstantExpr::getNUWMul(E, N);
482  }
483
484  if (StructType *STy = dyn_cast<StructType>(Ty))
485    if (!STy->isPacked()) {
486      unsigned NumElems = STy->getNumElements();
487      // An empty struct has no members.
488      if (NumElems == 0)
489        return nullptr;
490      // Check for a struct with all members having the same size.
491      Constant *MemberSize =
492        getFoldedSizeOf(STy->getElementType(0), DestTy, true);
493      bool AllSame = true;
494      for (unsigned i = 1; i != NumElems; ++i)
495        if (MemberSize !=
496            getFoldedSizeOf(STy->getElementType(i), DestTy, true)) {
497          AllSame = false;
498          break;
499        }
500      if (AllSame) {
501        Constant *N = ConstantExpr::getCast(CastInst::getCastOpcode(FieldNo,
502                                                                    false,
503                                                                    DestTy,
504                                                                    false),
505                                            FieldNo, DestTy);
506        return ConstantExpr::getNUWMul(MemberSize, N);
507      }
508    }
509
510  // If there's no interesting folding happening, bail so that we don't create
511  // a constant that looks like it needs folding but really doesn't.
512  if (!Folded)
513    return nullptr;
514
515  // Base case: Get a regular offsetof expression.
516  Constant *C = ConstantExpr::getOffsetOf(Ty, FieldNo);
517  C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
518                                                    DestTy, false),
519                            C, DestTy);
520  return C;
521}
522
523Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V,
524                                            Type *DestTy) {
525  if (isa<UndefValue>(V)) {
526    // zext(undef) = 0, because the top bits will be zero.
527    // sext(undef) = 0, because the top bits will all be the same.
528    // [us]itofp(undef) = 0, because the result value is bounded.
529    if (opc == Instruction::ZExt || opc == Instruction::SExt ||
530        opc == Instruction::UIToFP || opc == Instruction::SIToFP)
531      return Constant::getNullValue(DestTy);
532    return UndefValue::get(DestTy);
533  }
534
535  if (V->isNullValue() && !DestTy->isX86_MMXTy() &&
536      opc != Instruction::AddrSpaceCast)
537    return Constant::getNullValue(DestTy);
538
539  // If the cast operand is a constant expression, there are a few things we
540  // can do to try to simplify it.
541  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
542    if (CE->isCast()) {
543      // Try hard to fold cast of cast because they are often eliminable.
544      if (unsigned newOpc = foldConstantCastPair(opc, CE, DestTy))
545        return ConstantExpr::getCast(newOpc, CE->getOperand(0), DestTy);
546    } else if (CE->getOpcode() == Instruction::GetElementPtr &&
547               // Do not fold addrspacecast (gep 0, .., 0); doing so might
548               // leave the addrspacecast uncanonicalized.
549               opc != Instruction::AddrSpaceCast &&
550               // Do not fold bitcast (gep) with inrange index, as this loses
551               // information.
552               !cast<GEPOperator>(CE)->getInRangeIndex().hasValue() &&
553               // Do not fold if the gep type is a vector, as bitcasting
554               // operand 0 of a vector gep will result in a bitcast between
555               // different sizes.
556               !CE->getType()->isVectorTy()) {
557      // If all of the indexes in the GEP are null values, there is no pointer
558      // adjustment going on.  We might as well cast the source pointer.
559      bool isAllNull = true;
560      for (unsigned i = 1, e = CE->getNumOperands(); i != e; ++i)
561        if (!CE->getOperand(i)->isNullValue()) {
562          isAllNull = false;
563          break;
564        }
565      if (isAllNull)
566        // This is casting one pointer type to another, always BitCast
567        return ConstantExpr::getPointerCast(CE->getOperand(0), DestTy);
568    }
569  }
570
571  // If the cast operand is a constant vector, perform the cast by
572  // operating on each element. In the case of bitcasts, the element
573  // count may be mismatched; don't attempt to handle that here.
574  if ((isa<ConstantVector>(V) || isa<ConstantDataVector>(V)) &&
575      DestTy->isVectorTy() &&
576      cast<FixedVectorType>(DestTy)->getNumElements() ==
577          cast<FixedVectorType>(V->getType())->getNumElements()) {
578    VectorType *DestVecTy = cast<VectorType>(DestTy);
579    Type *DstEltTy = DestVecTy->getElementType();
580    // Fast path for splatted constants.
581    if (Constant *Splat = V->getSplatValue()) {
582      return ConstantVector::getSplat(
583          cast<VectorType>(DestTy)->getElementCount(),
584          ConstantExpr::getCast(opc, Splat, DstEltTy));
585    }
586    SmallVector<Constant *, 16> res;
587    Type *Ty = IntegerType::get(V->getContext(), 32);
588    for (unsigned i = 0,
589                  e = cast<FixedVectorType>(V->getType())->getNumElements();
590         i != e; ++i) {
591      Constant *C =
592        ConstantExpr::getExtractElement(V, ConstantInt::get(Ty, i));
593      res.push_back(ConstantExpr::getCast(opc, C, DstEltTy));
594    }
595    return ConstantVector::get(res);
596  }
597
598  // We actually have to do a cast now. Perform the cast according to the
599  // opcode specified.
600  switch (opc) {
601  default:
602    llvm_unreachable("Failed to cast constant expression");
603  case Instruction::FPTrunc:
604  case Instruction::FPExt:
605    if (ConstantFP *FPC = dyn_cast<ConstantFP>(V)) {
606      bool ignored;
607      APFloat Val = FPC->getValueAPF();
608      Val.convert(DestTy->isHalfTy() ? APFloat::IEEEhalf() :
609                  DestTy->isFloatTy() ? APFloat::IEEEsingle() :
610                  DestTy->isDoubleTy() ? APFloat::IEEEdouble() :
611                  DestTy->isX86_FP80Ty() ? APFloat::x87DoubleExtended() :
612                  DestTy->isFP128Ty() ? APFloat::IEEEquad() :
613                  DestTy->isPPC_FP128Ty() ? APFloat::PPCDoubleDouble() :
614                  APFloat::Bogus(),
615                  APFloat::rmNearestTiesToEven, &ignored);
616      return ConstantFP::get(V->getContext(), Val);
617    }
618    return nullptr; // Can't fold.
619  case Instruction::FPToUI:
620  case Instruction::FPToSI:
621    if (ConstantFP *FPC = dyn_cast<ConstantFP>(V)) {
622      const APFloat &V = FPC->getValueAPF();
623      bool ignored;
624      uint32_t DestBitWidth = cast<IntegerType>(DestTy)->getBitWidth();
625      APSInt IntVal(DestBitWidth, opc == Instruction::FPToUI);
626      if (APFloat::opInvalidOp ==
627          V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored)) {
628        // Undefined behavior invoked - the destination type can't represent
629        // the input constant.
630        return UndefValue::get(DestTy);
631      }
632      return ConstantInt::get(FPC->getContext(), IntVal);
633    }
634    return nullptr; // Can't fold.
635  case Instruction::IntToPtr:   // always treated as unsigned
636    if (V->isNullValue())       // Is it an integral null value?
637      return ConstantPointerNull::get(cast<PointerType>(DestTy));
638    return nullptr;             // Other integer constants cannot be folded here.
639  case Instruction::PtrToInt:   // always treated as unsigned
640    // Is it a null pointer value?
641    if (V->isNullValue())
642      return ConstantInt::get(DestTy, 0);
643    // If this is a sizeof-like expression, pull out multiplications by
644    // known factors to expose them to subsequent folding. Do the same for
645    // alignof-like and offsetof-like expressions.
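    // These expressions are the forms produced by ConstantExpr::getSizeOf,
    // getAlignOf and getOffsetOf: a ptrtoint of a gep from a null base, e.g.
    //   sizeof(Ty)          ~ ptrtoint (gep Ty* null, 1)
    //   alignof(Ty)         ~ ptrtoint (gep {i1, Ty}* null, 0, 1)
    //   offsetof(Ty, Field) ~ ptrtoint (gep Ty* null, 0, Field)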
646    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
647      if (CE->getOpcode() == Instruction::GetElementPtr &&
648          CE->getOperand(0)->isNullValue()) {
649        // FIXME: Looks like getFoldedSizeOf(), getFoldedOffsetOf() and
650        // getFoldedAlignOf() don't handle the case when DestTy is a vector of
651        // pointers yet. We end up in asserts in CastInst::getCastOpcode (see
652        // test/Analysis/ConstantFolding/cast-vector.ll). I've only seen this
653        // happen in one "real" C-code test case, so it does not seem to be an
654        // important optimization to handle vectors here. For now, simply bail
655        // out.
656        if (DestTy->isVectorTy())
657          return nullptr;
658        GEPOperator *GEPO = cast<GEPOperator>(CE);
659        Type *Ty = GEPO->getSourceElementType();
660        if (CE->getNumOperands() == 2) {
661          // Handle a sizeof-like expression.
662          Constant *Idx = CE->getOperand(1);
663          bool isOne = isa<ConstantInt>(Idx) && cast<ConstantInt>(Idx)->isOne();
664          if (Constant *C = getFoldedSizeOf(Ty, DestTy, !isOne)) {
665            Idx = ConstantExpr::getCast(CastInst::getCastOpcode(Idx, true,
666                                                                DestTy, false),
667                                        Idx, DestTy);
668            return ConstantExpr::getMul(C, Idx);
669          }
670        } else if (CE->getNumOperands() == 3 &&
671                   CE->getOperand(1)->isNullValue()) {
672          // Handle an alignof-like expression.
673          if (StructType *STy = dyn_cast<StructType>(Ty))
674            if (!STy->isPacked()) {
675              ConstantInt *CI = cast<ConstantInt>(CE->getOperand(2));
676              if (CI->isOne() &&
677                  STy->getNumElements() == 2 &&
678                  STy->getElementType(0)->isIntegerTy(1)) {
679                return getFoldedAlignOf(STy->getElementType(1), DestTy, false);
680              }
681            }
682          // Handle an offsetof-like expression.
683          if (Ty->isStructTy() || Ty->isArrayTy()) {
684            if (Constant *C = getFoldedOffsetOf(Ty, CE->getOperand(2),
685                                                DestTy, false))
686              return C;
687          }
688        }
689      }
690    // Other pointer constants cannot be folded here.
691    return nullptr;
692  case Instruction::UIToFP:
693  case Instruction::SIToFP:
694    if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
695      const APInt &api = CI->getValue();
696      APFloat apf(DestTy->getFltSemantics(),
697                  APInt::getNullValue(DestTy->getPrimitiveSizeInBits()));
698      apf.convertFromAPInt(api, opc==Instruction::SIToFP,
699                           APFloat::rmNearestTiesToEven);
700      return ConstantFP::get(V->getContext(), apf);
701    }
702    return nullptr;
703  case Instruction::ZExt:
704    if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
705      uint32_t BitWidth = cast<IntegerType>(DestTy)->getBitWidth();
706      return ConstantInt::get(V->getContext(),
707                              CI->getValue().zext(BitWidth));
708    }
709    return nullptr;
710  case Instruction::SExt:
711    if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
712      uint32_t BitWidth = cast<IntegerType>(DestTy)->getBitWidth();
713      return ConstantInt::get(V->getContext(),
714                              CI->getValue().sext(BitWidth));
715    }
716    return nullptr;
717  case Instruction::Trunc: {
718    if (V->getType()->isVectorTy())
719      return nullptr;
720
721    uint32_t DestBitWidth = cast<IntegerType>(DestTy)->getBitWidth();
722    if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
723      return ConstantInt::get(V->getContext(),
724                              CI->getValue().trunc(DestBitWidth));
725    }
726
727    // The input must be a constantexpr.  See if we can simplify this based on
728    // the bytes we are demanding.  Only do this if the source and dest widths
729    // are whole multiples of a byte.
730    if ((DestBitWidth & 7) == 0 &&
731        (cast<IntegerType>(V->getType())->getBitWidth() & 7) == 0)
732      if (Constant *Res = ExtractConstantBytes(V, 0, DestBitWidth / 8))
733        return Res;
734
735    return nullptr;
736  }
737  case Instruction::BitCast:
738    return FoldBitCast(V, DestTy);
739  case Instruction::AddrSpaceCast:
740    return nullptr;
741  }
742}
743
744Constant *llvm::ConstantFoldSelectInstruction(Constant *Cond,
745                                              Constant *V1, Constant *V2) {
746  // Check for i1 and vector true/false conditions.
747  if (Cond->isNullValue()) return V2;
748  if (Cond->isAllOnesValue()) return V1;
749
750  // If the condition is a vector constant, fold the result elementwise.
751  if (ConstantVector *CondV = dyn_cast<ConstantVector>(Cond)) {
752    auto *V1VTy = CondV->getType();
753    SmallVector<Constant*, 16> Result;
754    Type *Ty = IntegerType::get(CondV->getContext(), 32);
755    for (unsigned i = 0, e = V1VTy->getNumElements(); i != e; ++i) {
756      Constant *V;
757      Constant *V1Element = ConstantExpr::getExtractElement(V1,
758                                                    ConstantInt::get(Ty, i));
759      Constant *V2Element = ConstantExpr::getExtractElement(V2,
760                                                    ConstantInt::get(Ty, i));
761      auto *Cond = cast<Constant>(CondV->getOperand(i));
762      if (V1Element == V2Element) {
763        V = V1Element;
764      } else if (isa<UndefValue>(Cond)) {
765        V = isa<UndefValue>(V1Element) ? V1Element : V2Element;
766      } else {
767        if (!isa<ConstantInt>(Cond)) break;
768        V = Cond->isNullValue() ? V2Element : V1Element;
769      }
770      Result.push_back(V);
771    }
772
773    // If we were able to build the vector, return it.
774    if (Result.size() == V1VTy->getNumElements())
775      return ConstantVector::get(Result);
776  }
777
778  if (isa<UndefValue>(Cond)) {
779    if (isa<UndefValue>(V1)) return V1;
780    return V2;
781  }
782  if (isa<UndefValue>(V1)) return V2;
783  if (isa<UndefValue>(V2)) return V1;
784  if (V1 == V2) return V1;
785
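  // select Cond, (select Cond, X, Y), Z -> select Cond, X, Z
  // select Cond, X, (select Cond, Y, Z) -> select Cond, X, Z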
786  if (ConstantExpr *TrueVal = dyn_cast<ConstantExpr>(V1)) {
787    if (TrueVal->getOpcode() == Instruction::Select)
788      if (TrueVal->getOperand(0) == Cond)
789        return ConstantExpr::getSelect(Cond, TrueVal->getOperand(1), V2);
790  }
791  if (ConstantExpr *FalseVal = dyn_cast<ConstantExpr>(V2)) {
792    if (FalseVal->getOpcode() == Instruction::Select)
793      if (FalseVal->getOperand(0) == Cond)
794        return ConstantExpr::getSelect(Cond, V1, FalseVal->getOperand(2));
795  }
796
797  return nullptr;
798}
799
800Constant *llvm::ConstantFoldExtractElementInstruction(Constant *Val,
801                                                      Constant *Idx) {
802  auto *ValVTy = cast<VectorType>(Val->getType());
803
804  // extractelt undef, C -> undef
805  // extractelt C, undef -> undef
806  if (isa<UndefValue>(Val) || isa<UndefValue>(Idx))
807    return UndefValue::get(ValVTy->getElementType());
808
809  auto *CIdx = dyn_cast<ConstantInt>(Idx);
810  if (!CIdx)
811    return nullptr;
812
813  if (auto *ValFVTy = dyn_cast<FixedVectorType>(Val->getType())) {
814    // ee({w,x,y,z}, wrong_value) -> undef
815    if (CIdx->uge(ValFVTy->getNumElements()))
816      return UndefValue::get(ValFVTy->getElementType());
817  }
818
819  // ee (gep (ptr, idx0, ...), idx) -> gep (ee (ptr, idx), ee (idx0, idx), ...)
820  if (auto *CE = dyn_cast<ConstantExpr>(Val)) {
821    if (CE->getOpcode() == Instruction::GetElementPtr) {
822      SmallVector<Constant *, 8> Ops;
823      Ops.reserve(CE->getNumOperands());
824      for (unsigned i = 0, e = CE->getNumOperands(); i != e; ++i) {
825        Constant *Op = CE->getOperand(i);
826        if (Op->getType()->isVectorTy()) {
827          Constant *ScalarOp = ConstantExpr::getExtractElement(Op, Idx);
828          if (!ScalarOp)
829            return nullptr;
830          Ops.push_back(ScalarOp);
831        } else
832          Ops.push_back(Op);
833      }
834      return CE->getWithOperands(Ops, ValVTy->getElementType(), false,
835                                 Ops[0]->getType()->getPointerElementType());
836    }
837  }
838
839  // CAZ of type ScalableVectorType and n < CAZ->getMinNumElements() =>
840  //   extractelt CAZ, n -> 0
841  if (auto *ValSVTy = dyn_cast<ScalableVectorType>(Val->getType())) {
842    if (!CIdx->uge(ValSVTy->getMinNumElements())) {
843      if (auto *CAZ = dyn_cast<ConstantAggregateZero>(Val))
844        return CAZ->getElementValue(CIdx->getZExtValue());
845    }
846    return nullptr;
847  }
848
849  return Val->getAggregateElement(CIdx);
850}
851
852Constant *llvm::ConstantFoldInsertElementInstruction(Constant *Val,
853                                                     Constant *Elt,
854                                                     Constant *Idx) {
855  if (isa<UndefValue>(Idx))
856    return UndefValue::get(Val->getType());
857
858  ConstantInt *CIdx = dyn_cast<ConstantInt>(Idx);
859  if (!CIdx) return nullptr;
860
861  // Do not iterate on scalable vectors. The number of elements is unknown at
862  // compile time.
863  if (isa<ScalableVectorType>(Val->getType()))
864    return nullptr;
865
866  auto *ValTy = cast<FixedVectorType>(Val->getType());
867
868  unsigned NumElts = ValTy->getNumElements();
869  if (CIdx->uge(NumElts))
870    return UndefValue::get(Val->getType());
871
872  SmallVector<Constant*, 16> Result;
873  Result.reserve(NumElts);
874  auto *Ty = Type::getInt32Ty(Val->getContext());
875  uint64_t IdxVal = CIdx->getZExtValue();
876  for (unsigned i = 0; i != NumElts; ++i) {
877    if (i == IdxVal) {
878      Result.push_back(Elt);
879      continue;
880    }
881
882    Constant *C = ConstantExpr::getExtractElement(Val, ConstantInt::get(Ty, i));
883    Result.push_back(C);
884  }
885
886  return ConstantVector::get(Result);
887}
888
889Constant *llvm::ConstantFoldShuffleVectorInstruction(Constant *V1, Constant *V2,
890                                                     ArrayRef<int> Mask) {
891  auto *V1VTy = cast<VectorType>(V1->getType());
892  unsigned MaskNumElts = Mask.size();
893  ElementCount MaskEltCount = {MaskNumElts, isa<ScalableVectorType>(V1VTy)};
894  Type *EltTy = V1VTy->getElementType();
895
896  // Undefined shuffle mask -> undefined value.
897  if (all_of(Mask, [](int Elt) { return Elt == UndefMaskElem; })) {
898    return UndefValue::get(FixedVectorType::get(EltTy, MaskNumElts));
899  }
900
901  // If the mask is all zeros, this is a splat of the first element of V1;
902  // there is no need to go through all the elements.
903  if (all_of(Mask, [](int Elt) { return Elt == 0; }) &&
904      !MaskEltCount.Scalable) {
905    Type *Ty = IntegerType::get(V1->getContext(), 32);
906    Constant *Elt =
907        ConstantExpr::getExtractElement(V1, ConstantInt::get(Ty, 0));
908    return ConstantVector::getSplat(MaskEltCount, Elt);
909  }
910  // Do not iterate on scalable vectors. The number of elements is unknown at
911  // compile time.
912  if (isa<ScalableVectorType>(V1VTy))
913    return nullptr;
914
915  unsigned SrcNumElts = V1VTy->getElementCount().Min;
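  // Each mask element selects from the concatenation of V1 and V2: indices in
  // [0, SrcNumElts) read from V1, indices in [SrcNumElts, 2 * SrcNumElts) read
  // from V2, and -1 or out-of-range indices produce undef elements.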
916
917  // Loop over the shuffle mask, evaluating each element.
918  SmallVector<Constant*, 32> Result;
919  for (unsigned i = 0; i != MaskNumElts; ++i) {
920    int Elt = Mask[i];
921    if (Elt == -1) {
922      Result.push_back(UndefValue::get(EltTy));
923      continue;
924    }
925    Constant *InElt;
926    if (unsigned(Elt) >= SrcNumElts*2)
927      InElt = UndefValue::get(EltTy);
928    else if (unsigned(Elt) >= SrcNumElts) {
929      Type *Ty = IntegerType::get(V2->getContext(), 32);
930      InElt =
931        ConstantExpr::getExtractElement(V2,
932                                        ConstantInt::get(Ty, Elt - SrcNumElts));
933    } else {
934      Type *Ty = IntegerType::get(V1->getContext(), 32);
935      InElt = ConstantExpr::getExtractElement(V1, ConstantInt::get(Ty, Elt));
936    }
937    Result.push_back(InElt);
938  }
939
940  return ConstantVector::get(Result);
941}
942
943Constant *llvm::ConstantFoldExtractValueInstruction(Constant *Agg,
944                                                    ArrayRef<unsigned> Idxs) {
945  // Base case: no indices, so return the entire value.
946  if (Idxs.empty())
947    return Agg;
948
949  if (Constant *C = Agg->getAggregateElement(Idxs[0]))
950    return ConstantFoldExtractValueInstruction(C, Idxs.slice(1));
951
952  return nullptr;
953}
954
955Constant *llvm::ConstantFoldInsertValueInstruction(Constant *Agg,
956                                                   Constant *Val,
957                                                   ArrayRef<unsigned> Idxs) {
958  // Base case: no indices, so replace the entire value.
959  if (Idxs.empty())
960    return Val;
961
962  unsigned NumElts;
963  if (StructType *ST = dyn_cast<StructType>(Agg->getType()))
964    NumElts = ST->getNumElements();
965  else
966    NumElts = cast<ArrayType>(Agg->getType())->getNumElements();
967
968  SmallVector<Constant*, 32> Result;
969  for (unsigned i = 0; i != NumElts; ++i) {
970    Constant *C = Agg->getAggregateElement(i);
971    if (!C) return nullptr;
972
973    if (Idxs[0] == i)
974      C = ConstantFoldInsertValueInstruction(C, Val, Idxs.slice(1));
975
976    Result.push_back(C);
977  }
978
979  if (StructType *ST = dyn_cast<StructType>(Agg->getType()))
980    return ConstantStruct::get(ST, Result);
981  return ConstantArray::get(cast<ArrayType>(Agg->getType()), Result);
982}
983
984Constant *llvm::ConstantFoldUnaryInstruction(unsigned Opcode, Constant *C) {
985  assert(Instruction::isUnaryOp(Opcode) && "Non-unary instruction detected");
986
987  // Handle scalar UndefValue and scalable vector UndefValue. Fixed-length
988  // vectors are always evaluated per element.
989  bool IsScalableVector = isa<ScalableVectorType>(C->getType());
990  bool HasScalarUndefOrScalableVectorUndef =
991      (!C->getType()->isVectorTy() || IsScalableVector) && isa<UndefValue>(C);
992
993  if (HasScalarUndefOrScalableVectorUndef) {
994    switch (static_cast<Instruction::UnaryOps>(Opcode)) {
995    case Instruction::FNeg:
996      return C; // -undef -> undef
997    case Instruction::UnaryOpsEnd:
998      llvm_unreachable("Invalid UnaryOp");
999    }
1000  }
1001
1002  // C should not be UndefValue here, unless it is a fixed-length vector.
1003  assert(!HasScalarUndefOrScalableVectorUndef && "Unexpected UndefValue");
1004  // We only have FP UnaryOps right now.
1005  assert(!isa<ConstantInt>(C) && "Unexpected Integer UnaryOp");
1006
1007  if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
1008    const APFloat &CV = CFP->getValueAPF();
1009    switch (Opcode) {
1010    default:
1011      break;
1012    case Instruction::FNeg:
1013      return ConstantFP::get(C->getContext(), neg(CV));
1014    }
1015  } else if (auto *VTy = dyn_cast<FixedVectorType>(C->getType())) {
1016
1017    Type *Ty = IntegerType::get(VTy->getContext(), 32);
1018    // Fast path for splatted constants.
1019    if (Constant *Splat = C->getSplatValue()) {
1020      Constant *Elt = ConstantExpr::get(Opcode, Splat);
1021      return ConstantVector::getSplat(VTy->getElementCount(), Elt);
1022    }
1023
1024    // Fold each element and create a vector constant from those constants.
1025    SmallVector<Constant *, 16> Result;
1026    for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
1027      Constant *ExtractIdx = ConstantInt::get(Ty, i);
1028      Constant *Elt = ConstantExpr::getExtractElement(C, ExtractIdx);
1029
1030      Result.push_back(ConstantExpr::get(Opcode, Elt));
1031    }
1032
1033    return ConstantVector::get(Result);
1034  }
1035
1036  // We don't know how to fold this.
1037  return nullptr;
1038}
1039
1040Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode, Constant *C1,
1041                                              Constant *C2) {
1042  assert(Instruction::isBinaryOp(Opcode) && "Non-binary instruction detected");
1043
1044  // Simplify BinOps with their identity values first. They are no-ops and we
1045  // can always return the other value, including undef or poison values.
1046  // FIXME: remove unnecessary duplicated identity patterns below.
1047  // FIXME: Use AllowRHSConstant with getBinOpIdentity to handle additional ops,
1048  //        like X << 0 = X.
1049  Constant *Identity = ConstantExpr::getBinOpIdentity(Opcode, C1->getType());
1050  if (Identity) {
1051    if (C1 == Identity)
1052      return C2;
1053    if (C2 == Identity)
1054      return C1;
1055  }
1056
1057  // Handle scalar UndefValue and scalable vector UndefValue. Fixed-length
1058  // vectors are always evaluated per element.
1059  bool IsScalableVector = isa<ScalableVectorType>(C1->getType());
1060  bool HasScalarUndefOrScalableVectorUndef =
1061      (!C1->getType()->isVectorTy() || IsScalableVector) &&
1062      (isa<UndefValue>(C1) || isa<UndefValue>(C2));
1063  if (HasScalarUndefOrScalableVectorUndef) {
1064    switch (static_cast<Instruction::BinaryOps>(Opcode)) {
1065    case Instruction::Xor:
1066      if (isa<UndefValue>(C1) && isa<UndefValue>(C2))
1067        // Handle undef ^ undef -> 0 special case. This is a common
1068        // idiom (misuse).
1069        return Constant::getNullValue(C1->getType());
1070      LLVM_FALLTHROUGH;
1071    case Instruction::Add:
1072    case Instruction::Sub:
1073      return UndefValue::get(C1->getType());
1074    case Instruction::And:
1075      if (isa<UndefValue>(C1) && isa<UndefValue>(C2)) // undef & undef -> undef
1076        return C1;
1077      return Constant::getNullValue(C1->getType());   // undef & X -> 0
1078    case Instruction::Mul: {
1079      // undef * undef -> undef
1080      if (isa<UndefValue>(C1) && isa<UndefValue>(C2))
1081        return C1;
1082      const APInt *CV;
1083      // X * undef -> undef   if X is odd
1084      if (match(C1, m_APInt(CV)) || match(C2, m_APInt(CV)))
1085        if ((*CV)[0])
1086          return UndefValue::get(C1->getType());
1087
1088      // X * undef -> 0       otherwise
1089      return Constant::getNullValue(C1->getType());
1090    }
1091    case Instruction::SDiv:
1092    case Instruction::UDiv:
1093      // X / undef -> undef
1094      if (isa<UndefValue>(C2))
1095        return C2;
1096      // undef / 0 -> undef
1097      // undef / 1 -> undef
1098      if (match(C2, m_Zero()) || match(C2, m_One()))
1099        return C1;
1100      // undef / X -> 0       otherwise
1101      return Constant::getNullValue(C1->getType());
1102    case Instruction::URem:
1103    case Instruction::SRem:
1104      // X % undef -> undef
1105      if (match(C2, m_Undef()))
1106        return C2;
1107      // undef % 0 -> undef
1108      if (match(C2, m_Zero()))
1109        return C1;
1110      // undef % X -> 0       otherwise
1111      return Constant::getNullValue(C1->getType());
1112    case Instruction::Or:                          // X | undef -> -1
1113      if (isa<UndefValue>(C1) && isa<UndefValue>(C2)) // undef | undef -> undef
1114        return C1;
1115      return Constant::getAllOnesValue(C1->getType()); // undef | X -> ~0
1116    case Instruction::LShr:
1117      // X >>l undef -> undef
1118      if (isa<UndefValue>(C2))
1119        return C2;
1120      // undef >>l 0 -> undef
1121      if (match(C2, m_Zero()))
1122        return C1;
1123      // undef >>l X -> 0
1124      return Constant::getNullValue(C1->getType());
1125    case Instruction::AShr:
1126      // X >>a undef -> undef
1127      if (isa<UndefValue>(C2))
1128        return C2;
1129      // undef >>a 0 -> undef
1130      if (match(C2, m_Zero()))
1131        return C1;
1132      // TODO: undef >>a X -> undef if the shift is exact
1133      // undef >>a X -> 0
1134      return Constant::getNullValue(C1->getType());
1135    case Instruction::Shl:
1136      // X << undef -> undef
1137      if (isa<UndefValue>(C2))
1138        return C2;
1139      // undef << 0 -> undef
1140      if (match(C2, m_Zero()))
1141        return C1;
1142      // undef << X -> 0
1143      return Constant::getNullValue(C1->getType());
1144    case Instruction::FSub:
1145      // -0.0 - undef --> undef (consistent with "fneg undef")
1146      if (match(C1, m_NegZeroFP()) && isa<UndefValue>(C2))
1147        return C2;
1148      LLVM_FALLTHROUGH;
1149    case Instruction::FAdd:
1150    case Instruction::FMul:
1151    case Instruction::FDiv:
1152    case Instruction::FRem:
1153      // [any flop] undef, undef -> undef
1154      if (isa<UndefValue>(C1) && isa<UndefValue>(C2))
1155        return C1;
1156      // [any flop] C, undef -> NaN
1157      // [any flop] undef, C -> NaN
1158      // We could potentially specialize NaN/Inf constants vs. 'normal'
1159      // constants (possibly differently depending on opcode and operand). This
1160      // would allow returning undef sometimes. But it is always safe to fold to
1161      // NaN because we can choose the undef operand as NaN, and any FP opcode
1162      // with a NaN operand will propagate NaN.
1163      return ConstantFP::getNaN(C1->getType());
1164    case Instruction::BinaryOpsEnd:
1165      llvm_unreachable("Invalid BinaryOp");
1166    }
1167  }
1168
1169  // Neither constant should be UndefValue, unless both are fixed-length vectors.
1170  assert((!HasScalarUndefOrScalableVectorUndef) && "Unexpected UndefValue");
1171
1172  // Handle simplifications when the RHS is a constant int.
1173  if (ConstantInt *CI2 = dyn_cast<ConstantInt>(C2)) {
1174    switch (Opcode) {
1175    case Instruction::Add:
1176      if (CI2->isZero()) return C1;                             // X + 0 == X
1177      break;
1178    case Instruction::Sub:
1179      if (CI2->isZero()) return C1;                             // X - 0 == X
1180      break;
1181    case Instruction::Mul:
1182      if (CI2->isZero()) return C2;                             // X * 0 == 0
1183      if (CI2->isOne())
1184        return C1;                                              // X * 1 == X
1185      break;
1186    case Instruction::UDiv:
1187    case Instruction::SDiv:
1188      if (CI2->isOne())
1189        return C1;                                            // X / 1 == X
1190      if (CI2->isZero())
1191        return UndefValue::get(CI2->getType());               // X / 0 == undef
1192      break;
1193    case Instruction::URem:
1194    case Instruction::SRem:
1195      if (CI2->isOne())
1196        return Constant::getNullValue(CI2->getType());        // X % 1 == 0
1197      if (CI2->isZero())
1198        return UndefValue::get(CI2->getType());               // X % 0 == undef
1199      break;
1200    case Instruction::And:
1201      if (CI2->isZero()) return C2;                           // X & 0 == 0
1202      if (CI2->isMinusOne())
1203        return C1;                                            // X & -1 == X
1204
1205      if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(C1)) {
1206        // (zext i32 to i64) & 4294967295 -> (zext i32 to i64)
1207        if (CE1->getOpcode() == Instruction::ZExt) {
1208          unsigned DstWidth = CI2->getType()->getBitWidth();
1209          unsigned SrcWidth =
1210            CE1->getOperand(0)->getType()->getPrimitiveSizeInBits();
1211          APInt PossiblySetBits(APInt::getLowBitsSet(DstWidth, SrcWidth));
1212          if ((PossiblySetBits & CI2->getValue()) == PossiblySetBits)
1213            return C1;
1214        }
1215
1216        // If and'ing the address of a global with a constant, fold it.
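        // For example, (ptrtoint @g) & 3 folds to 0 when @g is known to be at
        // least 4-byte aligned.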
1217        if (CE1->getOpcode() == Instruction::PtrToInt &&
1218            isa<GlobalValue>(CE1->getOperand(0))) {
1219          GlobalValue *GV = cast<GlobalValue>(CE1->getOperand(0));
1220
1221          MaybeAlign GVAlign;
1222
1223          if (Module *TheModule = GV->getParent()) {
1224            const DataLayout &DL = TheModule->getDataLayout();
1225            GVAlign = GV->getPointerAlignment(DL);
1226
1227            // If the function alignment is not specified, then assume that
1228            // it is 4.
1229            // This is dangerous; on x86, the alignment of the pointer
1230            // corresponds to the alignment of the function, but might be less
1231            // than 4 if it isn't explicitly specified.
1232            // However, a fix for this behaviour was reverted because it
1233            // increased code size (see https://reviews.llvm.org/D55115)
1234            // FIXME: This code should be deleted once existing targets have
1235            // appropriate defaults
1236            if (isa<Function>(GV) && !DL.getFunctionPtrAlign())
1237              GVAlign = Align(4);
1238          } else if (isa<Function>(GV)) {
1239            // Without a datalayout we have to assume the worst case: that the
1240            // function pointer isn't aligned at all.
1241            GVAlign = llvm::None;
1242          } else if (isa<GlobalVariable>(GV)) {
1243            GVAlign = cast<GlobalVariable>(GV)->getAlign();
1244          }
1245
1246          if (GVAlign && *GVAlign > 1) {
1247            unsigned DstWidth = CI2->getType()->getBitWidth();
1248            unsigned SrcWidth = std::min(DstWidth, Log2(*GVAlign));
1249            APInt BitsNotSet(APInt::getLowBitsSet(DstWidth, SrcWidth));
1250
1251            // If checking bits we know are clear, return zero.
1252            if ((CI2->getValue() & BitsNotSet) == CI2->getValue())
1253              return Constant::getNullValue(CI2->getType());
1254          }
1255        }
1256      }
1257      break;
1258    case Instruction::Or:
1259      if (CI2->isZero()) return C1;        // X | 0 == X
1260      if (CI2->isMinusOne())
1261        return C2;                         // X | -1 == -1
1262      break;
1263    case Instruction::Xor:
1264      if (CI2->isZero()) return C1;        // X ^ 0 == X
1265
1266      if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(C1)) {
1267        switch (CE1->getOpcode()) {
1268        default: break;
1269        case Instruction::ICmp:
1270        case Instruction::FCmp:
1271          // cmp pred ^ true -> cmp !pred
1272          assert(CI2->isOne());
1273          CmpInst::Predicate pred = (CmpInst::Predicate)CE1->getPredicate();
1274          pred = CmpInst::getInversePredicate(pred);
1275          return ConstantExpr::getCompare(pred, CE1->getOperand(0),
1276                                          CE1->getOperand(1));
1277        }
1278      }
1279      break;
1280    case Instruction::AShr:
1281      // ashr (zext X to Ty), C -> lshr (zext X to Ty), C
1282      if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(C1))
1283        if (CE1->getOpcode() == Instruction::ZExt)  // Top bits known zero.
1284          return ConstantExpr::getLShr(C1, C2);
1285      break;
1286    }
1287  } else if (isa<ConstantInt>(C1)) {
1288    // If C1 is a ConstantInt and C2 is not, swap the operands.
1289    if (Instruction::isCommutative(Opcode))
1290      return ConstantExpr::get(Opcode, C2, C1);
1291  }
1292
1293  if (ConstantInt *CI1 = dyn_cast<ConstantInt>(C1)) {
1294    if (ConstantInt *CI2 = dyn_cast<ConstantInt>(C2)) {
1295      const APInt &C1V = CI1->getValue();
1296      const APInt &C2V = CI2->getValue();
1297      switch (Opcode) {
1298      default:
1299        break;
1300      case Instruction::Add:
1301        return ConstantInt::get(CI1->getContext(), C1V + C2V);
1302      case Instruction::Sub:
1303        return ConstantInt::get(CI1->getContext(), C1V - C2V);
1304      case Instruction::Mul:
1305        return ConstantInt::get(CI1->getContext(), C1V * C2V);
1306      case Instruction::UDiv:
1307        assert(!CI2->isZero() && "Div by zero handled above");
1308        return ConstantInt::get(CI1->getContext(), C1V.udiv(C2V));
1309      case Instruction::SDiv:
1310        assert(!CI2->isZero() && "Div by zero handled above");
1311        if (C2V.isAllOnesValue() && C1V.isMinSignedValue())
1312          return UndefValue::get(CI1->getType());   // MIN_INT / -1 -> undef
1313        return ConstantInt::get(CI1->getContext(), C1V.sdiv(C2V));
1314      case Instruction::URem:
1315        assert(!CI2->isZero() && "Div by zero handled above");
1316        return ConstantInt::get(CI1->getContext(), C1V.urem(C2V));
1317      case Instruction::SRem:
1318        assert(!CI2->isZero() && "Div by zero handled above");
1319        if (C2V.isAllOnesValue() && C1V.isMinSignedValue())
1320          return UndefValue::get(CI1->getType());   // MIN_INT % -1 -> undef
1321        return ConstantInt::get(CI1->getContext(), C1V.srem(C2V));
1322      case Instruction::And:
1323        return ConstantInt::get(CI1->getContext(), C1V & C2V);
1324      case Instruction::Or:
1325        return ConstantInt::get(CI1->getContext(), C1V | C2V);
1326      case Instruction::Xor:
1327        return ConstantInt::get(CI1->getContext(), C1V ^ C2V);
1328      case Instruction::Shl:
1329        if (C2V.ult(C1V.getBitWidth()))
1330          return ConstantInt::get(CI1->getContext(), C1V.shl(C2V));
1331        return UndefValue::get(C1->getType()); // too big shift is undef
1332      case Instruction::LShr:
1333        if (C2V.ult(C1V.getBitWidth()))
1334          return ConstantInt::get(CI1->getContext(), C1V.lshr(C2V));
1335        return UndefValue::get(C1->getType()); // too big shift is undef
1336      case Instruction::AShr:
1337        if (C2V.ult(C1V.getBitWidth()))
1338          return ConstantInt::get(CI1->getContext(), C1V.ashr(C2V));
1339        return UndefValue::get(C1->getType()); // too big shift is undef
1340      }
1341    }
1342
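    // A zero left-hand side short-circuits these opcodes; for example
    // 0 udiv C, 0 srem C, 0 lshr C and 0 shl C all fold back to 0.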
1343    switch (Opcode) {
1344    case Instruction::SDiv:
1345    case Instruction::UDiv:
1346    case Instruction::URem:
1347    case Instruction::SRem:
1348    case Instruction::LShr:
1349    case Instruction::AShr:
1350    case Instruction::Shl:
1351      if (CI1->isZero()) return C1;
1352      break;
1353    default:
1354      break;
1355    }
1356  } else if (ConstantFP *CFP1 = dyn_cast<ConstantFP>(C1)) {
1357    if (ConstantFP *CFP2 = dyn_cast<ConstantFP>(C2)) {
1358      const APFloat &C1V = CFP1->getValueAPF();
1359      const APFloat &C2V = CFP2->getValueAPF();
1360      APFloat C3V = C1V;  // copy for modification
1361      switch (Opcode) {
1362      default:
1363        break;
1364      case Instruction::FAdd:
1365        (void)C3V.add(C2V, APFloat::rmNearestTiesToEven);
1366        return ConstantFP::get(C1->getContext(), C3V);
1367      case Instruction::FSub:
1368        (void)C3V.subtract(C2V, APFloat::rmNearestTiesToEven);
1369        return ConstantFP::get(C1->getContext(), C3V);
1370      case Instruction::FMul:
1371        (void)C3V.multiply(C2V, APFloat::rmNearestTiesToEven);
1372        return ConstantFP::get(C1->getContext(), C3V);
1373      case Instruction::FDiv:
1374        (void)C3V.divide(C2V, APFloat::rmNearestTiesToEven);
1375        return ConstantFP::get(C1->getContext(), C3V);
1376      case Instruction::FRem:
1377        (void)C3V.mod(C2V);
1378        return ConstantFP::get(C1->getContext(), C3V);
1379      }
1380    }
1381  } else if (IsScalableVector) {
    // Do not iterate on scalable vectors. The number of elements is unknown
    // at compile-time.
1384    // FIXME: this branch can potentially be removed
1385    return nullptr;
1386  } else if (auto *VTy = dyn_cast<FixedVectorType>(C1->getType())) {
1387    // Fast path for splatted constants.
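    // Illustrative example: mul <4 x i32> splat(6), splat(7) is folded once
    // as 6 * 7 and re-splatted to splat(42) instead of folding per element.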
1388    if (Constant *C2Splat = C2->getSplatValue()) {
1389      if (Instruction::isIntDivRem(Opcode) && C2Splat->isNullValue())
1390        return UndefValue::get(VTy);
1391      if (Constant *C1Splat = C1->getSplatValue()) {
1392        return ConstantVector::getSplat(
1393            VTy->getElementCount(),
1394            ConstantExpr::get(Opcode, C1Splat, C2Splat));
1395      }
1396    }
1397
1398    // Fold each element and create a vector constant from those constants.
1399    SmallVector<Constant*, 16> Result;
1400    Type *Ty = IntegerType::get(VTy->getContext(), 32);
1401    for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
1402      Constant *ExtractIdx = ConstantInt::get(Ty, i);
1403      Constant *LHS = ConstantExpr::getExtractElement(C1, ExtractIdx);
1404      Constant *RHS = ConstantExpr::getExtractElement(C2, ExtractIdx);
1405
1406      // If any element of a divisor vector is zero, the whole op is undef.
1407      if (Instruction::isIntDivRem(Opcode) && RHS->isNullValue())
1408        return UndefValue::get(VTy);
1409
1410      Result.push_back(ConstantExpr::get(Opcode, LHS, RHS));
1411    }
1412
1413    return ConstantVector::get(Result);
1414  }
1415
1416  if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(C1)) {
1417    // There are many possible foldings we could do here.  We should probably
1418    // at least fold add of a pointer with an integer into the appropriate
1419    // getelementptr.  This will improve alias analysis a bit.
1420
1421    // Given ((a + b) + c), if (b + c) folds to something interesting, return
1422    // (a + (b + c)).
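    // Illustrative example: ((X + 3) + 5) re-associates to (X + (3 + 5)),
    // and since 3 + 5 folds to the plain constant 8 we return (X + 8).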
1423    if (Instruction::isAssociative(Opcode) && CE1->getOpcode() == Opcode) {
1424      Constant *T = ConstantExpr::get(Opcode, CE1->getOperand(1), C2);
1425      if (!isa<ConstantExpr>(T) || cast<ConstantExpr>(T)->getOpcode() != Opcode)
1426        return ConstantExpr::get(Opcode, CE1->getOperand(0), T);
1427    }
1428  } else if (isa<ConstantExpr>(C2)) {
1429    // If C2 is a constant expr and C1 isn't, flop them around and fold the
1430    // other way if possible.
1431    if (Instruction::isCommutative(Opcode))
1432      return ConstantFoldBinaryInstruction(Opcode, C2, C1);
1433  }
1434
1435  // i1 can be simplified in many cases.
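  // For example, over i1 both add and sub reduce to xor (1 + 1 == 0 == 1 ^ 1)
  // and mul reduces to and (1 * 1 == 1 == 1 & 1).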
1436  if (C1->getType()->isIntegerTy(1)) {
1437    switch (Opcode) {
1438    case Instruction::Add:
1439    case Instruction::Sub:
1440      return ConstantExpr::getXor(C1, C2);
1441    case Instruction::Mul:
1442      return ConstantExpr::getAnd(C1, C2);
1443    case Instruction::Shl:
1444    case Instruction::LShr:
1445    case Instruction::AShr:
1446      // We can assume that C2 == 0.  If it were one the result would be
1447      // undefined because the shift value is as large as the bitwidth.
1448      return C1;
1449    case Instruction::SDiv:
1450    case Instruction::UDiv:
1451      // We can assume that C2 == 1.  If it were zero the result would be
1452      // undefined through division by zero.
1453      return C1;
1454    case Instruction::URem:
1455    case Instruction::SRem:
1456      // We can assume that C2 == 1.  If it were zero the result would be
1457      // undefined through division by zero.
1458      return ConstantInt::getFalse(C1->getContext());
1459    default:
1460      break;
1461    }
1462  }
1463
1464  // We don't know how to fold this.
1465  return nullptr;
1466}
1467
/// This type might be zero-sized if it is an array or structure of zero-sized
/// types. The only leaf zero-sized type is an empty structure; an opaque
/// struct is conservatively treated as possibly zero-sized.
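/// Illustrative examples: {} and [4 x {}] are possibly zero-sized, while
/// [4 x i8] is not.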
1470static bool isMaybeZeroSizedType(Type *Ty) {
1471  if (StructType *STy = dyn_cast<StructType>(Ty)) {
1472    if (STy->isOpaque()) return true;  // Can't say.
1473
    // If all of the elements have zero size, this does too.
1475    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
1476      if (!isMaybeZeroSizedType(STy->getElementType(i))) return false;
1477    return true;
1478
1479  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1480    return isMaybeZeroSizedType(ATy->getElementType());
1481  }
1482  return false;
1483}
1484
1485/// Compare the two constants as though they were getelementptr indices.
1486/// This allows coercion of the types to be the same thing.
1487///
1488/// If the two constants are the "same" (after coercion), return 0.  If the
/// first is less than the second, return -1; if the second is less than the
/// first, return 1.  If the constants are not integral, return -2.
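/// Illustrative example: indices i32 3 and i64 5 are both sign-extended to
/// 64 bits and compared, yielding -1 because 3 < 5 (assuming the indexed
/// type is not zero-sized).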
1491///
1492static int IdxCompare(Constant *C1, Constant *C2, Type *ElTy) {
1493  if (C1 == C2) return 0;
1494
1495  // Ok, we found a different index.  If they are not ConstantInt, we can't do
1496  // anything with them.
1497  if (!isa<ConstantInt>(C1) || !isa<ConstantInt>(C2))
1498    return -2; // don't know!
1499
1500  // We cannot compare the indices if they don't fit in an int64_t.
1501  if (cast<ConstantInt>(C1)->getValue().getActiveBits() > 64 ||
1502      cast<ConstantInt>(C2)->getValue().getActiveBits() > 64)
1503    return -2; // don't know!
1504
1505  // Ok, we have two differing integer indices.  Sign extend them to be the same
1506  // type.
1507  int64_t C1Val = cast<ConstantInt>(C1)->getSExtValue();
1508  int64_t C2Val = cast<ConstantInt>(C2)->getSExtValue();
1509
1510  if (C1Val == C2Val) return 0;  // They are equal
1511
1512  // If the type being indexed over is really just a zero sized type, there is
1513  // no pointer difference being made here.
1514  if (isMaybeZeroSizedType(ElTy))
1515    return -2; // dunno.
1516
1517  // If they are really different, now that they are the same type, then we
1518  // found a difference!
1519  if (C1Val < C2Val)
1520    return -1;
1521  else
1522    return 1;
1523}
1524
1525/// This function determines if there is anything we can decide about the two
1526/// constants provided. This doesn't need to handle simple things like
1527/// ConstantFP comparisons, but should instead handle ConstantExprs.
1528/// If we can determine that the two constants have a particular relation to
1529/// each other, we should return the corresponding FCmpInst predicate,
1530/// otherwise return FCmpInst::BAD_FCMP_PREDICATE. This is used below in
1531/// ConstantFoldCompareInstruction.
1532///
1533/// To simplify this code we canonicalize the relation so that the first
1534/// operand is always the most "complex" of the two.  We consider ConstantFP
1535/// to be the simplest, and ConstantExprs to be the most complex.
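/// Illustrative example: for two plain ConstantFP operands 1.0 and 2.0 this
/// returns FCMP_OLT, while identical operands only yield FCMP_UEQ because a
/// constant expression might still evaluate to NaN.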
1536static FCmpInst::Predicate evaluateFCmpRelation(Constant *V1, Constant *V2) {
1537  assert(V1->getType() == V2->getType() &&
1538         "Cannot compare values of different types!");
1539
1540  // We do not know if a constant expression will evaluate to a number or NaN.
1541  // Therefore, we can only say that the relation is unordered or equal.
1542  if (V1 == V2) return FCmpInst::FCMP_UEQ;
1543
1544  if (!isa<ConstantExpr>(V1)) {
1545    if (!isa<ConstantExpr>(V2)) {
1546      // Simple case, use the standard constant folder.
1547      ConstantInt *R = nullptr;
1548      R = dyn_cast<ConstantInt>(
1549                      ConstantExpr::getFCmp(FCmpInst::FCMP_OEQ, V1, V2));
1550      if (R && !R->isZero())
1551        return FCmpInst::FCMP_OEQ;
1552      R = dyn_cast<ConstantInt>(
1553                      ConstantExpr::getFCmp(FCmpInst::FCMP_OLT, V1, V2));
1554      if (R && !R->isZero())
1555        return FCmpInst::FCMP_OLT;
1556      R = dyn_cast<ConstantInt>(
1557                      ConstantExpr::getFCmp(FCmpInst::FCMP_OGT, V1, V2));
1558      if (R && !R->isZero())
1559        return FCmpInst::FCMP_OGT;
1560
1561      // Nothing more we can do
1562      return FCmpInst::BAD_FCMP_PREDICATE;
1563    }
1564
1565    // If the first operand is simple and second is ConstantExpr, swap operands.
1566    FCmpInst::Predicate SwappedRelation = evaluateFCmpRelation(V2, V1);
1567    if (SwappedRelation != FCmpInst::BAD_FCMP_PREDICATE)
1568      return FCmpInst::getSwappedPredicate(SwappedRelation);
1569  } else {
    // Ok, the LHS is known to be a constantexpr.  The RHS can be either a
    // constantexpr or a simple constant.
1572    ConstantExpr *CE1 = cast<ConstantExpr>(V1);
1573    switch (CE1->getOpcode()) {
1574    case Instruction::FPTrunc:
1575    case Instruction::FPExt:
1576    case Instruction::UIToFP:
1577    case Instruction::SIToFP:
1578      // We might be able to do something with these but we don't right now.
1579      break;
1580    default:
1581      break;
1582    }
1583  }
1584  // There are MANY other foldings that we could perform here.  They will
1585  // probably be added on demand, as they seem needed.
1586  return FCmpInst::BAD_FCMP_PREDICATE;
1587}
1588
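/// Return ICMP_NE when two distinct globals are known to have distinct
/// addresses, and BAD_ICMP_PREDICATE otherwise. For example, two ordinary
/// global variables with sized, non-empty value types cannot share an
/// address, whereas an interposable or unnamed_addr global might end up
/// aliasing another definition, so nothing can be concluded.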
1589static ICmpInst::Predicate areGlobalsPotentiallyEqual(const GlobalValue *GV1,
1590                                                      const GlobalValue *GV2) {
1591  auto isGlobalUnsafeForEquality = [](const GlobalValue *GV) {
1592    if (GV->isInterposable() || GV->hasGlobalUnnamedAddr())
1593      return true;
1594    if (const auto *GVar = dyn_cast<GlobalVariable>(GV)) {
1595      Type *Ty = GVar->getValueType();
1596      // A global with opaque type might end up being zero sized.
1597      if (!Ty->isSized())
1598        return true;
1599      // A global with an empty type might lie at the address of any other
1600      // global.
1601      if (Ty->isEmptyTy())
1602        return true;
1603    }
1604    return false;
1605  };
1606  // Don't try to decide equality of aliases.
1607  if (!isa<GlobalAlias>(GV1) && !isa<GlobalAlias>(GV2))
1608    if (!isGlobalUnsafeForEquality(GV1) && !isGlobalUnsafeForEquality(GV2))
1609      return ICmpInst::ICMP_NE;
1610  return ICmpInst::BAD_ICMP_PREDICATE;
1611}
1612
1613/// This function determines if there is anything we can decide about the two
1614/// constants provided. This doesn't need to handle simple things like integer
1615/// comparisons, but should instead handle ConstantExprs and GlobalValues.
1616/// If we can determine that the two constants have a particular relation to
1617/// each other, we should return the corresponding ICmp predicate, otherwise
1618/// return ICmpInst::BAD_ICMP_PREDICATE.
1619///
1620/// To simplify this code we canonicalize the relation so that the first
1621/// operand is always the most "complex" of the two.  We consider simple
1622/// constants (like ConstantInt) to be the simplest, followed by
1623/// GlobalValues, followed by ConstantExpr's (the most complex).
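/// Illustrative example: a getelementptr into an ordinary (non-weak) global
/// compared against a null pointer can be decided as unsigned greater-than,
/// because the global's address is known to be non-null, whereas a comparison
/// of two interposable globals stays undecided.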
1624///
1625static ICmpInst::Predicate evaluateICmpRelation(Constant *V1, Constant *V2,
1626                                                bool isSigned) {
1627  assert(V1->getType() == V2->getType() &&
1628         "Cannot compare different types of values!");
1629  if (V1 == V2) return ICmpInst::ICMP_EQ;
1630
1631  if (!isa<ConstantExpr>(V1) && !isa<GlobalValue>(V1) &&
1632      !isa<BlockAddress>(V1)) {
1633    if (!isa<GlobalValue>(V2) && !isa<ConstantExpr>(V2) &&
1634        !isa<BlockAddress>(V2)) {
1635      // We distilled this down to a simple case, use the standard constant
1636      // folder.
1637      ConstantInt *R = nullptr;
1638      ICmpInst::Predicate pred = ICmpInst::ICMP_EQ;
1639      R = dyn_cast<ConstantInt>(ConstantExpr::getICmp(pred, V1, V2));
1640      if (R && !R->isZero())
1641        return pred;
1642      pred = isSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
1643      R = dyn_cast<ConstantInt>(ConstantExpr::getICmp(pred, V1, V2));
1644      if (R && !R->isZero())
1645        return pred;
1646      pred = isSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
1647      R = dyn_cast<ConstantInt>(ConstantExpr::getICmp(pred, V1, V2));
1648      if (R && !R->isZero())
1649        return pred;
1650
1651      // If we couldn't figure it out, bail.
1652      return ICmpInst::BAD_ICMP_PREDICATE;
1653    }
1654
1655    // If the first operand is simple, swap operands.
1656    ICmpInst::Predicate SwappedRelation =
1657      evaluateICmpRelation(V2, V1, isSigned);
1658    if (SwappedRelation != ICmpInst::BAD_ICMP_PREDICATE)
1659      return ICmpInst::getSwappedPredicate(SwappedRelation);
1660
1661  } else if (const GlobalValue *GV = dyn_cast<GlobalValue>(V1)) {
1662    if (isa<ConstantExpr>(V2)) {  // Swap as necessary.
1663      ICmpInst::Predicate SwappedRelation =
1664        evaluateICmpRelation(V2, V1, isSigned);
1665      if (SwappedRelation != ICmpInst::BAD_ICMP_PREDICATE)
1666        return ICmpInst::getSwappedPredicate(SwappedRelation);
1667      return ICmpInst::BAD_ICMP_PREDICATE;
1668    }
1669
1670    // Now we know that the RHS is a GlobalValue, BlockAddress or simple
1671    // constant (which, since the types must match, means that it's a
1672    // ConstantPointerNull).
1673    if (const GlobalValue *GV2 = dyn_cast<GlobalValue>(V2)) {
1674      return areGlobalsPotentiallyEqual(GV, GV2);
1675    } else if (isa<BlockAddress>(V2)) {
1676      return ICmpInst::ICMP_NE; // Globals never equal labels.
1677    } else {
1678      assert(isa<ConstantPointerNull>(V2) && "Canonicalization guarantee!");
1679      // GlobalVals can never be null unless they have external weak linkage.
1680      // We don't try to evaluate aliases here.
1681      // NOTE: We should not be doing this constant folding if null pointer
1682      // is considered valid for the function. But currently there is no way to
1683      // query it from the Constant type.
1684      if (!GV->hasExternalWeakLinkage() && !isa<GlobalAlias>(GV) &&
1685          !NullPointerIsDefined(nullptr /* F */,
1686                                GV->getType()->getAddressSpace()))
1687        return ICmpInst::ICMP_NE;
1688    }
1689  } else if (const BlockAddress *BA = dyn_cast<BlockAddress>(V1)) {
1690    if (isa<ConstantExpr>(V2)) {  // Swap as necessary.
1691      ICmpInst::Predicate SwappedRelation =
1692        evaluateICmpRelation(V2, V1, isSigned);
1693      if (SwappedRelation != ICmpInst::BAD_ICMP_PREDICATE)
1694        return ICmpInst::getSwappedPredicate(SwappedRelation);
1695      return ICmpInst::BAD_ICMP_PREDICATE;
1696    }
1697
1698    // Now we know that the RHS is a GlobalValue, BlockAddress or simple
1699    // constant (which, since the types must match, means that it is a
1700    // ConstantPointerNull).
1701    if (const BlockAddress *BA2 = dyn_cast<BlockAddress>(V2)) {
1702      // Block address in another function can't equal this one, but block
1703      // addresses in the current function might be the same if blocks are
1704      // empty.
1705      if (BA2->getFunction() != BA->getFunction())
1706        return ICmpInst::ICMP_NE;
1707    } else {
1708      // Block addresses aren't null, don't equal the address of globals.
1709      assert((isa<ConstantPointerNull>(V2) || isa<GlobalValue>(V2)) &&
1710             "Canonicalization guarantee!");
1711      return ICmpInst::ICMP_NE;
1712    }
1713  } else {
1714    // Ok, the LHS is known to be a constantexpr.  The RHS can be any of a
1715    // constantexpr, a global, block address, or a simple constant.
1716    ConstantExpr *CE1 = cast<ConstantExpr>(V1);
1717    Constant *CE1Op0 = CE1->getOperand(0);
1718
1719    switch (CE1->getOpcode()) {
1720    case Instruction::Trunc:
1721    case Instruction::FPTrunc:
1722    case Instruction::FPExt:
1723    case Instruction::FPToUI:
1724    case Instruction::FPToSI:
1725      break; // We can't evaluate floating point casts or truncations.
1726
1727    case Instruction::UIToFP:
1728    case Instruction::SIToFP:
1729    case Instruction::BitCast:
1730    case Instruction::ZExt:
1731    case Instruction::SExt:
1732      // We can't evaluate floating point casts or truncations.
1733      if (CE1Op0->getType()->isFPOrFPVectorTy())
1734        break;
1735
1736      // If the cast is not actually changing bits, and the second operand is a
1737      // null pointer, do the comparison with the pre-casted value.
1738      if (V2->isNullValue() && CE1->getType()->isIntOrPtrTy()) {
1739        if (CE1->getOpcode() == Instruction::ZExt) isSigned = false;
1740        if (CE1->getOpcode() == Instruction::SExt) isSigned = true;
1741        return evaluateICmpRelation(CE1Op0,
1742                                    Constant::getNullValue(CE1Op0->getType()),
1743                                    isSigned);
1744      }
1745      break;
1746
1747    case Instruction::GetElementPtr: {
1748      GEPOperator *CE1GEP = cast<GEPOperator>(CE1);
1749      // Ok, since this is a getelementptr, we know that the constant has a
1750      // pointer type.  Check the various cases.
1751      if (isa<ConstantPointerNull>(V2)) {
1752        // If we are comparing a GEP to a null pointer, check to see if the base
1753        // of the GEP equals the null pointer.
1754        if (const GlobalValue *GV = dyn_cast<GlobalValue>(CE1Op0)) {
1755          if (GV->hasExternalWeakLinkage())
            // Weak linkage GVals could be zero or not. We're comparing it
            // to the null pointer, so the result is greater-or-equal.
1758            return isSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
1759          else
            // If it's not weak linkage, the GVal must have a non-zero address,
            // so the result is greater-than.
1762            return isSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
1763        } else if (isa<ConstantPointerNull>(CE1Op0)) {
1764          // If we are indexing from a null pointer, check to see if we have any
1765          // non-zero indices.
1766          for (unsigned i = 1, e = CE1->getNumOperands(); i != e; ++i)
1767            if (!CE1->getOperand(i)->isNullValue())
1768              // Offsetting from null, must not be equal.
1769              return isSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
1770          // Only zero indexes from null, must still be zero.
1771          return ICmpInst::ICMP_EQ;
1772        }
1773        // Otherwise, we can't really say if the first operand is null or not.
1774      } else if (const GlobalValue *GV2 = dyn_cast<GlobalValue>(V2)) {
1775        if (isa<ConstantPointerNull>(CE1Op0)) {
1776          if (GV2->hasExternalWeakLinkage())
            // Weak linkage GVals could be zero or not. We're comparing it to
            // a null pointer, so the result is less-or-equal.
1779            return isSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
1780          else
            // If it's not weak linkage, the GVal must have a non-zero address,
            // so the result is less-than.
1783            return isSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
1784        } else if (const GlobalValue *GV = dyn_cast<GlobalValue>(CE1Op0)) {
1785          if (GV == GV2) {
1786            // If this is a getelementptr of the same global, then it must be
1787            // different.  Because the types must match, the getelementptr could
1788            // only have at most one index, and because we fold getelementptr's
1789            // with a single zero index, it must be nonzero.
1790            assert(CE1->getNumOperands() == 2 &&
1791                   !CE1->getOperand(1)->isNullValue() &&
1792                   "Surprising getelementptr!");
1793            return isSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
1794          } else {
1795            if (CE1GEP->hasAllZeroIndices())
1796              return areGlobalsPotentiallyEqual(GV, GV2);
1797            return ICmpInst::BAD_ICMP_PREDICATE;
1798          }
1799        }
1800      } else {
1801        ConstantExpr *CE2 = cast<ConstantExpr>(V2);
1802        Constant *CE2Op0 = CE2->getOperand(0);
1803
1804        // There are MANY other foldings that we could perform here.  They will
1805        // probably be added on demand, as they seem needed.
1806        switch (CE2->getOpcode()) {
1807        default: break;
1808        case Instruction::GetElementPtr:
1809          // By far the most common case to handle is when the base pointers are
1810          // obviously to the same global.
1811          if (isa<GlobalValue>(CE1Op0) && isa<GlobalValue>(CE2Op0)) {
1812            // Don't know relative ordering, but check for inequality.
1813            if (CE1Op0 != CE2Op0) {
1814              GEPOperator *CE2GEP = cast<GEPOperator>(CE2);
1815              if (CE1GEP->hasAllZeroIndices() && CE2GEP->hasAllZeroIndices())
1816                return areGlobalsPotentiallyEqual(cast<GlobalValue>(CE1Op0),
1817                                                  cast<GlobalValue>(CE2Op0));
1818              return ICmpInst::BAD_ICMP_PREDICATE;
1819            }
1820            // Ok, we know that both getelementptr instructions are based on the
1821            // same global.  From this, we can precisely determine the relative
1822            // ordering of the resultant pointers.
1823            unsigned i = 1;
1824
1825            // The logic below assumes that the result of the comparison
1826            // can be determined by finding the first index that differs.
1827            // This doesn't work if there is over-indexing in any
1828            // subsequent indices, so check for that case first.
1829            if (!CE1->isGEPWithNoNotionalOverIndexing() ||
1830                !CE2->isGEPWithNoNotionalOverIndexing())
1831               return ICmpInst::BAD_ICMP_PREDICATE; // Might be equal.
1832
1833            // Compare all of the operands the GEP's have in common.
1834            gep_type_iterator GTI = gep_type_begin(CE1);
1835            for (;i != CE1->getNumOperands() && i != CE2->getNumOperands();
1836                 ++i, ++GTI)
1837              switch (IdxCompare(CE1->getOperand(i),
1838                                 CE2->getOperand(i), GTI.getIndexedType())) {
1839              case -1: return isSigned ? ICmpInst::ICMP_SLT:ICmpInst::ICMP_ULT;
1840              case 1:  return isSigned ? ICmpInst::ICMP_SGT:ICmpInst::ICMP_UGT;
1841              case -2: return ICmpInst::BAD_ICMP_PREDICATE;
1842              }
1843
1844            // Ok, we ran out of things they have in common.  If any leftovers
1845            // are non-zero then we have a difference, otherwise we are equal.
1846            for (; i < CE1->getNumOperands(); ++i)
1847              if (!CE1->getOperand(i)->isNullValue()) {
1848                if (isa<ConstantInt>(CE1->getOperand(i)))
1849                  return isSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
1850                else
1851                  return ICmpInst::BAD_ICMP_PREDICATE; // Might be equal.
1852              }
1853
1854            for (; i < CE2->getNumOperands(); ++i)
1855              if (!CE2->getOperand(i)->isNullValue()) {
1856                if (isa<ConstantInt>(CE2->getOperand(i)))
1857                  return isSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
1858                else
1859                  return ICmpInst::BAD_ICMP_PREDICATE; // Might be equal.
1860              }
1861            return ICmpInst::ICMP_EQ;
1862          }
1863        }
1864      }
1865      break;
1866    }
1867    default:
1868      break;
1869    }
1870  }
1871
1872  return ICmpInst::BAD_ICMP_PREDICATE;
1873}
1874
1875Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred,
1876                                               Constant *C1, Constant *C2) {
1877  Type *ResultTy;
1878  if (VectorType *VT = dyn_cast<VectorType>(C1->getType()))
1879    ResultTy = VectorType::get(Type::getInt1Ty(C1->getContext()),
1880                               VT->getElementCount());
1881  else
1882    ResultTy = Type::getInt1Ty(C1->getContext());
1883
1884  // Fold FCMP_FALSE/FCMP_TRUE unconditionally.
1885  if (pred == FCmpInst::FCMP_FALSE)
1886    return Constant::getNullValue(ResultTy);
1887
1888  if (pred == FCmpInst::FCMP_TRUE)
1889    return Constant::getAllOnesValue(ResultTy);
1890
1891  // Handle some degenerate cases first
1892  if (isa<UndefValue>(C1) || isa<UndefValue>(C2)) {
1893    CmpInst::Predicate Predicate = CmpInst::Predicate(pred);
1894    bool isIntegerPredicate = ICmpInst::isIntPredicate(Predicate);
1895    // For EQ and NE, we can always pick a value for the undef to make the
1896    // predicate pass or fail, so we can return undef.
1897    // Also, if both operands are undef, we can return undef for int comparison.
1898    if (ICmpInst::isEquality(Predicate) || (isIntegerPredicate && C1 == C2))
1899      return UndefValue::get(ResultTy);
1900
1901    // Otherwise, for integer compare, pick the same value as the non-undef
1902    // operand, and fold it to true or false.
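    // Illustrative example: icmp ule undef, 7 folds to true (choose 7 for the
    // undef), while icmp ult undef, 7 folds to false.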
1903    if (isIntegerPredicate)
1904      return ConstantInt::get(ResultTy, CmpInst::isTrueWhenEqual(Predicate));
1905
    // Choosing NaN for the undef will always make unordered comparisons
    // succeed and ordered comparisons fail.
1908    return ConstantInt::get(ResultTy, CmpInst::isUnordered(Predicate));
1909  }
1910
1911  // icmp eq/ne(null,GV) -> false/true
1912  if (C1->isNullValue()) {
1913    if (const GlobalValue *GV = dyn_cast<GlobalValue>(C2))
1914      // Don't try to evaluate aliases.  External weak GV can be null.
1915      if (!isa<GlobalAlias>(GV) && !GV->hasExternalWeakLinkage() &&
1916          !NullPointerIsDefined(nullptr /* F */,
1917                                GV->getType()->getAddressSpace())) {
1918        if (pred == ICmpInst::ICMP_EQ)
1919          return ConstantInt::getFalse(C1->getContext());
1920        else if (pred == ICmpInst::ICMP_NE)
1921          return ConstantInt::getTrue(C1->getContext());
1922      }
1923  // icmp eq/ne(GV,null) -> false/true
1924  } else if (C2->isNullValue()) {
1925    if (const GlobalValue *GV = dyn_cast<GlobalValue>(C1))
1926      // Don't try to evaluate aliases.  External weak GV can be null.
1927      if (!isa<GlobalAlias>(GV) && !GV->hasExternalWeakLinkage() &&
1928          !NullPointerIsDefined(nullptr /* F */,
1929                                GV->getType()->getAddressSpace())) {
1930        if (pred == ICmpInst::ICMP_EQ)
1931          return ConstantInt::getFalse(C1->getContext());
1932        else if (pred == ICmpInst::ICMP_NE)
1933          return ConstantInt::getTrue(C1->getContext());
1934      }
1935  }
1936
1937  // If the comparison is a comparison between two i1's, simplify it.
1938  if (C1->getType()->isIntegerTy(1)) {
1939    switch(pred) {
1940    case ICmpInst::ICMP_EQ:
1941      if (isa<ConstantInt>(C2))
1942        return ConstantExpr::getXor(C1, ConstantExpr::getNot(C2));
1943      return ConstantExpr::getXor(ConstantExpr::getNot(C1), C2);
1944    case ICmpInst::ICMP_NE:
1945      return ConstantExpr::getXor(C1, C2);
1946    default:
1947      break;
1948    }
1949  }
1950
1951  if (isa<ConstantInt>(C1) && isa<ConstantInt>(C2)) {
1952    const APInt &V1 = cast<ConstantInt>(C1)->getValue();
1953    const APInt &V2 = cast<ConstantInt>(C2)->getValue();
1954    switch (pred) {
1955    default: llvm_unreachable("Invalid ICmp Predicate");
1956    case ICmpInst::ICMP_EQ:  return ConstantInt::get(ResultTy, V1 == V2);
1957    case ICmpInst::ICMP_NE:  return ConstantInt::get(ResultTy, V1 != V2);
1958    case ICmpInst::ICMP_SLT: return ConstantInt::get(ResultTy, V1.slt(V2));
1959    case ICmpInst::ICMP_SGT: return ConstantInt::get(ResultTy, V1.sgt(V2));
1960    case ICmpInst::ICMP_SLE: return ConstantInt::get(ResultTy, V1.sle(V2));
1961    case ICmpInst::ICMP_SGE: return ConstantInt::get(ResultTy, V1.sge(V2));
1962    case ICmpInst::ICMP_ULT: return ConstantInt::get(ResultTy, V1.ult(V2));
1963    case ICmpInst::ICMP_UGT: return ConstantInt::get(ResultTy, V1.ugt(V2));
1964    case ICmpInst::ICMP_ULE: return ConstantInt::get(ResultTy, V1.ule(V2));
1965    case ICmpInst::ICMP_UGE: return ConstantInt::get(ResultTy, V1.uge(V2));
1966    }
1967  } else if (isa<ConstantFP>(C1) && isa<ConstantFP>(C2)) {
1968    const APFloat &C1V = cast<ConstantFP>(C1)->getValueAPF();
1969    const APFloat &C2V = cast<ConstantFP>(C2)->getValueAPF();
1970    APFloat::cmpResult R = C1V.compare(C2V);
1971    switch (pred) {
1972    default: llvm_unreachable("Invalid FCmp Predicate");
1973    case FCmpInst::FCMP_FALSE: return Constant::getNullValue(ResultTy);
1974    case FCmpInst::FCMP_TRUE:  return Constant::getAllOnesValue(ResultTy);
1975    case FCmpInst::FCMP_UNO:
1976      return ConstantInt::get(ResultTy, R==APFloat::cmpUnordered);
1977    case FCmpInst::FCMP_ORD:
1978      return ConstantInt::get(ResultTy, R!=APFloat::cmpUnordered);
1979    case FCmpInst::FCMP_UEQ:
1980      return ConstantInt::get(ResultTy, R==APFloat::cmpUnordered ||
1981                                        R==APFloat::cmpEqual);
1982    case FCmpInst::FCMP_OEQ:
1983      return ConstantInt::get(ResultTy, R==APFloat::cmpEqual);
1984    case FCmpInst::FCMP_UNE:
1985      return ConstantInt::get(ResultTy, R!=APFloat::cmpEqual);
1986    case FCmpInst::FCMP_ONE:
1987      return ConstantInt::get(ResultTy, R==APFloat::cmpLessThan ||
1988                                        R==APFloat::cmpGreaterThan);
1989    case FCmpInst::FCMP_ULT:
1990      return ConstantInt::get(ResultTy, R==APFloat::cmpUnordered ||
1991                                        R==APFloat::cmpLessThan);
1992    case FCmpInst::FCMP_OLT:
1993      return ConstantInt::get(ResultTy, R==APFloat::cmpLessThan);
1994    case FCmpInst::FCMP_UGT:
1995      return ConstantInt::get(ResultTy, R==APFloat::cmpUnordered ||
1996                                        R==APFloat::cmpGreaterThan);
1997    case FCmpInst::FCMP_OGT:
1998      return ConstantInt::get(ResultTy, R==APFloat::cmpGreaterThan);
1999    case FCmpInst::FCMP_ULE:
2000      return ConstantInt::get(ResultTy, R!=APFloat::cmpGreaterThan);
2001    case FCmpInst::FCMP_OLE:
2002      return ConstantInt::get(ResultTy, R==APFloat::cmpLessThan ||
2003                                        R==APFloat::cmpEqual);
2004    case FCmpInst::FCMP_UGE:
2005      return ConstantInt::get(ResultTy, R!=APFloat::cmpLessThan);
2006    case FCmpInst::FCMP_OGE:
2007      return ConstantInt::get(ResultTy, R==APFloat::cmpGreaterThan ||
2008                                        R==APFloat::cmpEqual);
2009    }
2010  } else if (auto *C1VTy = dyn_cast<VectorType>(C1->getType())) {
2011
    // Do not iterate on scalable vectors. The number of elements is unknown
    // at compile-time.
2014    if (isa<ScalableVectorType>(C1VTy))
2015      return nullptr;
2016
2017    // Fast path for splatted constants.
2018    if (Constant *C1Splat = C1->getSplatValue())
2019      if (Constant *C2Splat = C2->getSplatValue())
2020        return ConstantVector::getSplat(
2021            C1VTy->getElementCount(),
2022            ConstantExpr::getCompare(pred, C1Splat, C2Splat));
2023
2024    // If we can constant fold the comparison of each element, constant fold
2025    // the whole vector comparison.
2026    SmallVector<Constant*, 4> ResElts;
2027    Type *Ty = IntegerType::get(C1->getContext(), 32);
2028    // Compare the elements, producing an i1 result or constant expr.
2029    for (unsigned i = 0, e = C1VTy->getElementCount().Min; i != e; ++i) {
2030      Constant *C1E =
2031        ConstantExpr::getExtractElement(C1, ConstantInt::get(Ty, i));
2032      Constant *C2E =
2033        ConstantExpr::getExtractElement(C2, ConstantInt::get(Ty, i));
2034
2035      ResElts.push_back(ConstantExpr::getCompare(pred, C1E, C2E));
2036    }
2037
2038    return ConstantVector::get(ResElts);
2039  }
2040
2041  if (C1->getType()->isFloatingPointTy() &&
      // Only call evaluateFCmpRelation if we have a constant expr to avoid
      // infinite recursion.
2044      (isa<ConstantExpr>(C1) || isa<ConstantExpr>(C2))) {
2045    int Result = -1;  // -1 = unknown, 0 = known false, 1 = known true.
2046    switch (evaluateFCmpRelation(C1, C2)) {
2047    default: llvm_unreachable("Unknown relation!");
2048    case FCmpInst::FCMP_UNO:
2049    case FCmpInst::FCMP_ORD:
2050    case FCmpInst::FCMP_UNE:
2051    case FCmpInst::FCMP_ULT:
2052    case FCmpInst::FCMP_UGT:
2053    case FCmpInst::FCMP_ULE:
2054    case FCmpInst::FCMP_UGE:
2055    case FCmpInst::FCMP_TRUE:
2056    case FCmpInst::FCMP_FALSE:
2057    case FCmpInst::BAD_FCMP_PREDICATE:
2058      break; // Couldn't determine anything about these constants.
2059    case FCmpInst::FCMP_OEQ: // We know that C1 == C2
2060      Result = (pred == FCmpInst::FCMP_UEQ || pred == FCmpInst::FCMP_OEQ ||
2061                pred == FCmpInst::FCMP_ULE || pred == FCmpInst::FCMP_OLE ||
2062                pred == FCmpInst::FCMP_UGE || pred == FCmpInst::FCMP_OGE);
2063      break;
2064    case FCmpInst::FCMP_OLT: // We know that C1 < C2
2065      Result = (pred == FCmpInst::FCMP_UNE || pred == FCmpInst::FCMP_ONE ||
2066                pred == FCmpInst::FCMP_ULT || pred == FCmpInst::FCMP_OLT ||
2067                pred == FCmpInst::FCMP_ULE || pred == FCmpInst::FCMP_OLE);
2068      break;
2069    case FCmpInst::FCMP_OGT: // We know that C1 > C2
2070      Result = (pred == FCmpInst::FCMP_UNE || pred == FCmpInst::FCMP_ONE ||
2071                pred == FCmpInst::FCMP_UGT || pred == FCmpInst::FCMP_OGT ||
2072                pred == FCmpInst::FCMP_UGE || pred == FCmpInst::FCMP_OGE);
2073      break;
2074    case FCmpInst::FCMP_OLE: // We know that C1 <= C2
2075      // We can only partially decide this relation.
2076      if (pred == FCmpInst::FCMP_UGT || pred == FCmpInst::FCMP_OGT)
2077        Result = 0;
2078      else if (pred == FCmpInst::FCMP_ULT || pred == FCmpInst::FCMP_OLT)
2079        Result = 1;
2080      break;
    case FCmpInst::FCMP_OGE: // We know that C1 >= C2
2082      // We can only partially decide this relation.
2083      if (pred == FCmpInst::FCMP_ULT || pred == FCmpInst::FCMP_OLT)
2084        Result = 0;
2085      else if (pred == FCmpInst::FCMP_UGT || pred == FCmpInst::FCMP_OGT)
2086        Result = 1;
2087      break;
2088    case FCmpInst::FCMP_ONE: // We know that C1 != C2
2089      // We can only partially decide this relation.
2090      if (pred == FCmpInst::FCMP_OEQ || pred == FCmpInst::FCMP_UEQ)
2091        Result = 0;
2092      else if (pred == FCmpInst::FCMP_ONE || pred == FCmpInst::FCMP_UNE)
2093        Result = 1;
2094      break;
2095    case FCmpInst::FCMP_UEQ: // We know that C1 == C2 || isUnordered(C1, C2).
2096      // We can only partially decide this relation.
2097      if (pred == FCmpInst::FCMP_ONE)
2098        Result = 0;
2099      else if (pred == FCmpInst::FCMP_UEQ)
2100        Result = 1;
2101      break;
2102    }
2103
2104    // If we evaluated the result, return it now.
2105    if (Result != -1)
2106      return ConstantInt::get(ResultTy, Result);
2107
2108  } else {
2109    // Evaluate the relation between the two constants, per the predicate.
2110    int Result = -1;  // -1 = unknown, 0 = known false, 1 = known true.
2111    switch (evaluateICmpRelation(C1, C2,
2112                                 CmpInst::isSigned((CmpInst::Predicate)pred))) {
2113    default: llvm_unreachable("Unknown relational!");
2114    case ICmpInst::BAD_ICMP_PREDICATE:
2115      break;  // Couldn't determine anything about these constants.
2116    case ICmpInst::ICMP_EQ:   // We know the constants are equal!
2117      // If we know the constants are equal, we can decide the result of this
2118      // computation precisely.
2119      Result = ICmpInst::isTrueWhenEqual((ICmpInst::Predicate)pred);
2120      break;
2121    case ICmpInst::ICMP_ULT:
2122      switch (pred) {
2123      case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_NE: case ICmpInst::ICMP_ULE:
2124        Result = 1; break;
2125      case ICmpInst::ICMP_UGT: case ICmpInst::ICMP_EQ: case ICmpInst::ICMP_UGE:
2126        Result = 0; break;
2127      }
2128      break;
2129    case ICmpInst::ICMP_SLT:
2130      switch (pred) {
2131      case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_NE: case ICmpInst::ICMP_SLE:
2132        Result = 1; break;
2133      case ICmpInst::ICMP_SGT: case ICmpInst::ICMP_EQ: case ICmpInst::ICMP_SGE:
2134        Result = 0; break;
2135      }
2136      break;
2137    case ICmpInst::ICMP_UGT:
2138      switch (pred) {
2139      case ICmpInst::ICMP_UGT: case ICmpInst::ICMP_NE: case ICmpInst::ICMP_UGE:
2140        Result = 1; break;
2141      case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_EQ: case ICmpInst::ICMP_ULE:
2142        Result = 0; break;
2143      }
2144      break;
2145    case ICmpInst::ICMP_SGT:
2146      switch (pred) {
2147      case ICmpInst::ICMP_SGT: case ICmpInst::ICMP_NE: case ICmpInst::ICMP_SGE:
2148        Result = 1; break;
2149      case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_EQ: case ICmpInst::ICMP_SLE:
2150        Result = 0; break;
2151      }
2152      break;
2153    case ICmpInst::ICMP_ULE:
2154      if (pred == ICmpInst::ICMP_UGT) Result = 0;
2155      if (pred == ICmpInst::ICMP_ULT || pred == ICmpInst::ICMP_ULE) Result = 1;
2156      break;
2157    case ICmpInst::ICMP_SLE:
2158      if (pred == ICmpInst::ICMP_SGT) Result = 0;
2159      if (pred == ICmpInst::ICMP_SLT || pred == ICmpInst::ICMP_SLE) Result = 1;
2160      break;
2161    case ICmpInst::ICMP_UGE:
2162      if (pred == ICmpInst::ICMP_ULT) Result = 0;
2163      if (pred == ICmpInst::ICMP_UGT || pred == ICmpInst::ICMP_UGE) Result = 1;
2164      break;
2165    case ICmpInst::ICMP_SGE:
2166      if (pred == ICmpInst::ICMP_SLT) Result = 0;
2167      if (pred == ICmpInst::ICMP_SGT || pred == ICmpInst::ICMP_SGE) Result = 1;
2168      break;
2169    case ICmpInst::ICMP_NE:
2170      if (pred == ICmpInst::ICMP_EQ) Result = 0;
2171      if (pred == ICmpInst::ICMP_NE) Result = 1;
2172      break;
2173    }
2174
2175    // If we evaluated the result, return it now.
2176    if (Result != -1)
2177      return ConstantInt::get(ResultTy, Result);
2178
2179    // If the right hand side is a bitcast, try using its inverse to simplify
2180    // it by moving it to the left hand side.  We can't do this if it would turn
    // a vector compare into a scalar compare or vice versa, or if it would turn
2182    // the operands into FP values.
2183    if (ConstantExpr *CE2 = dyn_cast<ConstantExpr>(C2)) {
2184      Constant *CE2Op0 = CE2->getOperand(0);
2185      if (CE2->getOpcode() == Instruction::BitCast &&
2186          CE2->getType()->isVectorTy() == CE2Op0->getType()->isVectorTy() &&
2187          !CE2Op0->getType()->isFPOrFPVectorTy()) {
2188        Constant *Inverse = ConstantExpr::getBitCast(C1, CE2Op0->getType());
2189        return ConstantExpr::getICmp(pred, Inverse, CE2Op0);
2190      }
2191    }
2192
2193    // If the left hand side is an extension, try eliminating it.
2194    if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(C1)) {
2195      if ((CE1->getOpcode() == Instruction::SExt &&
2196           ICmpInst::isSigned((ICmpInst::Predicate)pred)) ||
2197          (CE1->getOpcode() == Instruction::ZExt &&
2198           !ICmpInst::isSigned((ICmpInst::Predicate)pred))){
2199        Constant *CE1Op0 = CE1->getOperand(0);
2200        Constant *CE1Inverse = ConstantExpr::getTrunc(CE1, CE1Op0->getType());
2201        if (CE1Inverse == CE1Op0) {
2202          // Check whether we can safely truncate the right hand side.
2203          Constant *C2Inverse = ConstantExpr::getTrunc(C2, CE1Op0->getType());
2204          if (ConstantExpr::getCast(CE1->getOpcode(), C2Inverse,
2205                                    C2->getType()) == C2)
2206            return ConstantExpr::getICmp(pred, CE1Inverse, C2Inverse);
2207        }
2208      }
2209    }
2210
2211    if ((!isa<ConstantExpr>(C1) && isa<ConstantExpr>(C2)) ||
2212        (C1->isNullValue() && !C2->isNullValue())) {
2213      // If C2 is a constant expr and C1 isn't, flip them around and fold the
2214      // other way if possible.
2215      // Also, if C1 is null and C2 isn't, flip them around.
2216      pred = ICmpInst::getSwappedPredicate((ICmpInst::Predicate)pred);
2217      return ConstantExpr::getICmp(pred, C2, C1);
2218    }
2219  }
2220  return nullptr;
2221}
2222
2223/// Test whether the given sequence of *normalized* indices is "inbounds".
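/// Illustrative examples: the index sequences (0, 3, 7) and (1, 0, 0) are
/// considered inbounds (the latter by the one-past-the-end rule), while
/// (2, 0, 0) is not.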
2224template<typename IndexTy>
2225static bool isInBoundsIndices(ArrayRef<IndexTy> Idxs) {
2226  // No indices means nothing that could be out of bounds.
2227  if (Idxs.empty()) return true;
2228
2229  // If the first index is zero, it's in bounds.
2230  if (cast<Constant>(Idxs[0])->isNullValue()) return true;
2231
2232  // If the first index is one and all the rest are zero, it's in bounds,
2233  // by the one-past-the-end rule.
2234  if (auto *CI = dyn_cast<ConstantInt>(Idxs[0])) {
2235    if (!CI->isOne())
2236      return false;
2237  } else {
2238    auto *CV = cast<ConstantDataVector>(Idxs[0]);
2239    CI = dyn_cast_or_null<ConstantInt>(CV->getSplatValue());
2240    if (!CI || !CI->isOne())
2241      return false;
2242  }
2243
2244  for (unsigned i = 1, e = Idxs.size(); i != e; ++i)
2245    if (!cast<Constant>(Idxs[i])->isNullValue())
2246      return false;
2247  return true;
2248}
2249
2250/// Test whether a given ConstantInt is in-range for a SequentialType.
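/// Illustrative example: for a [4 x i32] array (NumElements == 4), indices
/// 0 through 3 are in range while -1 and 4 are not; NumElements == 0 denotes
/// an unbounded sequential type, for which only negative indices are
/// rejected.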
2251static bool isIndexInRangeOfArrayType(uint64_t NumElements,
2252                                      const ConstantInt *CI) {
2253  // We cannot bounds check the index if it doesn't fit in an int64_t.
2254  if (CI->getValue().getMinSignedBits() > 64)
2255    return false;
2256
2257  // A negative index or an index past the end of our sequential type is
2258  // considered out-of-range.
2259  int64_t IndexVal = CI->getSExtValue();
2260  if (IndexVal < 0 || (NumElements > 0 && (uint64_t)IndexVal >= NumElements))
2261    return false;
2262
2263  // Otherwise, it is in-range.
2264  return true;
2265}
2266
2267Constant *llvm::ConstantFoldGetElementPtr(Type *PointeeTy, Constant *C,
2268                                          bool InBounds,
2269                                          Optional<unsigned> InRangeIndex,
2270                                          ArrayRef<Value *> Idxs) {
2271  if (Idxs.empty()) return C;
2272
2273  Type *GEPTy = GetElementPtrInst::getGEPReturnType(
2274      PointeeTy, C, makeArrayRef((Value *const *)Idxs.data(), Idxs.size()));
2275
2276  if (isa<UndefValue>(C))
2277    return UndefValue::get(GEPTy);
2278
2279  Constant *Idx0 = cast<Constant>(Idxs[0]);
2280  if (Idxs.size() == 1 && (Idx0->isNullValue() || isa<UndefValue>(Idx0)))
2281    return GEPTy->isVectorTy() && !C->getType()->isVectorTy()
2282               ? ConstantVector::getSplat(
2283                     cast<VectorType>(GEPTy)->getElementCount(), C)
2284               : C;
2285
2286  if (C->isNullValue()) {
2287    bool isNull = true;
2288    for (unsigned i = 0, e = Idxs.size(); i != e; ++i)
2289      if (!isa<UndefValue>(Idxs[i]) &&
2290          !cast<Constant>(Idxs[i])->isNullValue()) {
2291        isNull = false;
2292        break;
2293      }
2294    if (isNull) {
2295      PointerType *PtrTy = cast<PointerType>(C->getType()->getScalarType());
2296      Type *Ty = GetElementPtrInst::getIndexedType(PointeeTy, Idxs);
2297
2298      assert(Ty && "Invalid indices for GEP!");
2299      Type *OrigGEPTy = PointerType::get(Ty, PtrTy->getAddressSpace());
2300      Type *GEPTy = PointerType::get(Ty, PtrTy->getAddressSpace());
2301      if (VectorType *VT = dyn_cast<VectorType>(C->getType()))
2302        GEPTy = VectorType::get(OrigGEPTy, VT->getElementCount());
2303
      // The GEP returns a vector of pointers when one or more of
      // its arguments is a vector.
2306      for (unsigned i = 0, e = Idxs.size(); i != e; ++i) {
2307        if (auto *VT = dyn_cast<VectorType>(Idxs[i]->getType())) {
2308          assert((!isa<VectorType>(GEPTy) || isa<ScalableVectorType>(GEPTy) ==
2309                                                 isa<ScalableVectorType>(VT)) &&
2310                 "Mismatched GEPTy vector types");
2311          GEPTy = VectorType::get(OrigGEPTy, VT->getElementCount());
2312          break;
2313        }
2314      }
2315
2316      return Constant::getNullValue(GEPTy);
2317    }
2318  }
2319
2320  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
2321    // Combine Indices - If the source pointer to this getelementptr instruction
2322    // is a getelementptr instruction, combine the indices of the two
2323    // getelementptr instructions into a single instruction.
2324    //
2325    if (CE->getOpcode() == Instruction::GetElementPtr) {
2326      gep_type_iterator LastI = gep_type_end(CE);
2327      for (gep_type_iterator I = gep_type_begin(CE), E = gep_type_end(CE);
2328           I != E; ++I)
2329        LastI = I;
2330
2331      // We cannot combine indices if doing so would take us outside of an
2332      // array or vector.  Doing otherwise could trick us if we evaluated such a
2333      // GEP as part of a load.
2334      //
2335      // e.g. Consider if the original GEP was:
2336      // i8* getelementptr ({ [2 x i8], i32, i8, [3 x i8] }* @main.c,
2337      //                    i32 0, i32 0, i64 0)
2338      //
2339      // If we then tried to offset it by '8' to get to the third element,
2340      // an i8, we should *not* get:
2341      // i8* getelementptr ({ [2 x i8], i32, i8, [3 x i8] }* @main.c,
2342      //                    i32 0, i32 0, i64 8)
2343      //
      // This GEP tries to index array element '8', which runs out-of-bounds.
2345      // Subsequent evaluation would get confused and produce erroneous results.
2346      //
2347      // The following prohibits such a GEP from being formed by checking to see
2348      // if the index is in-range with respect to an array.
2349      // TODO: This code may be extended to handle vectors as well.
2350      bool PerformFold = false;
2351      if (Idx0->isNullValue())
2352        PerformFold = true;
2353      else if (LastI.isSequential())
2354        if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx0))
2355          PerformFold = (!LastI.isBoundedSequential() ||
2356                         isIndexInRangeOfArrayType(
2357                             LastI.getSequentialNumElements(), CI)) &&
2358                        !CE->getOperand(CE->getNumOperands() - 1)
2359                             ->getType()
2360                             ->isVectorTy();
2361
2362      if (PerformFold) {
2363        SmallVector<Value*, 16> NewIndices;
2364        NewIndices.reserve(Idxs.size() + CE->getNumOperands());
2365        NewIndices.append(CE->op_begin() + 1, CE->op_end() - 1);
2366
2367        // Add the last index of the source with the first index of the new GEP.
2368        // Make sure to handle the case when they are actually different types.
2369        Constant *Combined = CE->getOperand(CE->getNumOperands()-1);
2370        // Otherwise it must be an array.
2371        if (!Idx0->isNullValue()) {
2372          Type *IdxTy = Combined->getType();
2373          if (IdxTy != Idx0->getType()) {
2374            unsigned CommonExtendedWidth =
2375                std::max(IdxTy->getIntegerBitWidth(),
2376                         Idx0->getType()->getIntegerBitWidth());
2377            CommonExtendedWidth = std::max(CommonExtendedWidth, 64U);
2378
2379            Type *CommonTy =
2380                Type::getIntNTy(IdxTy->getContext(), CommonExtendedWidth);
2381            Constant *C1 = ConstantExpr::getSExtOrBitCast(Idx0, CommonTy);
2382            Constant *C2 = ConstantExpr::getSExtOrBitCast(Combined, CommonTy);
2383            Combined = ConstantExpr::get(Instruction::Add, C1, C2);
2384          } else {
2385            Combined =
2386              ConstantExpr::get(Instruction::Add, Idx0, Combined);
2387          }
2388        }
2389
2390        NewIndices.push_back(Combined);
2391        NewIndices.append(Idxs.begin() + 1, Idxs.end());
2392
2393        // The combined GEP normally inherits its index inrange attribute from
2394        // the inner GEP, but if the inner GEP's last index was adjusted by the
        // outer GEP, any inrange attribute on that index is invalidated.
2396        Optional<unsigned> IRIndex = cast<GEPOperator>(CE)->getInRangeIndex();
        if (IRIndex && *IRIndex == CE->getNumOperands() - 2 &&
            !Idx0->isNullValue())
2398          IRIndex = None;
2399
2400        return ConstantExpr::getGetElementPtr(
2401            cast<GEPOperator>(CE)->getSourceElementType(), CE->getOperand(0),
2402            NewIndices, InBounds && cast<GEPOperator>(CE)->isInBounds(),
2403            IRIndex);
2404      }
2405    }
2406
2407    // Attempt to fold casts to the same type away.  For example, folding:
2408    //
2409    //   i32* getelementptr ([2 x i32]* bitcast ([3 x i32]* %X to [2 x i32]*),
2410    //                       i64 0, i64 0)
2411    // into:
2412    //
2413    //   i32* getelementptr ([3 x i32]* %X, i64 0, i64 0)
2414    //
2415    // Don't fold if the cast is changing address spaces.
2416    if (CE->isCast() && Idxs.size() > 1 && Idx0->isNullValue()) {
2417      PointerType *SrcPtrTy =
2418        dyn_cast<PointerType>(CE->getOperand(0)->getType());
2419      PointerType *DstPtrTy = dyn_cast<PointerType>(CE->getType());
2420      if (SrcPtrTy && DstPtrTy) {
2421        ArrayType *SrcArrayTy =
2422          dyn_cast<ArrayType>(SrcPtrTy->getElementType());
2423        ArrayType *DstArrayTy =
2424          dyn_cast<ArrayType>(DstPtrTy->getElementType());
2425        if (SrcArrayTy && DstArrayTy
2426            && SrcArrayTy->getElementType() == DstArrayTy->getElementType()
2427            && SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
2428          return ConstantExpr::getGetElementPtr(SrcArrayTy,
2429                                                (Constant *)CE->getOperand(0),
2430                                                Idxs, InBounds, InRangeIndex);
2431      }
2432    }
2433  }
2434
2435  // Check to see if any array indices are not within the corresponding
2436  // notional array or vector bounds. If so, try to determine if they can be
2437  // factored out into preceding dimensions.
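  // Illustrative example: in getelementptr [2 x [3 x i32]], @a, i64 0, i64 1,
  // i64 7 the trailing index 7 is out of range for [3 x i32]; it factors as
  // 7 == 2 * 3 + 1, so the indices are rewritten to (0, 1 + 2, 1) == (0, 3, 1).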
2438  SmallVector<Constant *, 8> NewIdxs;
2439  Type *Ty = PointeeTy;
2440  Type *Prev = C->getType();
2441  auto GEPIter = gep_type_begin(PointeeTy, Idxs);
2442  bool Unknown =
2443      !isa<ConstantInt>(Idxs[0]) && !isa<ConstantDataVector>(Idxs[0]);
2444  for (unsigned i = 1, e = Idxs.size(); i != e;
2445       Prev = Ty, Ty = (++GEPIter).getIndexedType(), ++i) {
2446    if (!isa<ConstantInt>(Idxs[i]) && !isa<ConstantDataVector>(Idxs[i])) {
2447      // We don't know if it's in range or not.
2448      Unknown = true;
2449      continue;
2450    }
2451    if (!isa<ConstantInt>(Idxs[i - 1]) && !isa<ConstantDataVector>(Idxs[i - 1]))
2452      // Skip if the type of the previous index is not supported.
2453      continue;
2454    if (InRangeIndex && i == *InRangeIndex + 1) {
2455      // If an index is marked inrange, we cannot apply this canonicalization to
2456      // the following index, as that will cause the inrange index to point to
2457      // the wrong element.
2458      continue;
2459    }
2460    if (isa<StructType>(Ty)) {
      // The verifier makes sure that GEPs into a struct are in range.
2462      continue;
2463    }
2464    if (isa<VectorType>(Ty)) {
      // There can be awkward padding after a non-power-of-two vector.
2466      Unknown = true;
2467      continue;
2468    }
2469    auto *STy = cast<ArrayType>(Ty);
2470    if (ConstantInt *CI = dyn_cast<ConstantInt>(Idxs[i])) {
2471      if (isIndexInRangeOfArrayType(STy->getNumElements(), CI))
2472        // It's in range, skip to the next index.
2473        continue;
2474      if (CI->getSExtValue() < 0) {
2475        // It's out of range and negative, don't try to factor it.
2476        Unknown = true;
2477        continue;
2478      }
2479    } else {
2480      auto *CV = cast<ConstantDataVector>(Idxs[i]);
2481      bool InRange = true;
2482      for (unsigned I = 0, E = CV->getNumElements(); I != E; ++I) {
2483        auto *CI = cast<ConstantInt>(CV->getElementAsConstant(I));
2484        InRange &= isIndexInRangeOfArrayType(STy->getNumElements(), CI);
2485        if (CI->getSExtValue() < 0) {
2486          Unknown = true;
2487          break;
2488        }
2489      }
2490      if (InRange || Unknown)
        // Either every element is in range, or a negative element already
        // marked the index as unknown; in both cases skip to the next index
        // rather than trying to factor it.
2493        continue;
2494    }
2495    if (isa<StructType>(Prev)) {
2496      // It's out of range, but the prior dimension is a struct
2497      // so we can't do anything about it.
2498      Unknown = true;
2499      continue;
2500    }
2501    // It's out of range, but we can factor it into the prior
2502    // dimension.
2503    NewIdxs.resize(Idxs.size());
2504    // Determine the number of elements in our sequential type.
2505    uint64_t NumElements = STy->getArrayNumElements();
2506
2507    // Expand the current index or the previous index to a vector from a scalar
2508    // if necessary.
2509    Constant *CurrIdx = cast<Constant>(Idxs[i]);
2510    auto *PrevIdx =
2511        NewIdxs[i - 1] ? NewIdxs[i - 1] : cast<Constant>(Idxs[i - 1]);
2512    bool IsCurrIdxVector = CurrIdx->getType()->isVectorTy();
2513    bool IsPrevIdxVector = PrevIdx->getType()->isVectorTy();
2514    bool UseVector = IsCurrIdxVector || IsPrevIdxVector;
2515
2516    if (!IsCurrIdxVector && IsPrevIdxVector)
2517      CurrIdx = ConstantDataVector::getSplat(
2518          cast<FixedVectorType>(PrevIdx->getType())->getNumElements(), CurrIdx);
2519
2520    if (!IsPrevIdxVector && IsCurrIdxVector)
2521      PrevIdx = ConstantDataVector::getSplat(
2522          cast<FixedVectorType>(CurrIdx->getType())->getNumElements(), PrevIdx);
2523
2524    Constant *Factor =
2525        ConstantInt::get(CurrIdx->getType()->getScalarType(), NumElements);
2526    if (UseVector)
2527      Factor = ConstantDataVector::getSplat(
2528          IsPrevIdxVector
2529              ? cast<FixedVectorType>(PrevIdx->getType())->getNumElements()
2530              : cast<FixedVectorType>(CurrIdx->getType())->getNumElements(),
2531          Factor);
2532
2533    NewIdxs[i] = ConstantExpr::getSRem(CurrIdx, Factor);
2534
2535    Constant *Div = ConstantExpr::getSDiv(CurrIdx, Factor);
2536
2537    unsigned CommonExtendedWidth =
2538        std::max(PrevIdx->getType()->getScalarSizeInBits(),
2539                 Div->getType()->getScalarSizeInBits());
2540    CommonExtendedWidth = std::max(CommonExtendedWidth, 64U);
2541
2542    // Before adding, extend both operands to i64 to avoid
2543    // overflow trouble.
2544    Type *ExtendedTy = Type::getIntNTy(Div->getContext(), CommonExtendedWidth);
2545    if (UseVector)
2546      ExtendedTy = FixedVectorType::get(
2547          ExtendedTy,
2548          IsPrevIdxVector
2549              ? cast<FixedVectorType>(PrevIdx->getType())->getNumElements()
2550              : cast<FixedVectorType>(CurrIdx->getType())->getNumElements());
2551
2552    if (!PrevIdx->getType()->isIntOrIntVectorTy(CommonExtendedWidth))
2553      PrevIdx = ConstantExpr::getSExt(PrevIdx, ExtendedTy);
2554
2555    if (!Div->getType()->isIntOrIntVectorTy(CommonExtendedWidth))
2556      Div = ConstantExpr::getSExt(Div, ExtendedTy);
2557
2558    NewIdxs[i - 1] = ConstantExpr::getAdd(PrevIdx, Div);
2559  }
2560
2561  // If we did any factoring, start over with the adjusted indices.
2562  if (!NewIdxs.empty()) {
2563    for (unsigned i = 0, e = Idxs.size(); i != e; ++i)
2564      if (!NewIdxs[i]) NewIdxs[i] = cast<Constant>(Idxs[i]);
2565    return ConstantExpr::getGetElementPtr(PointeeTy, C, NewIdxs, InBounds,
2566                                          InRangeIndex);
2567  }
2568
2569  // If all indices are known integers and normalized, we can do a simple
2570  // check for the "inbounds" property.
2571  if (!Unknown && !InBounds)
2572    if (auto *GV = dyn_cast<GlobalVariable>(C))
2573      if (!GV->hasExternalWeakLinkage() && isInBoundsIndices(Idxs))
2574        return ConstantExpr::getGetElementPtr(PointeeTy, C, Idxs,
2575                                              /*InBounds=*/true, InRangeIndex);
2576
2577  return nullptr;
2578}
2579