1//===-- ConstantFolding.cpp - Fold instructions into constants ------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines routines for folding instructions into constants.
10//
11// Also, to supplement the basic IR ConstantExpr simplifications,
12// this file defines some additional folding routines that can make use of
13// DataLayout information. These functions cannot go in IR due to library
14// dependency issues.
15//
16//===----------------------------------------------------------------------===//
17
18#include "llvm/Analysis/ConstantFolding.h"
19#include "llvm/ADT/APFloat.h"
20#include "llvm/ADT/APInt.h"
21#include "llvm/ADT/APSInt.h"
22#include "llvm/ADT/ArrayRef.h"
23#include "llvm/ADT/DenseMap.h"
24#include "llvm/ADT/STLExtras.h"
25#include "llvm/ADT/SmallVector.h"
26#include "llvm/ADT/StringRef.h"
27#include "llvm/Analysis/TargetFolder.h"
28#include "llvm/Analysis/TargetLibraryInfo.h"
29#include "llvm/Analysis/ValueTracking.h"
30#include "llvm/Analysis/VectorUtils.h"
31#include "llvm/Config/config.h"
32#include "llvm/IR/Constant.h"
33#include "llvm/IR/ConstantFold.h"
34#include "llvm/IR/Constants.h"
35#include "llvm/IR/DataLayout.h"
36#include "llvm/IR/DerivedTypes.h"
37#include "llvm/IR/Function.h"
38#include "llvm/IR/GlobalValue.h"
39#include "llvm/IR/GlobalVariable.h"
40#include "llvm/IR/InstrTypes.h"
41#include "llvm/IR/Instruction.h"
42#include "llvm/IR/Instructions.h"
43#include "llvm/IR/IntrinsicInst.h"
44#include "llvm/IR/Intrinsics.h"
45#include "llvm/IR/IntrinsicsAArch64.h"
46#include "llvm/IR/IntrinsicsAMDGPU.h"
47#include "llvm/IR/IntrinsicsARM.h"
48#include "llvm/IR/IntrinsicsWebAssembly.h"
49#include "llvm/IR/IntrinsicsX86.h"
50#include "llvm/IR/Operator.h"
51#include "llvm/IR/Type.h"
52#include "llvm/IR/Value.h"
53#include "llvm/Support/Casting.h"
54#include "llvm/Support/ErrorHandling.h"
55#include "llvm/Support/KnownBits.h"
56#include "llvm/Support/MathExtras.h"
57#include <cassert>
58#include <cerrno>
59#include <cfenv>
60#include <cmath>
61#include <cstdint>
62
63using namespace llvm;
64
65namespace {
66
67//===----------------------------------------------------------------------===//
68// Constant Folding internal helper functions
69//===----------------------------------------------------------------------===//
70
71static Constant *foldConstVectorToAPInt(APInt &Result, Type *DestTy,
72                                        Constant *C, Type *SrcEltTy,
73                                        unsigned NumSrcElts,
74                                        const DataLayout &DL) {
75  // Now that we know that the input value is a vector of integers, just shift
76  // and insert them into our result.
77  unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
78  for (unsigned i = 0; i != NumSrcElts; ++i) {
79    Constant *Element;
80    if (DL.isLittleEndian())
81      Element = C->getAggregateElement(NumSrcElts - i - 1);
82    else
83      Element = C->getAggregateElement(i);
84
85    if (Element && isa<UndefValue>(Element)) {
86      Result <<= BitShift;
87      continue;
88    }
89
90    auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
91    if (!ElementCI)
92      return ConstantExpr::getBitCast(C, DestTy);
93
94    Result <<= BitShift;
95    Result |= ElementCI->getValue().zext(Result.getBitWidth());
96  }
97
98  return nullptr;
99}
100
101/// Constant fold bitcast, symbolically evaluating it with DataLayout.
102/// This always returns a non-null constant, but it may be a
103/// ConstantExpr if unfoldable.
104Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
105  assert(CastInst::castIsValid(Instruction::BitCast, C, DestTy) &&
106         "Invalid constantexpr bitcast!");
107
108  // Catch the obvious splat cases.
109  if (Constant *Res = ConstantFoldLoadFromUniformValue(C, DestTy))
110    return Res;
111
112  if (auto *VTy = dyn_cast<VectorType>(C->getType())) {
113    // Handle a vector->scalar integer/fp cast.
114    if (isa<IntegerType>(DestTy) || DestTy->isFloatingPointTy()) {
115      unsigned NumSrcElts = cast<FixedVectorType>(VTy)->getNumElements();
116      Type *SrcEltTy = VTy->getElementType();
117
118      // If the vector is a vector of floating point, convert it to vector of int
119      // to simplify things.
120      if (SrcEltTy->isFloatingPointTy()) {
121        unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
122        auto *SrcIVTy = FixedVectorType::get(
123            IntegerType::get(C->getContext(), FPWidth), NumSrcElts);
124        // Ask IR to do the conversion now that #elts line up.
125        C = ConstantExpr::getBitCast(C, SrcIVTy);
126      }
127
128      APInt Result(DL.getTypeSizeInBits(DestTy), 0);
129      if (Constant *CE = foldConstVectorToAPInt(Result, DestTy, C,
130                                                SrcEltTy, NumSrcElts, DL))
131        return CE;
132
133      if (isa<IntegerType>(DestTy))
134        return ConstantInt::get(DestTy, Result);
135
136      APFloat FP(DestTy->getFltSemantics(), Result);
137      return ConstantFP::get(DestTy->getContext(), FP);
138    }
139  }
140
141  // The code below only handles casts to vectors currently.
142  auto *DestVTy = dyn_cast<VectorType>(DestTy);
143  if (!DestVTy)
144    return ConstantExpr::getBitCast(C, DestTy);
145
146  // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
147  // vector so the code below can handle it uniformly.
148  if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
149    Constant *Ops = C; // don't take the address of C!
150    return FoldBitCast(ConstantVector::get(Ops), DestTy, DL);
151  }
152
153  // If this is a bitcast from constant vector -> vector, fold it.
154  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
155    return ConstantExpr::getBitCast(C, DestTy);
156
157  // If the element types match, IR can fold it.
158  unsigned NumDstElt = cast<FixedVectorType>(DestVTy)->getNumElements();
159  unsigned NumSrcElt = cast<FixedVectorType>(C->getType())->getNumElements();
160  if (NumDstElt == NumSrcElt)
161    return ConstantExpr::getBitCast(C, DestTy);
162
163  Type *SrcEltTy = cast<VectorType>(C->getType())->getElementType();
164  Type *DstEltTy = DestVTy->getElementType();
165
166  // Otherwise, we're changing the number of elements in a vector, which
167  // requires endianness information to do the right thing.  For example,
168  //    bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
169  // folds to (little endian):
170  //    <4 x i32> <i32 0, i32 0, i32 1, i32 0>
171  // and to (big endian):
172  //    <4 x i32> <i32 0, i32 0, i32 0, i32 1>
173
174  // First thing is first.  We only want to think about integer here, so if
175  // we have something in FP form, recast it as integer.
176  if (DstEltTy->isFloatingPointTy()) {
177    // Fold to an vector of integers with same size as our FP type.
178    unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
179    auto *DestIVTy = FixedVectorType::get(
180        IntegerType::get(C->getContext(), FPWidth), NumDstElt);
181    // Recursively handle this integer conversion, if possible.
182    C = FoldBitCast(C, DestIVTy, DL);
183
184    // Finally, IR can handle this now that #elts line up.
185    return ConstantExpr::getBitCast(C, DestTy);
186  }
187
188  // Okay, we know the destination is integer, if the input is FP, convert
189  // it to integer first.
190  if (SrcEltTy->isFloatingPointTy()) {
191    unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
192    auto *SrcIVTy = FixedVectorType::get(
193        IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
194    // Ask IR to do the conversion now that #elts line up.
195    C = ConstantExpr::getBitCast(C, SrcIVTy);
196    // If IR wasn't able to fold it, bail out.
197    if (!isa<ConstantVector>(C) &&  // FIXME: Remove ConstantVector.
198        !isa<ConstantDataVector>(C))
199      return C;
200  }
201
202  // Now we know that the input and output vectors are both integer vectors
203  // of the same size, and that their #elements is not the same.  Do the
204  // conversion here, which depends on whether the input or output has
205  // more elements.
206  bool isLittleEndian = DL.isLittleEndian();
207
208  SmallVector<Constant*, 32> Result;
209  if (NumDstElt < NumSrcElt) {
210    // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
211    Constant *Zero = Constant::getNullValue(DstEltTy);
212    unsigned Ratio = NumSrcElt/NumDstElt;
213    unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
214    unsigned SrcElt = 0;
215    for (unsigned i = 0; i != NumDstElt; ++i) {
216      // Build each element of the result.
217      Constant *Elt = Zero;
218      unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
219      for (unsigned j = 0; j != Ratio; ++j) {
220        Constant *Src = C->getAggregateElement(SrcElt++);
221        if (Src && isa<UndefValue>(Src))
222          Src = Constant::getNullValue(
223              cast<VectorType>(C->getType())->getElementType());
224        else
225          Src = dyn_cast_or_null<ConstantInt>(Src);
226        if (!Src)  // Reject constantexpr elements.
227          return ConstantExpr::getBitCast(C, DestTy);
228
229        // Zero extend the element to the right size.
230        Src = ConstantFoldCastOperand(Instruction::ZExt, Src, Elt->getType(),
231                                      DL);
232        assert(Src && "Constant folding cannot fail on plain integers");
233
234        // Shift it to the right place, depending on endianness.
235        Src = ConstantFoldBinaryOpOperands(
236            Instruction::Shl, Src, ConstantInt::get(Src->getType(), ShiftAmt),
237            DL);
238        assert(Src && "Constant folding cannot fail on plain integers");
239
240        ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
241
242        // Mix it in.
243        Elt = ConstantFoldBinaryOpOperands(Instruction::Or, Elt, Src, DL);
244        assert(Elt && "Constant folding cannot fail on plain integers");
245      }
246      Result.push_back(Elt);
247    }
248    return ConstantVector::get(Result);
249  }
250
251  // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
252  unsigned Ratio = NumDstElt/NumSrcElt;
253  unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);
254
255  // Loop over each source value, expanding into multiple results.
256  for (unsigned i = 0; i != NumSrcElt; ++i) {
257    auto *Element = C->getAggregateElement(i);
258
259    if (!Element) // Reject constantexpr elements.
260      return ConstantExpr::getBitCast(C, DestTy);
261
262    if (isa<UndefValue>(Element)) {
263      // Correctly Propagate undef values.
264      Result.append(Ratio, UndefValue::get(DstEltTy));
265      continue;
266    }
267
268    auto *Src = dyn_cast<ConstantInt>(Element);
269    if (!Src)
270      return ConstantExpr::getBitCast(C, DestTy);
271
272    unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
273    for (unsigned j = 0; j != Ratio; ++j) {
274      // Shift the piece of the value into the right place, depending on
275      // endianness.
276      APInt Elt = Src->getValue().lshr(ShiftAmt);
277      ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
278
279      // Truncate and remember this piece.
280      Result.push_back(ConstantInt::get(DstEltTy, Elt.trunc(DstBitSize)));
281    }
282  }
283
284  return ConstantVector::get(Result);
285}
286
287} // end anonymous namespace
288
289/// If this constant is a constant offset from a global, return the global and
290/// the constant. Because of constantexprs, this function is recursive.
291bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
292                                      APInt &Offset, const DataLayout &DL,
293                                      DSOLocalEquivalent **DSOEquiv) {
294  if (DSOEquiv)
295    *DSOEquiv = nullptr;
296
297  // Trivial case, constant is the global.
298  if ((GV = dyn_cast<GlobalValue>(C))) {
299    unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
300    Offset = APInt(BitWidth, 0);
301    return true;
302  }
303
304  if (auto *FoundDSOEquiv = dyn_cast<DSOLocalEquivalent>(C)) {
305    if (DSOEquiv)
306      *DSOEquiv = FoundDSOEquiv;
307    GV = FoundDSOEquiv->getGlobalValue();
308    unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
309    Offset = APInt(BitWidth, 0);
310    return true;
311  }
312
313  // Otherwise, if this isn't a constant expr, bail out.
314  auto *CE = dyn_cast<ConstantExpr>(C);
315  if (!CE) return false;
316
317  // Look through ptr->int and ptr->ptr casts.
318  if (CE->getOpcode() == Instruction::PtrToInt ||
319      CE->getOpcode() == Instruction::BitCast)
320    return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL,
321                                      DSOEquiv);
322
323  // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
324  auto *GEP = dyn_cast<GEPOperator>(CE);
325  if (!GEP)
326    return false;
327
328  unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
329  APInt TmpOffset(BitWidth, 0);
330
331  // If the base isn't a global+constant, we aren't either.
332  if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, DL,
333                                  DSOEquiv))
334    return false;
335
336  // Otherwise, add any offset that our operands provide.
337  if (!GEP->accumulateConstantOffset(DL, TmpOffset))
338    return false;
339
340  Offset = TmpOffset;
341  return true;
342}
343
344Constant *llvm::ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
345                                         const DataLayout &DL) {
346  do {
347    Type *SrcTy = C->getType();
348    if (SrcTy == DestTy)
349      return C;
350
351    TypeSize DestSize = DL.getTypeSizeInBits(DestTy);
352    TypeSize SrcSize = DL.getTypeSizeInBits(SrcTy);
353    if (!TypeSize::isKnownGE(SrcSize, DestSize))
354      return nullptr;
355
356    // Catch the obvious splat cases (since all-zeros can coerce non-integral
357    // pointers legally).
358    if (Constant *Res = ConstantFoldLoadFromUniformValue(C, DestTy))
359      return Res;
360
361    // If the type sizes are the same and a cast is legal, just directly
362    // cast the constant.
363    // But be careful not to coerce non-integral pointers illegally.
364    if (SrcSize == DestSize &&
365        DL.isNonIntegralPointerType(SrcTy->getScalarType()) ==
366            DL.isNonIntegralPointerType(DestTy->getScalarType())) {
367      Instruction::CastOps Cast = Instruction::BitCast;
368      // If we are going from a pointer to int or vice versa, we spell the cast
369      // differently.
370      if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
371        Cast = Instruction::IntToPtr;
372      else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
373        Cast = Instruction::PtrToInt;
374
375      if (CastInst::castIsValid(Cast, C, DestTy))
376        return ConstantFoldCastOperand(Cast, C, DestTy, DL);
377    }
378
379    // If this isn't an aggregate type, there is nothing we can do to drill down
380    // and find a bitcastable constant.
381    if (!SrcTy->isAggregateType() && !SrcTy->isVectorTy())
382      return nullptr;
383
384    // We're simulating a load through a pointer that was bitcast to point to
385    // a different type, so we can try to walk down through the initial
386    // elements of an aggregate to see if some part of the aggregate is
387    // castable to implement the "load" semantic model.
388    if (SrcTy->isStructTy()) {
389      // Struct types might have leading zero-length elements like [0 x i32],
390      // which are certainly not what we are looking for, so skip them.
391      unsigned Elem = 0;
392      Constant *ElemC;
393      do {
394        ElemC = C->getAggregateElement(Elem++);
395      } while (ElemC && DL.getTypeSizeInBits(ElemC->getType()).isZero());
396      C = ElemC;
397    } else {
398      // For non-byte-sized vector elements, the first element is not
399      // necessarily located at the vector base address.
400      if (auto *VT = dyn_cast<VectorType>(SrcTy))
401        if (!DL.typeSizeEqualsStoreSize(VT->getElementType()))
402          return nullptr;
403
404      C = C->getAggregateElement(0u);
405    }
406  } while (C);
407
408  return nullptr;
409}
410
411namespace {
412
413/// Recursive helper to read bits out of global. C is the constant being copied
414/// out of. ByteOffset is an offset into C. CurPtr is the pointer to copy
415/// results into and BytesLeft is the number of bytes left in
416/// the CurPtr buffer. DL is the DataLayout.
417bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
418                        unsigned BytesLeft, const DataLayout &DL) {
419  assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
420         "Out of range access");
421
422  // If this element is zero or undefined, we can just return since *CurPtr is
423  // zero initialized.
424  if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
425    return true;
426
427  if (auto *CI = dyn_cast<ConstantInt>(C)) {
428    if ((CI->getBitWidth() & 7) != 0)
429      return false;
430    const APInt &Val = CI->getValue();
431    unsigned IntBytes = unsigned(CI->getBitWidth()/8);
432
433    for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
434      unsigned n = ByteOffset;
435      if (!DL.isLittleEndian())
436        n = IntBytes - n - 1;
437      CurPtr[i] = Val.extractBits(8, n * 8).getZExtValue();
438      ++ByteOffset;
439    }
440    return true;
441  }
442
443  if (auto *CFP = dyn_cast<ConstantFP>(C)) {
444    if (CFP->getType()->isDoubleTy()) {
445      C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL);
446      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
447    }
448    if (CFP->getType()->isFloatTy()){
449      C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL);
450      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
451    }
452    if (CFP->getType()->isHalfTy()){
453      C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL);
454      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
455    }
456    return false;
457  }
458
459  if (auto *CS = dyn_cast<ConstantStruct>(C)) {
460    const StructLayout *SL = DL.getStructLayout(CS->getType());
461    unsigned Index = SL->getElementContainingOffset(ByteOffset);
462    uint64_t CurEltOffset = SL->getElementOffset(Index);
463    ByteOffset -= CurEltOffset;
464
465    while (true) {
466      // If the element access is to the element itself and not to tail padding,
467      // read the bytes from the element.
468      uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType());
469
470      if (ByteOffset < EltSize &&
471          !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
472                              BytesLeft, DL))
473        return false;
474
475      ++Index;
476
477      // Check to see if we read from the last struct element, if so we're done.
478      if (Index == CS->getType()->getNumElements())
479        return true;
480
481      // If we read all of the bytes we needed from this element we're done.
482      uint64_t NextEltOffset = SL->getElementOffset(Index);
483
484      if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
485        return true;
486
487      // Move to the next element of the struct.
488      CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
489      BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
490      ByteOffset = 0;
491      CurEltOffset = NextEltOffset;
492    }
493    // not reached.
494  }
495
496  if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
497      isa<ConstantDataSequential>(C)) {
498    uint64_t NumElts, EltSize;
499    Type *EltTy;
500    if (auto *AT = dyn_cast<ArrayType>(C->getType())) {
501      NumElts = AT->getNumElements();
502      EltTy = AT->getElementType();
503      EltSize = DL.getTypeAllocSize(EltTy);
504    } else {
505      NumElts = cast<FixedVectorType>(C->getType())->getNumElements();
506      EltTy = cast<FixedVectorType>(C->getType())->getElementType();
507      // TODO: For non-byte-sized vectors, current implementation assumes there is
508      // padding to the next byte boundary between elements.
509      if (!DL.typeSizeEqualsStoreSize(EltTy))
510        return false;
511
512      EltSize = DL.getTypeStoreSize(EltTy);
513    }
514    uint64_t Index = ByteOffset / EltSize;
515    uint64_t Offset = ByteOffset - Index * EltSize;
516
517    for (; Index != NumElts; ++Index) {
518      if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
519                              BytesLeft, DL))
520        return false;
521
522      uint64_t BytesWritten = EltSize - Offset;
523      assert(BytesWritten <= EltSize && "Not indexing into this element?");
524      if (BytesWritten >= BytesLeft)
525        return true;
526
527      Offset = 0;
528      BytesLeft -= BytesWritten;
529      CurPtr += BytesWritten;
530    }
531    return true;
532  }
533
534  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
535    if (CE->getOpcode() == Instruction::IntToPtr &&
536        CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
537      return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
538                                BytesLeft, DL);
539    }
540  }
541
542  // Otherwise, unknown initializer type.
543  return false;
544}
545
546Constant *FoldReinterpretLoadFromConst(Constant *C, Type *LoadTy,
547                                       int64_t Offset, const DataLayout &DL) {
548  // Bail out early. Not expect to load from scalable global variable.
549  if (isa<ScalableVectorType>(LoadTy))
550    return nullptr;
551
552  auto *IntType = dyn_cast<IntegerType>(LoadTy);
553
554  // If this isn't an integer load we can't fold it directly.
555  if (!IntType) {
556    // If this is a non-integer load, we can try folding it as an int load and
557    // then bitcast the result.  This can be useful for union cases.  Note
558    // that address spaces don't matter here since we're not going to result in
559    // an actual new load.
560    if (!LoadTy->isFloatingPointTy() && !LoadTy->isPointerTy() &&
561        !LoadTy->isVectorTy())
562      return nullptr;
563
564    Type *MapTy = Type::getIntNTy(C->getContext(),
565                                  DL.getTypeSizeInBits(LoadTy).getFixedValue());
566    if (Constant *Res = FoldReinterpretLoadFromConst(C, MapTy, Offset, DL)) {
567      if (Res->isNullValue() && !LoadTy->isX86_MMXTy() &&
568          !LoadTy->isX86_AMXTy())
569        // Materializing a zero can be done trivially without a bitcast
570        return Constant::getNullValue(LoadTy);
571      Type *CastTy = LoadTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(LoadTy) : LoadTy;
572      Res = FoldBitCast(Res, CastTy, DL);
573      if (LoadTy->isPtrOrPtrVectorTy()) {
574        // For vector of pointer, we needed to first convert to a vector of integer, then do vector inttoptr
575        if (Res->isNullValue() && !LoadTy->isX86_MMXTy() &&
576            !LoadTy->isX86_AMXTy())
577          return Constant::getNullValue(LoadTy);
578        if (DL.isNonIntegralPointerType(LoadTy->getScalarType()))
579          // Be careful not to replace a load of an addrspace value with an inttoptr here
580          return nullptr;
581        Res = ConstantExpr::getIntToPtr(Res, LoadTy);
582      }
583      return Res;
584    }
585    return nullptr;
586  }
587
588  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
589  if (BytesLoaded > 32 || BytesLoaded == 0)
590    return nullptr;
591
592  // If we're not accessing anything in this constant, the result is undefined.
593  if (Offset <= -1 * static_cast<int64_t>(BytesLoaded))
594    return PoisonValue::get(IntType);
595
596  // TODO: We should be able to support scalable types.
597  TypeSize InitializerSize = DL.getTypeAllocSize(C->getType());
598  if (InitializerSize.isScalable())
599    return nullptr;
600
601  // If we're not accessing anything in this constant, the result is undefined.
602  if (Offset >= (int64_t)InitializerSize.getFixedValue())
603    return PoisonValue::get(IntType);
604
605  unsigned char RawBytes[32] = {0};
606  unsigned char *CurPtr = RawBytes;
607  unsigned BytesLeft = BytesLoaded;
608
609  // If we're loading off the beginning of the global, some bytes may be valid.
610  if (Offset < 0) {
611    CurPtr += -Offset;
612    BytesLeft += Offset;
613    Offset = 0;
614  }
615
616  if (!ReadDataFromGlobal(C, Offset, CurPtr, BytesLeft, DL))
617    return nullptr;
618
619  APInt ResultVal = APInt(IntType->getBitWidth(), 0);
620  if (DL.isLittleEndian()) {
621    ResultVal = RawBytes[BytesLoaded - 1];
622    for (unsigned i = 1; i != BytesLoaded; ++i) {
623      ResultVal <<= 8;
624      ResultVal |= RawBytes[BytesLoaded - 1 - i];
625    }
626  } else {
627    ResultVal = RawBytes[0];
628    for (unsigned i = 1; i != BytesLoaded; ++i) {
629      ResultVal <<= 8;
630      ResultVal |= RawBytes[i];
631    }
632  }
633
634  return ConstantInt::get(IntType->getContext(), ResultVal);
635}
636
637} // anonymous namespace
638
639// If GV is a constant with an initializer read its representation starting
640// at Offset and return it as a constant array of unsigned char.  Otherwise
641// return null.
642Constant *llvm::ReadByteArrayFromGlobal(const GlobalVariable *GV,
643                                        uint64_t Offset) {
644  if (!GV->isConstant() || !GV->hasDefinitiveInitializer())
645    return nullptr;
646
647  const DataLayout &DL = GV->getParent()->getDataLayout();
648  Constant *Init = const_cast<Constant *>(GV->getInitializer());
649  TypeSize InitSize = DL.getTypeAllocSize(Init->getType());
650  if (InitSize < Offset)
651    return nullptr;
652
653  uint64_t NBytes = InitSize - Offset;
654  if (NBytes > UINT16_MAX)
655    // Bail for large initializers in excess of 64K to avoid allocating
656    // too much memory.
657    // Offset is assumed to be less than or equal than InitSize (this
658    // is enforced in ReadDataFromGlobal).
659    return nullptr;
660
661  SmallVector<unsigned char, 256> RawBytes(static_cast<size_t>(NBytes));
662  unsigned char *CurPtr = RawBytes.data();
663
664  if (!ReadDataFromGlobal(Init, Offset, CurPtr, NBytes, DL))
665    return nullptr;
666
667  return ConstantDataArray::get(GV->getContext(), RawBytes);
668}
669
670/// If this Offset points exactly to the start of an aggregate element, return
671/// that element, otherwise return nullptr.
672Constant *getConstantAtOffset(Constant *Base, APInt Offset,
673                              const DataLayout &DL) {
674  if (Offset.isZero())
675    return Base;
676
677  if (!isa<ConstantAggregate>(Base) && !isa<ConstantDataSequential>(Base))
678    return nullptr;
679
680  Type *ElemTy = Base->getType();
681  SmallVector<APInt> Indices = DL.getGEPIndicesForOffset(ElemTy, Offset);
682  if (!Offset.isZero() || !Indices[0].isZero())
683    return nullptr;
684
685  Constant *C = Base;
686  for (const APInt &Index : drop_begin(Indices)) {
687    if (Index.isNegative() || Index.getActiveBits() >= 32)
688      return nullptr;
689
690    C = C->getAggregateElement(Index.getZExtValue());
691    if (!C)
692      return nullptr;
693  }
694
695  return C;
696}
697
698Constant *llvm::ConstantFoldLoadFromConst(Constant *C, Type *Ty,
699                                          const APInt &Offset,
700                                          const DataLayout &DL) {
701  if (Constant *AtOffset = getConstantAtOffset(C, Offset, DL))
702    if (Constant *Result = ConstantFoldLoadThroughBitcast(AtOffset, Ty, DL))
703      return Result;
704
705  // Explicitly check for out-of-bounds access, so we return poison even if the
706  // constant is a uniform value.
707  TypeSize Size = DL.getTypeAllocSize(C->getType());
708  if (!Size.isScalable() && Offset.sge(Size.getFixedValue()))
709    return PoisonValue::get(Ty);
710
711  // Try an offset-independent fold of a uniform value.
712  if (Constant *Result = ConstantFoldLoadFromUniformValue(C, Ty))
713    return Result;
714
715  // Try hard to fold loads from bitcasted strange and non-type-safe things.
716  if (Offset.getSignificantBits() <= 64)
717    if (Constant *Result =
718            FoldReinterpretLoadFromConst(C, Ty, Offset.getSExtValue(), DL))
719      return Result;
720
721  return nullptr;
722}
723
724Constant *llvm::ConstantFoldLoadFromConst(Constant *C, Type *Ty,
725                                          const DataLayout &DL) {
726  return ConstantFoldLoadFromConst(C, Ty, APInt(64, 0), DL);
727}
728
729Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
730                                             APInt Offset,
731                                             const DataLayout &DL) {
732  // We can only fold loads from constant globals with a definitive initializer.
733  // Check this upfront, to skip expensive offset calculations.
734  auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(C));
735  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
736    return nullptr;
737
738  C = cast<Constant>(C->stripAndAccumulateConstantOffsets(
739          DL, Offset, /* AllowNonInbounds */ true));
740
741  if (C == GV)
742    if (Constant *Result = ConstantFoldLoadFromConst(GV->getInitializer(), Ty,
743                                                     Offset, DL))
744      return Result;
745
746  // If this load comes from anywhere in a uniform constant global, the value
747  // is always the same, regardless of the loaded offset.
748  return ConstantFoldLoadFromUniformValue(GV->getInitializer(), Ty);
749}
750
751Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
752                                             const DataLayout &DL) {
753  APInt Offset(DL.getIndexTypeSizeInBits(C->getType()), 0);
754  return ConstantFoldLoadFromConstPtr(C, Ty, Offset, DL);
755}
756
757Constant *llvm::ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty) {
758  if (isa<PoisonValue>(C))
759    return PoisonValue::get(Ty);
760  if (isa<UndefValue>(C))
761    return UndefValue::get(Ty);
762  if (C->isNullValue() && !Ty->isX86_MMXTy() && !Ty->isX86_AMXTy())
763    return Constant::getNullValue(Ty);
764  if (C->isAllOnesValue() &&
765      (Ty->isIntOrIntVectorTy() || Ty->isFPOrFPVectorTy()))
766    return Constant::getAllOnesValue(Ty);
767  return nullptr;
768}
769
770namespace {
771
772/// One of Op0/Op1 is a constant expression.
773/// Attempt to symbolically evaluate the result of a binary operator merging
774/// these together.  If target data info is available, it is provided as DL,
775/// otherwise DL is null.
776Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
777                                    const DataLayout &DL) {
778  // SROA
779
780  // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
781  // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
782  // bits.
783
784  if (Opc == Instruction::And) {
785    KnownBits Known0 = computeKnownBits(Op0, DL);
786    KnownBits Known1 = computeKnownBits(Op1, DL);
787    if ((Known1.One | Known0.Zero).isAllOnes()) {
788      // All the bits of Op0 that the 'and' could be masking are already zero.
789      return Op0;
790    }
791    if ((Known0.One | Known1.Zero).isAllOnes()) {
792      // All the bits of Op1 that the 'and' could be masking are already zero.
793      return Op1;
794    }
795
796    Known0 &= Known1;
797    if (Known0.isConstant())
798      return ConstantInt::get(Op0->getType(), Known0.getConstant());
799  }
800
801  // If the constant expr is something like &A[123] - &A[4].f, fold this into a
802  // constant.  This happens frequently when iterating over a global array.
803  if (Opc == Instruction::Sub) {
804    GlobalValue *GV1, *GV2;
805    APInt Offs1, Offs2;
806
807    if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, DL))
808      if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, DL) && GV1 == GV2) {
809        unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());
810
811        // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
812        // PtrToInt may change the bitwidth so we have convert to the right size
813        // first.
814        return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) -
815                                                Offs2.zextOrTrunc(OpSize));
816      }
817  }
818
819  return nullptr;
820}
821
822/// If array indices are not pointer-sized integers, explicitly cast them so
823/// that they aren't implicitly casted by the getelementptr.
824Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
825                         Type *ResultTy, bool InBounds,
826                         std::optional<unsigned> InRangeIndex,
827                         const DataLayout &DL, const TargetLibraryInfo *TLI) {
828  Type *IntIdxTy = DL.getIndexType(ResultTy);
829  Type *IntIdxScalarTy = IntIdxTy->getScalarType();
830
831  bool Any = false;
832  SmallVector<Constant*, 32> NewIdxs;
833  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
834    if ((i == 1 ||
835         !isa<StructType>(GetElementPtrInst::getIndexedType(
836             SrcElemTy, Ops.slice(1, i - 1)))) &&
837        Ops[i]->getType()->getScalarType() != IntIdxScalarTy) {
838      Any = true;
839      Type *NewType =
840          Ops[i]->getType()->isVectorTy() ? IntIdxTy : IntIdxScalarTy;
841      Constant *NewIdx = ConstantFoldCastOperand(
842          CastInst::getCastOpcode(Ops[i], true, NewType, true), Ops[i], NewType,
843          DL);
844      if (!NewIdx)
845        return nullptr;
846      NewIdxs.push_back(NewIdx);
847    } else
848      NewIdxs.push_back(Ops[i]);
849  }
850
851  if (!Any)
852    return nullptr;
853
854  Constant *C = ConstantExpr::getGetElementPtr(
855      SrcElemTy, Ops[0], NewIdxs, InBounds, InRangeIndex);
856  return ConstantFoldConstant(C, DL, TLI);
857}
858
859/// If we can symbolically evaluate the GEP constant expression, do so.
860Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
861                                  ArrayRef<Constant *> Ops,
862                                  const DataLayout &DL,
863                                  const TargetLibraryInfo *TLI) {
864  const GEPOperator *InnermostGEP = GEP;
865  bool InBounds = GEP->isInBounds();
866
867  Type *SrcElemTy = GEP->getSourceElementType();
868  Type *ResElemTy = GEP->getResultElementType();
869  Type *ResTy = GEP->getType();
870  if (!SrcElemTy->isSized() || isa<ScalableVectorType>(SrcElemTy))
871    return nullptr;
872
873  if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy,
874                                   GEP->isInBounds(), GEP->getInRangeIndex(),
875                                   DL, TLI))
876    return C;
877
878  Constant *Ptr = Ops[0];
879  if (!Ptr->getType()->isPointerTy())
880    return nullptr;
881
882  Type *IntIdxTy = DL.getIndexType(Ptr->getType());
883
884  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
885    if (!isa<ConstantInt>(Ops[i]))
886      return nullptr;
887
888  unsigned BitWidth = DL.getTypeSizeInBits(IntIdxTy);
889  APInt Offset = APInt(
890      BitWidth,
891      DL.getIndexedOffsetInType(
892          SrcElemTy, ArrayRef((Value *const *)Ops.data() + 1, Ops.size() - 1)));
893
894  // If this is a GEP of a GEP, fold it all into a single GEP.
895  while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
896    InnermostGEP = GEP;
897    InBounds &= GEP->isInBounds();
898
899    SmallVector<Value *, 4> NestedOps(llvm::drop_begin(GEP->operands()));
900
901    // Do not try the incorporate the sub-GEP if some index is not a number.
902    bool AllConstantInt = true;
903    for (Value *NestedOp : NestedOps)
904      if (!isa<ConstantInt>(NestedOp)) {
905        AllConstantInt = false;
906        break;
907      }
908    if (!AllConstantInt)
909      break;
910
911    Ptr = cast<Constant>(GEP->getOperand(0));
912    SrcElemTy = GEP->getSourceElementType();
913    Offset += APInt(BitWidth, DL.getIndexedOffsetInType(SrcElemTy, NestedOps));
914  }
915
916  // If the base value for this address is a literal integer value, fold the
917  // getelementptr to the resulting integer value casted to the pointer type.
918  APInt BasePtr(BitWidth, 0);
919  if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) {
920    if (CE->getOpcode() == Instruction::IntToPtr) {
921      if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
922        BasePtr = Base->getValue().zextOrTrunc(BitWidth);
923    }
924  }
925
926  auto *PTy = cast<PointerType>(Ptr->getType());
927  if ((Ptr->isNullValue() || BasePtr != 0) &&
928      !DL.isNonIntegralPointerType(PTy)) {
929    Constant *C = ConstantInt::get(Ptr->getContext(), Offset + BasePtr);
930    return ConstantExpr::getIntToPtr(C, ResTy);
931  }
932
933  // Otherwise form a regular getelementptr. Recompute the indices so that
934  // we eliminate over-indexing of the notional static type array bounds.
935  // This makes it easy to determine if the getelementptr is "inbounds".
936
937  // For GEPs of GlobalValues, use the value type, otherwise use an i8 GEP.
938  if (auto *GV = dyn_cast<GlobalValue>(Ptr))
939    SrcElemTy = GV->getValueType();
940  else
941    SrcElemTy = Type::getInt8Ty(Ptr->getContext());
942
943  if (!SrcElemTy->isSized())
944    return nullptr;
945
946  Type *ElemTy = SrcElemTy;
947  SmallVector<APInt> Indices = DL.getGEPIndicesForOffset(ElemTy, Offset);
948  if (Offset != 0)
949    return nullptr;
950
951  // Try to add additional zero indices to reach the desired result element
952  // type.
953  // TODO: Should we avoid extra zero indices if ResElemTy can't be reached and
954  // we'll have to insert a bitcast anyway?
955  while (ElemTy != ResElemTy) {
956    Type *NextTy = GetElementPtrInst::getTypeAtIndex(ElemTy, (uint64_t)0);
957    if (!NextTy)
958      break;
959
960    Indices.push_back(APInt::getZero(isa<StructType>(ElemTy) ? 32 : BitWidth));
961    ElemTy = NextTy;
962  }
963
964  SmallVector<Constant *, 32> NewIdxs;
965  for (const APInt &Index : Indices)
966    NewIdxs.push_back(ConstantInt::get(
967        Type::getIntNTy(Ptr->getContext(), Index.getBitWidth()), Index));
968
969  // Preserve the inrange index from the innermost GEP if possible. We must
970  // have calculated the same indices up to and including the inrange index.
971  std::optional<unsigned> InRangeIndex;
972  if (std::optional<unsigned> LastIRIndex = InnermostGEP->getInRangeIndex())
973    if (SrcElemTy == InnermostGEP->getSourceElementType() &&
974        NewIdxs.size() > *LastIRIndex) {
975      InRangeIndex = LastIRIndex;
976      for (unsigned I = 0; I <= *LastIRIndex; ++I)
977        if (NewIdxs[I] != InnermostGEP->getOperand(I + 1))
978          return nullptr;
979    }
980
981  // Create a GEP.
982  return ConstantExpr::getGetElementPtr(SrcElemTy, Ptr, NewIdxs, InBounds,
983                                        InRangeIndex);
984}
985
986/// Attempt to constant fold an instruction with the
987/// specified opcode and operands.  If successful, the constant result is
988/// returned, if not, null is returned.  Note that this function can fail when
989/// attempting to fold instructions like loads and stores, which have no
990/// constant expression form.
991Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
992                                       ArrayRef<Constant *> Ops,
993                                       const DataLayout &DL,
994                                       const TargetLibraryInfo *TLI) {
995  Type *DestTy = InstOrCE->getType();
996
997  if (Instruction::isUnaryOp(Opcode))
998    return ConstantFoldUnaryOpOperand(Opcode, Ops[0], DL);
999
1000  if (Instruction::isBinaryOp(Opcode)) {
1001    switch (Opcode) {
1002    default:
1003      break;
1004    case Instruction::FAdd:
1005    case Instruction::FSub:
1006    case Instruction::FMul:
1007    case Instruction::FDiv:
1008    case Instruction::FRem:
1009      // Handle floating point instructions separately to account for denormals
1010      // TODO: If a constant expression is being folded rather than an
1011      // instruction, denormals will not be flushed/treated as zero
1012      if (const auto *I = dyn_cast<Instruction>(InstOrCE)) {
1013        return ConstantFoldFPInstOperands(Opcode, Ops[0], Ops[1], DL, I);
1014      }
1015    }
1016    return ConstantFoldBinaryOpOperands(Opcode, Ops[0], Ops[1], DL);
1017  }
1018
1019  if (Instruction::isCast(Opcode))
1020    return ConstantFoldCastOperand(Opcode, Ops[0], DestTy, DL);
1021
1022  if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) {
1023    Type *SrcElemTy = GEP->getSourceElementType();
1024    if (!ConstantExpr::isSupportedGetElementPtr(SrcElemTy))
1025      return nullptr;
1026
1027    if (Constant *C = SymbolicallyEvaluateGEP(GEP, Ops, DL, TLI))
1028      return C;
1029
1030    return ConstantExpr::getGetElementPtr(SrcElemTy, Ops[0], Ops.slice(1),
1031                                          GEP->isInBounds(),
1032                                          GEP->getInRangeIndex());
1033  }
1034
1035  if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE)) {
1036    if (CE->isCompare())
1037      return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1],
1038                                             DL, TLI);
1039    return CE->getWithOperands(Ops);
1040  }
1041
1042  switch (Opcode) {
1043  default: return nullptr;
1044  case Instruction::ICmp:
1045  case Instruction::FCmp: {
1046    auto *C = cast<CmpInst>(InstOrCE);
1047    return ConstantFoldCompareInstOperands(C->getPredicate(), Ops[0], Ops[1],
1048                                           DL, TLI, C);
1049  }
1050  case Instruction::Freeze:
1051    return isGuaranteedNotToBeUndefOrPoison(Ops[0]) ? Ops[0] : nullptr;
1052  case Instruction::Call:
1053    if (auto *F = dyn_cast<Function>(Ops.back())) {
1054      const auto *Call = cast<CallBase>(InstOrCE);
1055      if (canConstantFoldCallTo(Call, F))
1056        return ConstantFoldCall(Call, F, Ops.slice(0, Ops.size() - 1), TLI);
1057    }
1058    return nullptr;
1059  case Instruction::Select:
1060    return ConstantFoldSelectInstruction(Ops[0], Ops[1], Ops[2]);
1061  case Instruction::ExtractElement:
1062    return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
1063  case Instruction::ExtractValue:
1064    return ConstantFoldExtractValueInstruction(
1065        Ops[0], cast<ExtractValueInst>(InstOrCE)->getIndices());
1066  case Instruction::InsertElement:
1067    return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
1068  case Instruction::InsertValue:
1069    return ConstantFoldInsertValueInstruction(
1070        Ops[0], Ops[1], cast<InsertValueInst>(InstOrCE)->getIndices());
1071  case Instruction::ShuffleVector:
1072    return ConstantExpr::getShuffleVector(
1073        Ops[0], Ops[1], cast<ShuffleVectorInst>(InstOrCE)->getShuffleMask());
1074  case Instruction::Load: {
1075    const auto *LI = dyn_cast<LoadInst>(InstOrCE);
1076    if (LI->isVolatile())
1077      return nullptr;
1078    return ConstantFoldLoadFromConstPtr(Ops[0], LI->getType(), DL);
1079  }
1080  }
1081}
1082
1083} // end anonymous namespace
1084
1085//===----------------------------------------------------------------------===//
1086// Constant Folding public APIs
1087//===----------------------------------------------------------------------===//
1088
1089namespace {
1090
1091Constant *
1092ConstantFoldConstantImpl(const Constant *C, const DataLayout &DL,
1093                         const TargetLibraryInfo *TLI,
1094                         SmallDenseMap<Constant *, Constant *> &FoldedOps) {
1095  if (!isa<ConstantVector>(C) && !isa<ConstantExpr>(C))
1096    return const_cast<Constant *>(C);
1097
1098  SmallVector<Constant *, 8> Ops;
1099  for (const Use &OldU : C->operands()) {
1100    Constant *OldC = cast<Constant>(&OldU);
1101    Constant *NewC = OldC;
1102    // Recursively fold the ConstantExpr's operands. If we have already folded
1103    // a ConstantExpr, we don't have to process it again.
1104    if (isa<ConstantVector>(OldC) || isa<ConstantExpr>(OldC)) {
1105      auto It = FoldedOps.find(OldC);
1106      if (It == FoldedOps.end()) {
1107        NewC = ConstantFoldConstantImpl(OldC, DL, TLI, FoldedOps);
1108        FoldedOps.insert({OldC, NewC});
1109      } else {
1110        NewC = It->second;
1111      }
1112    }
1113    Ops.push_back(NewC);
1114  }
1115
1116  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
1117    if (Constant *Res =
1118            ConstantFoldInstOperandsImpl(CE, CE->getOpcode(), Ops, DL, TLI))
1119      return Res;
1120    return const_cast<Constant *>(C);
1121  }
1122
1123  assert(isa<ConstantVector>(C));
1124  return ConstantVector::get(Ops);
1125}
1126
1127} // end anonymous namespace
1128
1129Constant *llvm::ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
1130                                        const TargetLibraryInfo *TLI) {
1131  // Handle PHI nodes quickly here...
1132  if (auto *PN = dyn_cast<PHINode>(I)) {
1133    Constant *CommonValue = nullptr;
1134
1135    SmallDenseMap<Constant *, Constant *> FoldedOps;
1136    for (Value *Incoming : PN->incoming_values()) {
1137      // If the incoming value is undef then skip it.  Note that while we could
1138      // skip the value if it is equal to the phi node itself we choose not to
1139      // because that would break the rule that constant folding only applies if
1140      // all operands are constants.
1141      if (isa<UndefValue>(Incoming))
1142        continue;
1143      // If the incoming value is not a constant, then give up.
1144      auto *C = dyn_cast<Constant>(Incoming);
1145      if (!C)
1146        return nullptr;
1147      // Fold the PHI's operands.
1148      C = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
1149      // If the incoming value is a different constant to
1150      // the one we saw previously, then give up.
1151      if (CommonValue && C != CommonValue)
1152        return nullptr;
1153      CommonValue = C;
1154    }
1155
1156    // If we reach here, all incoming values are the same constant or undef.
1157    return CommonValue ? CommonValue : UndefValue::get(PN->getType());
1158  }
1159
1160  // Scan the operand list, checking to see if they are all constants, if so,
1161  // hand off to ConstantFoldInstOperandsImpl.
1162  if (!all_of(I->operands(), [](Use &U) { return isa<Constant>(U); }))
1163    return nullptr;
1164
1165  SmallDenseMap<Constant *, Constant *> FoldedOps;
1166  SmallVector<Constant *, 8> Ops;
1167  for (const Use &OpU : I->operands()) {
1168    auto *Op = cast<Constant>(&OpU);
1169    // Fold the Instruction's operands.
1170    Op = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps);
1171    Ops.push_back(Op);
1172  }
1173
1174  return ConstantFoldInstOperands(I, Ops, DL, TLI);
1175}
1176
1177Constant *llvm::ConstantFoldConstant(const Constant *C, const DataLayout &DL,
1178                                     const TargetLibraryInfo *TLI) {
1179  SmallDenseMap<Constant *, Constant *> FoldedOps;
1180  return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
1181}
1182
1183Constant *llvm::ConstantFoldInstOperands(Instruction *I,
1184                                         ArrayRef<Constant *> Ops,
1185                                         const DataLayout &DL,
1186                                         const TargetLibraryInfo *TLI) {
1187  return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI);
1188}
1189
1190Constant *llvm::ConstantFoldCompareInstOperands(
1191    unsigned IntPredicate, Constant *Ops0, Constant *Ops1, const DataLayout &DL,
1192    const TargetLibraryInfo *TLI, const Instruction *I) {
1193  CmpInst::Predicate Predicate = (CmpInst::Predicate)IntPredicate;
1194  // fold: icmp (inttoptr x), null         -> icmp x, 0
1195  // fold: icmp null, (inttoptr x)         -> icmp 0, x
1196  // fold: icmp (ptrtoint x), 0            -> icmp x, null
1197  // fold: icmp 0, (ptrtoint x)            -> icmp null, x
1198  // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
1199  // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
1200  //
1201  // FIXME: The following comment is out of data and the DataLayout is here now.
1202  // ConstantExpr::getCompare cannot do this, because it doesn't have DL
1203  // around to know if bit truncation is happening.
1204  if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
1205    if (Ops1->isNullValue()) {
1206      if (CE0->getOpcode() == Instruction::IntToPtr) {
1207        Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
1208        // Convert the integer value to the right size to ensure we get the
1209        // proper extension or truncation.
1210        if (Constant *C = ConstantFoldIntegerCast(CE0->getOperand(0), IntPtrTy,
1211                                                  /*IsSigned*/ false, DL)) {
1212          Constant *Null = Constant::getNullValue(C->getType());
1213          return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
1214        }
1215      }
1216
1217      // Only do this transformation if the int is intptrty in size, otherwise
1218      // there is a truncation or extension that we aren't modeling.
1219      if (CE0->getOpcode() == Instruction::PtrToInt) {
1220        Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
1221        if (CE0->getType() == IntPtrTy) {
1222          Constant *C = CE0->getOperand(0);
1223          Constant *Null = Constant::getNullValue(C->getType());
1224          return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
1225        }
1226      }
1227    }
1228
1229    if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
1230      if (CE0->getOpcode() == CE1->getOpcode()) {
1231        if (CE0->getOpcode() == Instruction::IntToPtr) {
1232          Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
1233
1234          // Convert the integer value to the right size to ensure we get the
1235          // proper extension or truncation.
1236          Constant *C0 = ConstantFoldIntegerCast(CE0->getOperand(0), IntPtrTy,
1237                                                 /*IsSigned*/ false, DL);
1238          Constant *C1 = ConstantFoldIntegerCast(CE1->getOperand(0), IntPtrTy,
1239                                                 /*IsSigned*/ false, DL);
1240          if (C0 && C1)
1241            return ConstantFoldCompareInstOperands(Predicate, C0, C1, DL, TLI);
1242        }
1243
1244        // Only do this transformation if the int is intptrty in size, otherwise
1245        // there is a truncation or extension that we aren't modeling.
1246        if (CE0->getOpcode() == Instruction::PtrToInt) {
1247          Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
1248          if (CE0->getType() == IntPtrTy &&
1249              CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
1250            return ConstantFoldCompareInstOperands(
1251                Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
1252          }
1253        }
1254      }
1255    }
1256
1257    // Convert pointer comparison (base+offset1) pred (base+offset2) into
1258    // offset1 pred offset2, for the case where the offset is inbounds. This
1259    // only works for equality and unsigned comparison, as inbounds permits
1260    // crossing the sign boundary. However, the offset comparison itself is
1261    // signed.
1262    if (Ops0->getType()->isPointerTy() && !ICmpInst::isSigned(Predicate)) {
1263      unsigned IndexWidth = DL.getIndexTypeSizeInBits(Ops0->getType());
1264      APInt Offset0(IndexWidth, 0);
1265      Value *Stripped0 =
1266          Ops0->stripAndAccumulateInBoundsConstantOffsets(DL, Offset0);
1267      APInt Offset1(IndexWidth, 0);
1268      Value *Stripped1 =
1269          Ops1->stripAndAccumulateInBoundsConstantOffsets(DL, Offset1);
1270      if (Stripped0 == Stripped1)
1271        return ConstantExpr::getCompare(
1272            ICmpInst::getSignedPredicate(Predicate),
1273            ConstantInt::get(CE0->getContext(), Offset0),
1274            ConstantInt::get(CE0->getContext(), Offset1));
1275    }
1276  } else if (isa<ConstantExpr>(Ops1)) {
1277    // If RHS is a constant expression, but the left side isn't, swap the
1278    // operands and try again.
1279    Predicate = ICmpInst::getSwappedPredicate(Predicate);
1280    return ConstantFoldCompareInstOperands(Predicate, Ops1, Ops0, DL, TLI);
1281  }
1282
1283  // Flush any denormal constant float input according to denormal handling
1284  // mode.
1285  Ops0 = FlushFPConstant(Ops0, I, /* IsOutput */ false);
1286  if (!Ops0)
1287    return nullptr;
1288  Ops1 = FlushFPConstant(Ops1, I, /* IsOutput */ false);
1289  if (!Ops1)
1290    return nullptr;
1291
1292  return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
1293}
1294
1295Constant *llvm::ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op,
1296                                           const DataLayout &DL) {
1297  assert(Instruction::isUnaryOp(Opcode));
1298
1299  return ConstantFoldUnaryInstruction(Opcode, Op);
1300}
1301
1302Constant *llvm::ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
1303                                             Constant *RHS,
1304                                             const DataLayout &DL) {
1305  assert(Instruction::isBinaryOp(Opcode));
1306  if (isa<ConstantExpr>(LHS) || isa<ConstantExpr>(RHS))
1307    if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL))
1308      return C;
1309
1310  if (ConstantExpr::isDesirableBinOp(Opcode))
1311    return ConstantExpr::get(Opcode, LHS, RHS);
1312  return ConstantFoldBinaryInstruction(Opcode, LHS, RHS);
1313}
1314
1315Constant *llvm::FlushFPConstant(Constant *Operand, const Instruction *I,
1316                                bool IsOutput) {
1317  if (!I || !I->getParent() || !I->getFunction())
1318    return Operand;
1319
1320  ConstantFP *CFP = dyn_cast<ConstantFP>(Operand);
1321  if (!CFP)
1322    return Operand;
1323
1324  const APFloat &APF = CFP->getValueAPF();
1325  // TODO: Should this canonicalize nans?
1326  if (!APF.isDenormal())
1327    return Operand;
1328
1329  Type *Ty = CFP->getType();
1330  DenormalMode DenormMode =
1331      I->getFunction()->getDenormalMode(Ty->getFltSemantics());
1332  DenormalMode::DenormalModeKind Mode =
1333      IsOutput ? DenormMode.Output : DenormMode.Input;
1334  switch (Mode) {
1335  default:
1336    llvm_unreachable("unknown denormal mode");
1337  case DenormalMode::Dynamic:
1338    return nullptr;
1339  case DenormalMode::IEEE:
1340    return Operand;
1341  case DenormalMode::PreserveSign:
1342    if (APF.isDenormal()) {
1343      return ConstantFP::get(
1344          Ty->getContext(),
1345          APFloat::getZero(Ty->getFltSemantics(), APF.isNegative()));
1346    }
1347    return Operand;
1348  case DenormalMode::PositiveZero:
1349    if (APF.isDenormal()) {
1350      return ConstantFP::get(Ty->getContext(),
1351                             APFloat::getZero(Ty->getFltSemantics(), false));
1352    }
1353    return Operand;
1354  }
1355  return Operand;
1356}
1357
1358Constant *llvm::ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS,
1359                                           Constant *RHS, const DataLayout &DL,
1360                                           const Instruction *I) {
1361  if (Instruction::isBinaryOp(Opcode)) {
1362    // Flush denormal inputs if needed.
1363    Constant *Op0 = FlushFPConstant(LHS, I, /* IsOutput */ false);
1364    if (!Op0)
1365      return nullptr;
1366    Constant *Op1 = FlushFPConstant(RHS, I, /* IsOutput */ false);
1367    if (!Op1)
1368      return nullptr;
1369
1370    // Calculate constant result.
1371    Constant *C = ConstantFoldBinaryOpOperands(Opcode, Op0, Op1, DL);
1372    if (!C)
1373      return nullptr;
1374
1375    // Flush denormal output if needed.
1376    return FlushFPConstant(C, I, /* IsOutput */ true);
1377  }
1378  // If instruction lacks a parent/function and the denormal mode cannot be
1379  // determined, use the default (IEEE).
1380  return ConstantFoldBinaryOpOperands(Opcode, LHS, RHS, DL);
1381}
1382
1383Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C,
1384                                        Type *DestTy, const DataLayout &DL) {
1385  assert(Instruction::isCast(Opcode));
1386  switch (Opcode) {
1387  default:
1388    llvm_unreachable("Missing case");
1389  case Instruction::PtrToInt:
1390    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
1391      Constant *FoldedValue = nullptr;
1392      // If the input is a inttoptr, eliminate the pair.  This requires knowing
1393      // the width of a pointer, so it can't be done in ConstantExpr::getCast.
1394      if (CE->getOpcode() == Instruction::IntToPtr) {
1395        // zext/trunc the inttoptr to pointer size.
1396        FoldedValue = ConstantFoldIntegerCast(CE->getOperand(0),
1397                                              DL.getIntPtrType(CE->getType()),
1398                                              /*IsSigned=*/false, DL);
1399      } else if (auto *GEP = dyn_cast<GEPOperator>(CE)) {
1400        // If we have GEP, we can perform the following folds:
1401        // (ptrtoint (gep null, x)) -> x
1402        // (ptrtoint (gep (gep null, x), y) -> x + y, etc.
1403        unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
1404        APInt BaseOffset(BitWidth, 0);
1405        auto *Base = cast<Constant>(GEP->stripAndAccumulateConstantOffsets(
1406            DL, BaseOffset, /*AllowNonInbounds=*/true));
1407        if (Base->isNullValue()) {
1408          FoldedValue = ConstantInt::get(CE->getContext(), BaseOffset);
1409        } else {
1410          // ptrtoint (gep i8, Ptr, (sub 0, V)) -> sub (ptrtoint Ptr), V
1411          if (GEP->getNumIndices() == 1 &&
1412              GEP->getSourceElementType()->isIntegerTy(8)) {
1413            auto *Ptr = cast<Constant>(GEP->getPointerOperand());
1414            auto *Sub = dyn_cast<ConstantExpr>(GEP->getOperand(1));
1415            Type *IntIdxTy = DL.getIndexType(Ptr->getType());
1416            if (Sub && Sub->getType() == IntIdxTy &&
1417                Sub->getOpcode() == Instruction::Sub &&
1418                Sub->getOperand(0)->isNullValue())
1419              FoldedValue = ConstantExpr::getSub(
1420                  ConstantExpr::getPtrToInt(Ptr, IntIdxTy), Sub->getOperand(1));
1421          }
1422        }
1423      }
1424      if (FoldedValue) {
1425        // Do a zext or trunc to get to the ptrtoint dest size.
1426        return ConstantFoldIntegerCast(FoldedValue, DestTy, /*IsSigned=*/false,
1427                                       DL);
1428      }
1429    }
1430    break;
1431  case Instruction::IntToPtr:
1432    // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
1433    // the int size is >= the ptr size and the address spaces are the same.
1434    // This requires knowing the width of a pointer, so it can't be done in
1435    // ConstantExpr::getCast.
1436    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
1437      if (CE->getOpcode() == Instruction::PtrToInt) {
1438        Constant *SrcPtr = CE->getOperand(0);
1439        unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
1440        unsigned MidIntSize = CE->getType()->getScalarSizeInBits();
1441
1442        if (MidIntSize >= SrcPtrSize) {
1443          unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
1444          if (SrcAS == DestTy->getPointerAddressSpace())
1445            return FoldBitCast(CE->getOperand(0), DestTy, DL);
1446        }
1447      }
1448    }
1449    break;
1450  case Instruction::Trunc:
1451  case Instruction::ZExt:
1452  case Instruction::SExt:
1453  case Instruction::FPTrunc:
1454  case Instruction::FPExt:
1455  case Instruction::UIToFP:
1456  case Instruction::SIToFP:
1457  case Instruction::FPToUI:
1458  case Instruction::FPToSI:
1459  case Instruction::AddrSpaceCast:
1460    break;
1461  case Instruction::BitCast:
1462    return FoldBitCast(C, DestTy, DL);
1463  }
1464
1465  if (ConstantExpr::isDesirableCastOp(Opcode))
1466    return ConstantExpr::getCast(Opcode, C, DestTy);
1467  return ConstantFoldCastInstruction(Opcode, C, DestTy);
1468}
1469
1470Constant *llvm::ConstantFoldIntegerCast(Constant *C, Type *DestTy,
1471                                        bool IsSigned, const DataLayout &DL) {
1472  Type *SrcTy = C->getType();
1473  if (SrcTy == DestTy)
1474    return C;
1475  if (SrcTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
1476    return ConstantFoldCastOperand(Instruction::Trunc, C, DestTy, DL);
1477  if (IsSigned)
1478    return ConstantFoldCastOperand(Instruction::SExt, C, DestTy, DL);
1479  return ConstantFoldCastOperand(Instruction::ZExt, C, DestTy, DL);
1480}
1481
1482//===----------------------------------------------------------------------===//
1483//  Constant Folding for Calls
1484//
1485
1486bool llvm::canConstantFoldCallTo(const CallBase *Call, const Function *F) {
1487  if (Call->isNoBuiltin())
1488    return false;
1489  if (Call->getFunctionType() != F->getFunctionType())
1490    return false;
1491  switch (F->getIntrinsicID()) {
1492  // Operations that do not operate floating-point numbers and do not depend on
1493  // FP environment can be folded even in strictfp functions.
1494  case Intrinsic::bswap:
1495  case Intrinsic::ctpop:
1496  case Intrinsic::ctlz:
1497  case Intrinsic::cttz:
1498  case Intrinsic::fshl:
1499  case Intrinsic::fshr:
1500  case Intrinsic::launder_invariant_group:
1501  case Intrinsic::strip_invariant_group:
1502  case Intrinsic::masked_load:
1503  case Intrinsic::get_active_lane_mask:
1504  case Intrinsic::abs:
1505  case Intrinsic::smax:
1506  case Intrinsic::smin:
1507  case Intrinsic::umax:
1508  case Intrinsic::umin:
1509  case Intrinsic::sadd_with_overflow:
1510  case Intrinsic::uadd_with_overflow:
1511  case Intrinsic::ssub_with_overflow:
1512  case Intrinsic::usub_with_overflow:
1513  case Intrinsic::smul_with_overflow:
1514  case Intrinsic::umul_with_overflow:
1515  case Intrinsic::sadd_sat:
1516  case Intrinsic::uadd_sat:
1517  case Intrinsic::ssub_sat:
1518  case Intrinsic::usub_sat:
1519  case Intrinsic::smul_fix:
1520  case Intrinsic::smul_fix_sat:
1521  case Intrinsic::bitreverse:
1522  case Intrinsic::is_constant:
1523  case Intrinsic::vector_reduce_add:
1524  case Intrinsic::vector_reduce_mul:
1525  case Intrinsic::vector_reduce_and:
1526  case Intrinsic::vector_reduce_or:
1527  case Intrinsic::vector_reduce_xor:
1528  case Intrinsic::vector_reduce_smin:
1529  case Intrinsic::vector_reduce_smax:
1530  case Intrinsic::vector_reduce_umin:
1531  case Intrinsic::vector_reduce_umax:
1532  // Target intrinsics
1533  case Intrinsic::amdgcn_perm:
1534  case Intrinsic::amdgcn_wave_reduce_umin:
1535  case Intrinsic::amdgcn_wave_reduce_umax:
1536  case Intrinsic::amdgcn_s_wqm:
1537  case Intrinsic::amdgcn_s_quadmask:
1538  case Intrinsic::amdgcn_s_bitreplicate:
1539  case Intrinsic::arm_mve_vctp8:
1540  case Intrinsic::arm_mve_vctp16:
1541  case Intrinsic::arm_mve_vctp32:
1542  case Intrinsic::arm_mve_vctp64:
1543  case Intrinsic::aarch64_sve_convert_from_svbool:
1544  // WebAssembly float semantics are always known
1545  case Intrinsic::wasm_trunc_signed:
1546  case Intrinsic::wasm_trunc_unsigned:
1547    return true;
1548
1549  // Floating point operations cannot be folded in strictfp functions in
1550  // general case. They can be folded if FP environment is known to compiler.
1551  case Intrinsic::minnum:
1552  case Intrinsic::maxnum:
1553  case Intrinsic::minimum:
1554  case Intrinsic::maximum:
1555  case Intrinsic::log:
1556  case Intrinsic::log2:
1557  case Intrinsic::log10:
1558  case Intrinsic::exp:
1559  case Intrinsic::exp2:
1560  case Intrinsic::exp10:
1561  case Intrinsic::sqrt:
1562  case Intrinsic::sin:
1563  case Intrinsic::cos:
1564  case Intrinsic::pow:
1565  case Intrinsic::powi:
1566  case Intrinsic::ldexp:
1567  case Intrinsic::fma:
1568  case Intrinsic::fmuladd:
1569  case Intrinsic::frexp:
1570  case Intrinsic::fptoui_sat:
1571  case Intrinsic::fptosi_sat:
1572  case Intrinsic::convert_from_fp16:
1573  case Intrinsic::convert_to_fp16:
1574  case Intrinsic::amdgcn_cos:
1575  case Intrinsic::amdgcn_cubeid:
1576  case Intrinsic::amdgcn_cubema:
1577  case Intrinsic::amdgcn_cubesc:
1578  case Intrinsic::amdgcn_cubetc:
1579  case Intrinsic::amdgcn_fmul_legacy:
1580  case Intrinsic::amdgcn_fma_legacy:
1581  case Intrinsic::amdgcn_fract:
1582  case Intrinsic::amdgcn_sin:
1583  // The intrinsics below depend on rounding mode in MXCSR.
1584  case Intrinsic::x86_sse_cvtss2si:
1585  case Intrinsic::x86_sse_cvtss2si64:
1586  case Intrinsic::x86_sse_cvttss2si:
1587  case Intrinsic::x86_sse_cvttss2si64:
1588  case Intrinsic::x86_sse2_cvtsd2si:
1589  case Intrinsic::x86_sse2_cvtsd2si64:
1590  case Intrinsic::x86_sse2_cvttsd2si:
1591  case Intrinsic::x86_sse2_cvttsd2si64:
1592  case Intrinsic::x86_avx512_vcvtss2si32:
1593  case Intrinsic::x86_avx512_vcvtss2si64:
1594  case Intrinsic::x86_avx512_cvttss2si:
1595  case Intrinsic::x86_avx512_cvttss2si64:
1596  case Intrinsic::x86_avx512_vcvtsd2si32:
1597  case Intrinsic::x86_avx512_vcvtsd2si64:
1598  case Intrinsic::x86_avx512_cvttsd2si:
1599  case Intrinsic::x86_avx512_cvttsd2si64:
1600  case Intrinsic::x86_avx512_vcvtss2usi32:
1601  case Intrinsic::x86_avx512_vcvtss2usi64:
1602  case Intrinsic::x86_avx512_cvttss2usi:
1603  case Intrinsic::x86_avx512_cvttss2usi64:
1604  case Intrinsic::x86_avx512_vcvtsd2usi32:
1605  case Intrinsic::x86_avx512_vcvtsd2usi64:
1606  case Intrinsic::x86_avx512_cvttsd2usi:
1607  case Intrinsic::x86_avx512_cvttsd2usi64:
1608    return !Call->isStrictFP();
1609
1610  // Sign operations are actually bitwise operations, they do not raise
1611  // exceptions even for SNANs.
1612  case Intrinsic::fabs:
1613  case Intrinsic::copysign:
1614  case Intrinsic::is_fpclass:
1615  // Non-constrained variants of rounding operations means default FP
1616  // environment, they can be folded in any case.
1617  case Intrinsic::ceil:
1618  case Intrinsic::floor:
1619  case Intrinsic::round:
1620  case Intrinsic::roundeven:
1621  case Intrinsic::trunc:
1622  case Intrinsic::nearbyint:
1623  case Intrinsic::rint:
1624  case Intrinsic::canonicalize:
1625  // Constrained intrinsics can be folded if FP environment is known
1626  // to compiler.
1627  case Intrinsic::experimental_constrained_fma:
1628  case Intrinsic::experimental_constrained_fmuladd:
1629  case Intrinsic::experimental_constrained_fadd:
1630  case Intrinsic::experimental_constrained_fsub:
1631  case Intrinsic::experimental_constrained_fmul:
1632  case Intrinsic::experimental_constrained_fdiv:
1633  case Intrinsic::experimental_constrained_frem:
1634  case Intrinsic::experimental_constrained_ceil:
1635  case Intrinsic::experimental_constrained_floor:
1636  case Intrinsic::experimental_constrained_round:
1637  case Intrinsic::experimental_constrained_roundeven:
1638  case Intrinsic::experimental_constrained_trunc:
1639  case Intrinsic::experimental_constrained_nearbyint:
1640  case Intrinsic::experimental_constrained_rint:
1641  case Intrinsic::experimental_constrained_fcmp:
1642  case Intrinsic::experimental_constrained_fcmps:
1643    return true;
1644  default:
1645    return false;
1646  case Intrinsic::not_intrinsic: break;
1647  }
1648
1649  if (!F->hasName() || Call->isStrictFP())
1650    return false;
1651
1652  // In these cases, the check of the length is required.  We don't want to
1653  // return true for a name like "cos\0blah" which strcmp would return equal to
1654  // "cos", but has length 8.
1655  StringRef Name = F->getName();
1656  switch (Name[0]) {
1657  default:
1658    return false;
1659  case 'a':
1660    return Name == "acos" || Name == "acosf" ||
1661           Name == "asin" || Name == "asinf" ||
1662           Name == "atan" || Name == "atanf" ||
1663           Name == "atan2" || Name == "atan2f";
1664  case 'c':
1665    return Name == "ceil" || Name == "ceilf" ||
1666           Name == "cos" || Name == "cosf" ||
1667           Name == "cosh" || Name == "coshf";
1668  case 'e':
1669    return Name == "exp" || Name == "expf" ||
1670           Name == "exp2" || Name == "exp2f";
1671  case 'f':
1672    return Name == "fabs" || Name == "fabsf" ||
1673           Name == "floor" || Name == "floorf" ||
1674           Name == "fmod" || Name == "fmodf";
1675  case 'l':
1676    return Name == "log" || Name == "logf" ||
1677           Name == "log2" || Name == "log2f" ||
1678           Name == "log10" || Name == "log10f";
1679  case 'n':
1680    return Name == "nearbyint" || Name == "nearbyintf";
1681  case 'p':
1682    return Name == "pow" || Name == "powf";
1683  case 'r':
1684    return Name == "remainder" || Name == "remainderf" ||
1685           Name == "rint" || Name == "rintf" ||
1686           Name == "round" || Name == "roundf";
1687  case 's':
1688    return Name == "sin" || Name == "sinf" ||
1689           Name == "sinh" || Name == "sinhf" ||
1690           Name == "sqrt" || Name == "sqrtf";
1691  case 't':
1692    return Name == "tan" || Name == "tanf" ||
1693           Name == "tanh" || Name == "tanhf" ||
1694           Name == "trunc" || Name == "truncf";
1695  case '_':
1696    // Check for various function names that get used for the math functions
1697    // when the header files are preprocessed with the macro
1698    // __FINITE_MATH_ONLY__ enabled.
1699    // The '12' here is the length of the shortest name that can match.
1700    // We need to check the size before looking at Name[1] and Name[2]
1701    // so we may as well check a limit that will eliminate mismatches.
1702    if (Name.size() < 12 || Name[1] != '_')
1703      return false;
1704    switch (Name[2]) {
1705    default:
1706      return false;
1707    case 'a':
1708      return Name == "__acos_finite" || Name == "__acosf_finite" ||
1709             Name == "__asin_finite" || Name == "__asinf_finite" ||
1710             Name == "__atan2_finite" || Name == "__atan2f_finite";
1711    case 'c':
1712      return Name == "__cosh_finite" || Name == "__coshf_finite";
1713    case 'e':
1714      return Name == "__exp_finite" || Name == "__expf_finite" ||
1715             Name == "__exp2_finite" || Name == "__exp2f_finite";
1716    case 'l':
1717      return Name == "__log_finite" || Name == "__logf_finite" ||
1718             Name == "__log10_finite" || Name == "__log10f_finite";
1719    case 'p':
1720      return Name == "__pow_finite" || Name == "__powf_finite";
1721    case 's':
1722      return Name == "__sinh_finite" || Name == "__sinhf_finite";
1723    }
1724  }
1725}
1726
1727namespace {
1728
1729Constant *GetConstantFoldFPValue(double V, Type *Ty) {
1730  if (Ty->isHalfTy() || Ty->isFloatTy()) {
1731    APFloat APF(V);
1732    bool unused;
1733    APF.convert(Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &unused);
1734    return ConstantFP::get(Ty->getContext(), APF);
1735  }
1736  if (Ty->isDoubleTy())
1737    return ConstantFP::get(Ty->getContext(), APFloat(V));
1738  llvm_unreachable("Can only constant fold half/float/double");
1739}
1740
1741/// Clear the floating-point exception state.
1742inline void llvm_fenv_clearexcept() {
1743#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
1744  feclearexcept(FE_ALL_EXCEPT);
1745#endif
1746  errno = 0;
1747}
1748
1749/// Test if a floating-point exception was raised.
1750inline bool llvm_fenv_testexcept() {
1751  int errno_val = errno;
1752  if (errno_val == ERANGE || errno_val == EDOM)
1753    return true;
1754#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
1755  if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
1756    return true;
1757#endif
1758  return false;
1759}
1760
1761Constant *ConstantFoldFP(double (*NativeFP)(double), const APFloat &V,
1762                         Type *Ty) {
1763  llvm_fenv_clearexcept();
1764  double Result = NativeFP(V.convertToDouble());
1765  if (llvm_fenv_testexcept()) {
1766    llvm_fenv_clearexcept();
1767    return nullptr;
1768  }
1769
1770  return GetConstantFoldFPValue(Result, Ty);
1771}
1772
1773Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
1774                               const APFloat &V, const APFloat &W, Type *Ty) {
1775  llvm_fenv_clearexcept();
1776  double Result = NativeFP(V.convertToDouble(), W.convertToDouble());
1777  if (llvm_fenv_testexcept()) {
1778    llvm_fenv_clearexcept();
1779    return nullptr;
1780  }
1781
1782  return GetConstantFoldFPValue(Result, Ty);
1783}
1784
1785Constant *constantFoldVectorReduce(Intrinsic::ID IID, Constant *Op) {
1786  FixedVectorType *VT = dyn_cast<FixedVectorType>(Op->getType());
1787  if (!VT)
1788    return nullptr;
1789
1790  // This isn't strictly necessary, but handle the special/common case of zero:
1791  // all integer reductions of a zero input produce zero.
1792  if (isa<ConstantAggregateZero>(Op))
1793    return ConstantInt::get(VT->getElementType(), 0);
1794
1795  // This is the same as the underlying binops - poison propagates.
1796  if (isa<PoisonValue>(Op) || Op->containsPoisonElement())
1797    return PoisonValue::get(VT->getElementType());
1798
1799  // TODO: Handle undef.
1800  if (!isa<ConstantVector>(Op) && !isa<ConstantDataVector>(Op))
1801    return nullptr;
1802
1803  auto *EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(0U));
1804  if (!EltC)
1805    return nullptr;
1806
1807  APInt Acc = EltC->getValue();
1808  for (unsigned I = 1, E = VT->getNumElements(); I != E; I++) {
1809    if (!(EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(I))))
1810      return nullptr;
1811    const APInt &X = EltC->getValue();
1812    switch (IID) {
1813    case Intrinsic::vector_reduce_add:
1814      Acc = Acc + X;
1815      break;
1816    case Intrinsic::vector_reduce_mul:
1817      Acc = Acc * X;
1818      break;
1819    case Intrinsic::vector_reduce_and:
1820      Acc = Acc & X;
1821      break;
1822    case Intrinsic::vector_reduce_or:
1823      Acc = Acc | X;
1824      break;
1825    case Intrinsic::vector_reduce_xor:
1826      Acc = Acc ^ X;
1827      break;
1828    case Intrinsic::vector_reduce_smin:
1829      Acc = APIntOps::smin(Acc, X);
1830      break;
1831    case Intrinsic::vector_reduce_smax:
1832      Acc = APIntOps::smax(Acc, X);
1833      break;
1834    case Intrinsic::vector_reduce_umin:
1835      Acc = APIntOps::umin(Acc, X);
1836      break;
1837    case Intrinsic::vector_reduce_umax:
1838      Acc = APIntOps::umax(Acc, X);
1839      break;
1840    }
1841  }
1842
1843  return ConstantInt::get(Op->getContext(), Acc);
1844}
1845
1846/// Attempt to fold an SSE floating point to integer conversion of a constant
1847/// floating point. If roundTowardZero is false, the default IEEE rounding is
1848/// used (toward nearest, ties to even). This matches the behavior of the
1849/// non-truncating SSE instructions in the default rounding mode. The desired
1850/// integer type Ty is used to select how many bits are available for the
1851/// result. Returns null if the conversion cannot be performed, otherwise
1852/// returns the Constant value resulting from the conversion.
1853Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero,
1854                                      Type *Ty, bool IsSigned) {
1855  // All of these conversion intrinsics form an integer of at most 64bits.
1856  unsigned ResultWidth = Ty->getIntegerBitWidth();
1857  assert(ResultWidth <= 64 &&
1858         "Can only constant fold conversions to 64 and 32 bit ints");
1859
1860  uint64_t UIntVal;
1861  bool isExact = false;
1862  APFloat::roundingMode mode = roundTowardZero? APFloat::rmTowardZero
1863                                              : APFloat::rmNearestTiesToEven;
1864  APFloat::opStatus status =
1865      Val.convertToInteger(MutableArrayRef(UIntVal), ResultWidth,
1866                           IsSigned, mode, &isExact);
1867  if (status != APFloat::opOK &&
1868      (!roundTowardZero || status != APFloat::opInexact))
1869    return nullptr;
1870  return ConstantInt::get(Ty, UIntVal, IsSigned);
1871}
1872
1873double getValueAsDouble(ConstantFP *Op) {
1874  Type *Ty = Op->getType();
1875
1876  if (Ty->isBFloatTy() || Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy())
1877    return Op->getValueAPF().convertToDouble();
1878
1879  bool unused;
1880  APFloat APF = Op->getValueAPF();
1881  APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &unused);
1882  return APF.convertToDouble();
1883}
1884
1885static bool getConstIntOrUndef(Value *Op, const APInt *&C) {
1886  if (auto *CI = dyn_cast<ConstantInt>(Op)) {
1887    C = &CI->getValue();
1888    return true;
1889  }
1890  if (isa<UndefValue>(Op)) {
1891    C = nullptr;
1892    return true;
1893  }
1894  return false;
1895}
1896
1897/// Checks if the given intrinsic call, which evaluates to constant, is allowed
1898/// to be folded.
1899///
1900/// \param CI Constrained intrinsic call.
1901/// \param St Exception flags raised during constant evaluation.
1902static bool mayFoldConstrained(ConstrainedFPIntrinsic *CI,
1903                               APFloat::opStatus St) {
1904  std::optional<RoundingMode> ORM = CI->getRoundingMode();
1905  std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
1906
1907  // If the operation does not change exception status flags, it is safe
1908  // to fold.
1909  if (St == APFloat::opStatus::opOK)
1910    return true;
1911
1912  // If evaluation raised FP exception, the result can depend on rounding
1913  // mode. If the latter is unknown, folding is not possible.
1914  if (ORM && *ORM == RoundingMode::Dynamic)
1915    return false;
1916
1917  // If FP exceptions are ignored, fold the call, even if such exception is
1918  // raised.
1919  if (EB && *EB != fp::ExceptionBehavior::ebStrict)
1920    return true;
1921
1922  // Leave the calculation for runtime so that exception flags be correctly set
1923  // in hardware.
1924  return false;
1925}
1926
1927/// Returns the rounding mode that should be used for constant evaluation.
1928static RoundingMode
1929getEvaluationRoundingMode(const ConstrainedFPIntrinsic *CI) {
1930  std::optional<RoundingMode> ORM = CI->getRoundingMode();
1931  if (!ORM || *ORM == RoundingMode::Dynamic)
1932    // Even if the rounding mode is unknown, try evaluating the operation.
1933    // If it does not raise inexact exception, rounding was not applied,
1934    // so the result is exact and does not depend on rounding mode. Whether
1935    // other FP exceptions are raised, it does not depend on rounding mode.
1936    return RoundingMode::NearestTiesToEven;
1937  return *ORM;
1938}
1939
1940/// Try to constant fold llvm.canonicalize for the given caller and value.
1941static Constant *constantFoldCanonicalize(const Type *Ty, const CallBase *CI,
1942                                          const APFloat &Src) {
1943  // Zero, positive and negative, is always OK to fold.
1944  if (Src.isZero()) {
1945    // Get a fresh 0, since ppc_fp128 does have non-canonical zeros.
1946    return ConstantFP::get(
1947        CI->getContext(),
1948        APFloat::getZero(Src.getSemantics(), Src.isNegative()));
1949  }
1950
1951  if (!Ty->isIEEELikeFPTy())
1952    return nullptr;
1953
1954  // Zero is always canonical and the sign must be preserved.
1955  //
1956  // Denorms and nans may have special encodings, but it should be OK to fold a
1957  // totally average number.
1958  if (Src.isNormal() || Src.isInfinity())
1959    return ConstantFP::get(CI->getContext(), Src);
1960
1961  if (Src.isDenormal() && CI->getParent() && CI->getFunction()) {
1962    DenormalMode DenormMode =
1963        CI->getFunction()->getDenormalMode(Src.getSemantics());
1964
1965    if (DenormMode == DenormalMode::getIEEE())
1966      return ConstantFP::get(CI->getContext(), Src);
1967
1968    if (DenormMode.Input == DenormalMode::Dynamic)
1969      return nullptr;
1970
1971    // If we know if either input or output is flushed, we can fold.
1972    if ((DenormMode.Input == DenormalMode::Dynamic &&
1973         DenormMode.Output == DenormalMode::IEEE) ||
1974        (DenormMode.Input == DenormalMode::IEEE &&
1975         DenormMode.Output == DenormalMode::Dynamic))
1976      return nullptr;
1977
1978    bool IsPositive =
1979        (!Src.isNegative() || DenormMode.Input == DenormalMode::PositiveZero ||
1980         (DenormMode.Output == DenormalMode::PositiveZero &&
1981          DenormMode.Input == DenormalMode::IEEE));
1982
1983    return ConstantFP::get(CI->getContext(),
1984                           APFloat::getZero(Src.getSemantics(), !IsPositive));
1985  }
1986
1987  return nullptr;
1988}
1989
1990static Constant *ConstantFoldScalarCall1(StringRef Name,
1991                                         Intrinsic::ID IntrinsicID,
1992                                         Type *Ty,
1993                                         ArrayRef<Constant *> Operands,
1994                                         const TargetLibraryInfo *TLI,
1995                                         const CallBase *Call) {
1996  assert(Operands.size() == 1 && "Wrong number of operands.");
1997
1998  if (IntrinsicID == Intrinsic::is_constant) {
1999    // We know we have a "Constant" argument. But we want to only
2000    // return true for manifest constants, not those that depend on
2001    // constants with unknowable values, e.g. GlobalValue or BlockAddress.
2002    if (Operands[0]->isManifestConstant())
2003      return ConstantInt::getTrue(Ty->getContext());
2004    return nullptr;
2005  }
2006
2007  if (isa<PoisonValue>(Operands[0])) {
2008    // TODO: All of these operations should probably propagate poison.
2009    if (IntrinsicID == Intrinsic::canonicalize)
2010      return PoisonValue::get(Ty);
2011  }
2012
2013  if (isa<UndefValue>(Operands[0])) {
2014    // cosine(arg) is between -1 and 1. cosine(invalid arg) is NaN.
2015    // ctpop() is between 0 and bitwidth, pick 0 for undef.
2016    // fptoui.sat and fptosi.sat can always fold to zero (for a zero input).
2017    if (IntrinsicID == Intrinsic::cos ||
2018        IntrinsicID == Intrinsic::ctpop ||
2019        IntrinsicID == Intrinsic::fptoui_sat ||
2020        IntrinsicID == Intrinsic::fptosi_sat ||
2021        IntrinsicID == Intrinsic::canonicalize)
2022      return Constant::getNullValue(Ty);
2023    if (IntrinsicID == Intrinsic::bswap ||
2024        IntrinsicID == Intrinsic::bitreverse ||
2025        IntrinsicID == Intrinsic::launder_invariant_group ||
2026        IntrinsicID == Intrinsic::strip_invariant_group)
2027      return Operands[0];
2028  }
2029
2030  if (isa<ConstantPointerNull>(Operands[0])) {
2031    // launder(null) == null == strip(null) iff in addrspace 0
2032    if (IntrinsicID == Intrinsic::launder_invariant_group ||
2033        IntrinsicID == Intrinsic::strip_invariant_group) {
2034      // If instruction is not yet put in a basic block (e.g. when cloning
2035      // a function during inlining), Call's caller may not be available.
2036      // So check Call's BB first before querying Call->getCaller.
2037      const Function *Caller =
2038          Call->getParent() ? Call->getCaller() : nullptr;
2039      if (Caller &&
2040          !NullPointerIsDefined(
2041              Caller, Operands[0]->getType()->getPointerAddressSpace())) {
2042        return Operands[0];
2043      }
2044      return nullptr;
2045    }
2046  }
2047
2048  if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
2049    if (IntrinsicID == Intrinsic::convert_to_fp16) {
2050      APFloat Val(Op->getValueAPF());
2051
2052      bool lost = false;
2053      Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost);
2054
2055      return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
2056    }
2057
2058    APFloat U = Op->getValueAPF();
2059
2060    if (IntrinsicID == Intrinsic::wasm_trunc_signed ||
2061        IntrinsicID == Intrinsic::wasm_trunc_unsigned) {
2062      bool Signed = IntrinsicID == Intrinsic::wasm_trunc_signed;
2063
2064      if (U.isNaN())
2065        return nullptr;
2066
2067      unsigned Width = Ty->getIntegerBitWidth();
2068      APSInt Int(Width, !Signed);
2069      bool IsExact = false;
2070      APFloat::opStatus Status =
2071          U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact);
2072
2073      if (Status == APFloat::opOK || Status == APFloat::opInexact)
2074        return ConstantInt::get(Ty, Int);
2075
2076      return nullptr;
2077    }
2078
2079    if (IntrinsicID == Intrinsic::fptoui_sat ||
2080        IntrinsicID == Intrinsic::fptosi_sat) {
2081      // convertToInteger() already has the desired saturation semantics.
2082      APSInt Int(Ty->getIntegerBitWidth(),
2083                 IntrinsicID == Intrinsic::fptoui_sat);
2084      bool IsExact;
2085      U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact);
2086      return ConstantInt::get(Ty, Int);
2087    }
2088
2089    if (IntrinsicID == Intrinsic::canonicalize)
2090      return constantFoldCanonicalize(Ty, Call, U);
2091
2092    if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
2093      return nullptr;
2094
2095    // Use internal versions of these intrinsics.
2096
2097    if (IntrinsicID == Intrinsic::nearbyint || IntrinsicID == Intrinsic::rint) {
2098      U.roundToIntegral(APFloat::rmNearestTiesToEven);
2099      return ConstantFP::get(Ty->getContext(), U);
2100    }
2101
2102    if (IntrinsicID == Intrinsic::round) {
2103      U.roundToIntegral(APFloat::rmNearestTiesToAway);
2104      return ConstantFP::get(Ty->getContext(), U);
2105    }
2106
2107    if (IntrinsicID == Intrinsic::roundeven) {
2108      U.roundToIntegral(APFloat::rmNearestTiesToEven);
2109      return ConstantFP::get(Ty->getContext(), U);
2110    }
2111
2112    if (IntrinsicID == Intrinsic::ceil) {
2113      U.roundToIntegral(APFloat::rmTowardPositive);
2114      return ConstantFP::get(Ty->getContext(), U);
2115    }
2116
2117    if (IntrinsicID == Intrinsic::floor) {
2118      U.roundToIntegral(APFloat::rmTowardNegative);
2119      return ConstantFP::get(Ty->getContext(), U);
2120    }
2121
2122    if (IntrinsicID == Intrinsic::trunc) {
2123      U.roundToIntegral(APFloat::rmTowardZero);
2124      return ConstantFP::get(Ty->getContext(), U);
2125    }
2126
2127    if (IntrinsicID == Intrinsic::fabs) {
2128      U.clearSign();
2129      return ConstantFP::get(Ty->getContext(), U);
2130    }
2131
2132    if (IntrinsicID == Intrinsic::amdgcn_fract) {
2133      // The v_fract instruction behaves like the OpenCL spec, which defines
2134      // fract(x) as fmin(x - floor(x), 0x1.fffffep-1f): "The min() operator is
2135      //   there to prevent fract(-small) from returning 1.0. It returns the
2136      //   largest positive floating-point number less than 1.0."
2137      APFloat FloorU(U);
2138      FloorU.roundToIntegral(APFloat::rmTowardNegative);
2139      APFloat FractU(U - FloorU);
2140      APFloat AlmostOne(U.getSemantics(), 1);
2141      AlmostOne.next(/*nextDown*/ true);
2142      return ConstantFP::get(Ty->getContext(), minimum(FractU, AlmostOne));
2143    }
2144
2145    // Rounding operations (floor, trunc, ceil, round and nearbyint) do not
2146    // raise FP exceptions, unless the argument is signaling NaN.
2147
2148    std::optional<APFloat::roundingMode> RM;
2149    switch (IntrinsicID) {
2150    default:
2151      break;
2152    case Intrinsic::experimental_constrained_nearbyint:
2153    case Intrinsic::experimental_constrained_rint: {
2154      auto CI = cast<ConstrainedFPIntrinsic>(Call);
2155      RM = CI->getRoundingMode();
2156      if (!RM || *RM == RoundingMode::Dynamic)
2157        return nullptr;
2158      break;
2159    }
2160    case Intrinsic::experimental_constrained_round:
2161      RM = APFloat::rmNearestTiesToAway;
2162      break;
2163    case Intrinsic::experimental_constrained_ceil:
2164      RM = APFloat::rmTowardPositive;
2165      break;
2166    case Intrinsic::experimental_constrained_floor:
2167      RM = APFloat::rmTowardNegative;
2168      break;
2169    case Intrinsic::experimental_constrained_trunc:
2170      RM = APFloat::rmTowardZero;
2171      break;
2172    }
2173    if (RM) {
2174      auto CI = cast<ConstrainedFPIntrinsic>(Call);
2175      if (U.isFinite()) {
2176        APFloat::opStatus St = U.roundToIntegral(*RM);
2177        if (IntrinsicID == Intrinsic::experimental_constrained_rint &&
2178            St == APFloat::opInexact) {
2179          std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
2180          if (EB && *EB == fp::ebStrict)
2181            return nullptr;
2182        }
2183      } else if (U.isSignaling()) {
2184        std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
2185        if (EB && *EB != fp::ebIgnore)
2186          return nullptr;
2187        U = APFloat::getQNaN(U.getSemantics());
2188      }
2189      return ConstantFP::get(Ty->getContext(), U);
2190    }
2191
2192    /// We only fold functions with finite arguments. Folding NaN and inf is
2193    /// likely to be aborted with an exception anyway, and some host libms
2194    /// have known errors raising exceptions.
2195    if (!U.isFinite())
2196      return nullptr;
2197
2198    /// Currently APFloat versions of these functions do not exist, so we use
2199    /// the host native double versions.  Float versions are not called
2200    /// directly but for all these it is true (float)(f((double)arg)) ==
2201    /// f(arg).  Long double not supported yet.
2202    const APFloat &APF = Op->getValueAPF();
2203
2204    switch (IntrinsicID) {
2205      default: break;
2206      case Intrinsic::log:
2207        return ConstantFoldFP(log, APF, Ty);
2208      case Intrinsic::log2:
2209        // TODO: What about hosts that lack a C99 library?
2210        return ConstantFoldFP(log2, APF, Ty);
2211      case Intrinsic::log10:
2212        // TODO: What about hosts that lack a C99 library?
2213        return ConstantFoldFP(log10, APF, Ty);
2214      case Intrinsic::exp:
2215        return ConstantFoldFP(exp, APF, Ty);
2216      case Intrinsic::exp2:
2217        // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
2218        return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
2219      case Intrinsic::exp10:
2220        // Fold exp10(x) as pow(10, x), in case the host lacks a C99 library.
2221        return ConstantFoldBinaryFP(pow, APFloat(10.0), APF, Ty);
2222      case Intrinsic::sin:
2223        return ConstantFoldFP(sin, APF, Ty);
2224      case Intrinsic::cos:
2225        return ConstantFoldFP(cos, APF, Ty);
2226      case Intrinsic::sqrt:
2227        return ConstantFoldFP(sqrt, APF, Ty);
2228      case Intrinsic::amdgcn_cos:
2229      case Intrinsic::amdgcn_sin: {
2230        double V = getValueAsDouble(Op);
2231        if (V < -256.0 || V > 256.0)
2232          // The gfx8 and gfx9 architectures handle arguments outside the range
2233          // [-256, 256] differently. This should be a rare case so bail out
2234          // rather than trying to handle the difference.
2235          return nullptr;
2236        bool IsCos = IntrinsicID == Intrinsic::amdgcn_cos;
2237        double V4 = V * 4.0;
2238        if (V4 == floor(V4)) {
2239          // Force exact results for quarter-integer inputs.
2240          const double SinVals[4] = { 0.0, 1.0, 0.0, -1.0 };
2241          V = SinVals[((int)V4 + (IsCos ? 1 : 0)) & 3];
2242        } else {
2243          if (IsCos)
2244            V = cos(V * 2.0 * numbers::pi);
2245          else
2246            V = sin(V * 2.0 * numbers::pi);
2247        }
2248        return GetConstantFoldFPValue(V, Ty);
2249      }
2250    }
2251
2252    if (!TLI)
2253      return nullptr;
2254
2255    LibFunc Func = NotLibFunc;
2256    if (!TLI->getLibFunc(Name, Func))
2257      return nullptr;
2258
2259    switch (Func) {
2260    default:
2261      break;
2262    case LibFunc_acos:
2263    case LibFunc_acosf:
2264    case LibFunc_acos_finite:
2265    case LibFunc_acosf_finite:
2266      if (TLI->has(Func))
2267        return ConstantFoldFP(acos, APF, Ty);
2268      break;
2269    case LibFunc_asin:
2270    case LibFunc_asinf:
2271    case LibFunc_asin_finite:
2272    case LibFunc_asinf_finite:
2273      if (TLI->has(Func))
2274        return ConstantFoldFP(asin, APF, Ty);
2275      break;
2276    case LibFunc_atan:
2277    case LibFunc_atanf:
2278      if (TLI->has(Func))
2279        return ConstantFoldFP(atan, APF, Ty);
2280      break;
2281    case LibFunc_ceil:
2282    case LibFunc_ceilf:
2283      if (TLI->has(Func)) {
2284        U.roundToIntegral(APFloat::rmTowardPositive);
2285        return ConstantFP::get(Ty->getContext(), U);
2286      }
2287      break;
2288    case LibFunc_cos:
2289    case LibFunc_cosf:
2290      if (TLI->has(Func))
2291        return ConstantFoldFP(cos, APF, Ty);
2292      break;
2293    case LibFunc_cosh:
2294    case LibFunc_coshf:
2295    case LibFunc_cosh_finite:
2296    case LibFunc_coshf_finite:
2297      if (TLI->has(Func))
2298        return ConstantFoldFP(cosh, APF, Ty);
2299      break;
2300    case LibFunc_exp:
2301    case LibFunc_expf:
2302    case LibFunc_exp_finite:
2303    case LibFunc_expf_finite:
2304      if (TLI->has(Func))
2305        return ConstantFoldFP(exp, APF, Ty);
2306      break;
2307    case LibFunc_exp2:
2308    case LibFunc_exp2f:
2309    case LibFunc_exp2_finite:
2310    case LibFunc_exp2f_finite:
2311      if (TLI->has(Func))
2312        // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
2313        return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
2314      break;
2315    case LibFunc_fabs:
2316    case LibFunc_fabsf:
2317      if (TLI->has(Func)) {
2318        U.clearSign();
2319        return ConstantFP::get(Ty->getContext(), U);
2320      }
2321      break;
2322    case LibFunc_floor:
2323    case LibFunc_floorf:
2324      if (TLI->has(Func)) {
2325        U.roundToIntegral(APFloat::rmTowardNegative);
2326        return ConstantFP::get(Ty->getContext(), U);
2327      }
2328      break;
2329    case LibFunc_log:
2330    case LibFunc_logf:
2331    case LibFunc_log_finite:
2332    case LibFunc_logf_finite:
2333      if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
2334        return ConstantFoldFP(log, APF, Ty);
2335      break;
2336    case LibFunc_log2:
2337    case LibFunc_log2f:
2338    case LibFunc_log2_finite:
2339    case LibFunc_log2f_finite:
2340      if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
2341        // TODO: What about hosts that lack a C99 library?
2342        return ConstantFoldFP(log2, APF, Ty);
2343      break;
2344    case LibFunc_log10:
2345    case LibFunc_log10f:
2346    case LibFunc_log10_finite:
2347    case LibFunc_log10f_finite:
2348      if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
2349        // TODO: What about hosts that lack a C99 library?
2350        return ConstantFoldFP(log10, APF, Ty);
2351      break;
2352    case LibFunc_nearbyint:
2353    case LibFunc_nearbyintf:
2354    case LibFunc_rint:
2355    case LibFunc_rintf:
2356      if (TLI->has(Func)) {
2357        U.roundToIntegral(APFloat::rmNearestTiesToEven);
2358        return ConstantFP::get(Ty->getContext(), U);
2359      }
2360      break;
2361    case LibFunc_round:
2362    case LibFunc_roundf:
2363      if (TLI->has(Func)) {
2364        U.roundToIntegral(APFloat::rmNearestTiesToAway);
2365        return ConstantFP::get(Ty->getContext(), U);
2366      }
2367      break;
2368    case LibFunc_sin:
2369    case LibFunc_sinf:
2370      if (TLI->has(Func))
2371        return ConstantFoldFP(sin, APF, Ty);
2372      break;
2373    case LibFunc_sinh:
2374    case LibFunc_sinhf:
2375    case LibFunc_sinh_finite:
2376    case LibFunc_sinhf_finite:
2377      if (TLI->has(Func))
2378        return ConstantFoldFP(sinh, APF, Ty);
2379      break;
2380    case LibFunc_sqrt:
2381    case LibFunc_sqrtf:
2382      if (!APF.isNegative() && TLI->has(Func))
2383        return ConstantFoldFP(sqrt, APF, Ty);
2384      break;
2385    case LibFunc_tan:
2386    case LibFunc_tanf:
2387      if (TLI->has(Func))
2388        return ConstantFoldFP(tan, APF, Ty);
2389      break;
2390    case LibFunc_tanh:
2391    case LibFunc_tanhf:
2392      if (TLI->has(Func))
2393        return ConstantFoldFP(tanh, APF, Ty);
2394      break;
2395    case LibFunc_trunc:
2396    case LibFunc_truncf:
2397      if (TLI->has(Func)) {
2398        U.roundToIntegral(APFloat::rmTowardZero);
2399        return ConstantFP::get(Ty->getContext(), U);
2400      }
2401      break;
2402    }
2403    return nullptr;
2404  }
2405
2406  if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
2407    switch (IntrinsicID) {
2408    case Intrinsic::bswap:
2409      return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
2410    case Intrinsic::ctpop:
2411      return ConstantInt::get(Ty, Op->getValue().popcount());
2412    case Intrinsic::bitreverse:
2413      return ConstantInt::get(Ty->getContext(), Op->getValue().reverseBits());
2414    case Intrinsic::convert_from_fp16: {
2415      APFloat Val(APFloat::IEEEhalf(), Op->getValue());
2416
2417      bool lost = false;
2418      APFloat::opStatus status = Val.convert(
2419          Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &lost);
2420
2421      // Conversion is always precise.
2422      (void)status;
2423      assert(status != APFloat::opInexact && !lost &&
2424             "Precision lost during fp16 constfolding");
2425
2426      return ConstantFP::get(Ty->getContext(), Val);
2427    }
2428
2429    case Intrinsic::amdgcn_s_wqm: {
2430      uint64_t Val = Op->getZExtValue();
2431      Val |= (Val & 0x5555555555555555ULL) << 1 |
2432             ((Val >> 1) & 0x5555555555555555ULL);
2433      Val |= (Val & 0x3333333333333333ULL) << 2 |
2434             ((Val >> 2) & 0x3333333333333333ULL);
2435      return ConstantInt::get(Ty, Val);
2436    }
2437
2438    case Intrinsic::amdgcn_s_quadmask: {
2439      uint64_t Val = Op->getZExtValue();
2440      uint64_t QuadMask = 0;
2441      for (unsigned I = 0; I < Op->getBitWidth() / 4; ++I, Val >>= 4) {
2442        if (!(Val & 0xF))
2443          continue;
2444
2445        QuadMask |= (1ULL << I);
2446      }
2447      return ConstantInt::get(Ty, QuadMask);
2448    }
2449
2450    case Intrinsic::amdgcn_s_bitreplicate: {
2451      uint64_t Val = Op->getZExtValue();
2452      Val = (Val & 0x000000000000FFFFULL) | (Val & 0x00000000FFFF0000ULL) << 16;
2453      Val = (Val & 0x000000FF000000FFULL) | (Val & 0x0000FF000000FF00ULL) << 8;
2454      Val = (Val & 0x000F000F000F000FULL) | (Val & 0x00F000F000F000F0ULL) << 4;
2455      Val = (Val & 0x0303030303030303ULL) | (Val & 0x0C0C0C0C0C0C0C0CULL) << 2;
2456      Val = (Val & 0x1111111111111111ULL) | (Val & 0x2222222222222222ULL) << 1;
2457      Val = Val | Val << 1;
2458      return ConstantInt::get(Ty, Val);
2459    }
2460
2461    default:
2462      return nullptr;
2463    }
2464  }
2465
2466  switch (IntrinsicID) {
2467  default: break;
2468  case Intrinsic::vector_reduce_add:
2469  case Intrinsic::vector_reduce_mul:
2470  case Intrinsic::vector_reduce_and:
2471  case Intrinsic::vector_reduce_or:
2472  case Intrinsic::vector_reduce_xor:
2473  case Intrinsic::vector_reduce_smin:
2474  case Intrinsic::vector_reduce_smax:
2475  case Intrinsic::vector_reduce_umin:
2476  case Intrinsic::vector_reduce_umax:
2477    if (Constant *C = constantFoldVectorReduce(IntrinsicID, Operands[0]))
2478      return C;
2479    break;
2480  }
2481
2482  // Support ConstantVector in case we have an Undef in the top.
2483  if (isa<ConstantVector>(Operands[0]) ||
2484      isa<ConstantDataVector>(Operands[0])) {
2485    auto *Op = cast<Constant>(Operands[0]);
2486    switch (IntrinsicID) {
2487    default: break;
2488    case Intrinsic::x86_sse_cvtss2si:
2489    case Intrinsic::x86_sse_cvtss2si64:
2490    case Intrinsic::x86_sse2_cvtsd2si:
2491    case Intrinsic::x86_sse2_cvtsd2si64:
2492      if (ConstantFP *FPOp =
2493              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2494        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2495                                           /*roundTowardZero=*/false, Ty,
2496                                           /*IsSigned*/true);
2497      break;
2498    case Intrinsic::x86_sse_cvttss2si:
2499    case Intrinsic::x86_sse_cvttss2si64:
2500    case Intrinsic::x86_sse2_cvttsd2si:
2501    case Intrinsic::x86_sse2_cvttsd2si64:
2502      if (ConstantFP *FPOp =
2503              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2504        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2505                                           /*roundTowardZero=*/true, Ty,
2506                                           /*IsSigned*/true);
2507      break;
2508    }
2509  }
2510
2511  return nullptr;
2512}
2513
2514static Constant *evaluateCompare(const APFloat &Op1, const APFloat &Op2,
2515                                 const ConstrainedFPIntrinsic *Call) {
2516  APFloat::opStatus St = APFloat::opOK;
2517  auto *FCmp = cast<ConstrainedFPCmpIntrinsic>(Call);
2518  FCmpInst::Predicate Cond = FCmp->getPredicate();
2519  if (FCmp->isSignaling()) {
2520    if (Op1.isNaN() || Op2.isNaN())
2521      St = APFloat::opInvalidOp;
2522  } else {
2523    if (Op1.isSignaling() || Op2.isSignaling())
2524      St = APFloat::opInvalidOp;
2525  }
2526  bool Result = FCmpInst::compare(Op1, Op2, Cond);
2527  if (mayFoldConstrained(const_cast<ConstrainedFPCmpIntrinsic *>(FCmp), St))
2528    return ConstantInt::get(Call->getType()->getScalarType(), Result);
2529  return nullptr;
2530}
2531
2532static Constant *ConstantFoldScalarCall2(StringRef Name,
2533                                         Intrinsic::ID IntrinsicID,
2534                                         Type *Ty,
2535                                         ArrayRef<Constant *> Operands,
2536                                         const TargetLibraryInfo *TLI,
2537                                         const CallBase *Call) {
2538  assert(Operands.size() == 2 && "Wrong number of operands.");
2539
2540  if (Ty->isFloatingPointTy()) {
2541    // TODO: We should have undef handling for all of the FP intrinsics that
2542    //       are attempted to be folded in this function.
2543    bool IsOp0Undef = isa<UndefValue>(Operands[0]);
2544    bool IsOp1Undef = isa<UndefValue>(Operands[1]);
2545    switch (IntrinsicID) {
2546    case Intrinsic::maxnum:
2547    case Intrinsic::minnum:
2548    case Intrinsic::maximum:
2549    case Intrinsic::minimum:
2550      // If one argument is undef, return the other argument.
2551      if (IsOp0Undef)
2552        return Operands[1];
2553      if (IsOp1Undef)
2554        return Operands[0];
2555      break;
2556    }
2557  }
2558
2559  if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
2560    const APFloat &Op1V = Op1->getValueAPF();
2561
2562    if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
2563      if (Op2->getType() != Op1->getType())
2564        return nullptr;
2565      const APFloat &Op2V = Op2->getValueAPF();
2566
2567      if (const auto *ConstrIntr = dyn_cast<ConstrainedFPIntrinsic>(Call)) {
2568        RoundingMode RM = getEvaluationRoundingMode(ConstrIntr);
2569        APFloat Res = Op1V;
2570        APFloat::opStatus St;
2571        switch (IntrinsicID) {
2572        default:
2573          return nullptr;
2574        case Intrinsic::experimental_constrained_fadd:
2575          St = Res.add(Op2V, RM);
2576          break;
2577        case Intrinsic::experimental_constrained_fsub:
2578          St = Res.subtract(Op2V, RM);
2579          break;
2580        case Intrinsic::experimental_constrained_fmul:
2581          St = Res.multiply(Op2V, RM);
2582          break;
2583        case Intrinsic::experimental_constrained_fdiv:
2584          St = Res.divide(Op2V, RM);
2585          break;
2586        case Intrinsic::experimental_constrained_frem:
2587          St = Res.mod(Op2V);
2588          break;
2589        case Intrinsic::experimental_constrained_fcmp:
2590        case Intrinsic::experimental_constrained_fcmps:
2591          return evaluateCompare(Op1V, Op2V, ConstrIntr);
2592        }
2593        if (mayFoldConstrained(const_cast<ConstrainedFPIntrinsic *>(ConstrIntr),
2594                               St))
2595          return ConstantFP::get(Ty->getContext(), Res);
2596        return nullptr;
2597      }
2598
2599      switch (IntrinsicID) {
2600      default:
2601        break;
2602      case Intrinsic::copysign:
2603        return ConstantFP::get(Ty->getContext(), APFloat::copySign(Op1V, Op2V));
2604      case Intrinsic::minnum:
2605        return ConstantFP::get(Ty->getContext(), minnum(Op1V, Op2V));
2606      case Intrinsic::maxnum:
2607        return ConstantFP::get(Ty->getContext(), maxnum(Op1V, Op2V));
2608      case Intrinsic::minimum:
2609        return ConstantFP::get(Ty->getContext(), minimum(Op1V, Op2V));
2610      case Intrinsic::maximum:
2611        return ConstantFP::get(Ty->getContext(), maximum(Op1V, Op2V));
2612      }
2613
2614      if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
2615        return nullptr;
2616
2617      switch (IntrinsicID) {
2618      default:
2619        break;
2620      case Intrinsic::pow:
2621        return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
2622      case Intrinsic::amdgcn_fmul_legacy:
2623        // The legacy behaviour is that multiplying +/- 0.0 by anything, even
2624        // NaN or infinity, gives +0.0.
2625        if (Op1V.isZero() || Op2V.isZero())
2626          return ConstantFP::getZero(Ty);
2627        return ConstantFP::get(Ty->getContext(), Op1V * Op2V);
2628      }
2629
2630      if (!TLI)
2631        return nullptr;
2632
2633      LibFunc Func = NotLibFunc;
2634      if (!TLI->getLibFunc(Name, Func))
2635        return nullptr;
2636
2637      switch (Func) {
2638      default:
2639        break;
2640      case LibFunc_pow:
2641      case LibFunc_powf:
2642      case LibFunc_pow_finite:
2643      case LibFunc_powf_finite:
2644        if (TLI->has(Func))
2645          return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
2646        break;
2647      case LibFunc_fmod:
2648      case LibFunc_fmodf:
2649        if (TLI->has(Func)) {
2650          APFloat V = Op1->getValueAPF();
2651          if (APFloat::opStatus::opOK == V.mod(Op2->getValueAPF()))
2652            return ConstantFP::get(Ty->getContext(), V);
2653        }
2654        break;
2655      case LibFunc_remainder:
2656      case LibFunc_remainderf:
2657        if (TLI->has(Func)) {
2658          APFloat V = Op1->getValueAPF();
2659          if (APFloat::opStatus::opOK == V.remainder(Op2->getValueAPF()))
2660            return ConstantFP::get(Ty->getContext(), V);
2661        }
2662        break;
2663      case LibFunc_atan2:
2664      case LibFunc_atan2f:
2665        // atan2(+/-0.0, +/-0.0) is known to raise an exception on some libm
2666        // (Solaris), so we do not assume a known result for that.
2667        if (Op1V.isZero() && Op2V.isZero())
2668          return nullptr;
2669        [[fallthrough]];
2670      case LibFunc_atan2_finite:
2671      case LibFunc_atan2f_finite:
2672        if (TLI->has(Func))
2673          return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
2674        break;
2675      }
2676    } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
2677      switch (IntrinsicID) {
2678      case Intrinsic::ldexp: {
2679        return ConstantFP::get(
2680            Ty->getContext(),
2681            scalbn(Op1V, Op2C->getSExtValue(), APFloat::rmNearestTiesToEven));
2682      }
2683      case Intrinsic::is_fpclass: {
2684        FPClassTest Mask = static_cast<FPClassTest>(Op2C->getZExtValue());
2685        bool Result =
2686          ((Mask & fcSNan) && Op1V.isNaN() && Op1V.isSignaling()) ||
2687          ((Mask & fcQNan) && Op1V.isNaN() && !Op1V.isSignaling()) ||
2688          ((Mask & fcNegInf) && Op1V.isNegInfinity()) ||
2689          ((Mask & fcNegNormal) && Op1V.isNormal() && Op1V.isNegative()) ||
2690          ((Mask & fcNegSubnormal) && Op1V.isDenormal() && Op1V.isNegative()) ||
2691          ((Mask & fcNegZero) && Op1V.isZero() && Op1V.isNegative()) ||
2692          ((Mask & fcPosZero) && Op1V.isZero() && !Op1V.isNegative()) ||
2693          ((Mask & fcPosSubnormal) && Op1V.isDenormal() && !Op1V.isNegative()) ||
2694          ((Mask & fcPosNormal) && Op1V.isNormal() && !Op1V.isNegative()) ||
2695          ((Mask & fcPosInf) && Op1V.isPosInfinity());
2696        return ConstantInt::get(Ty, Result);
2697      }
2698      default:
2699        break;
2700      }
2701
2702      if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
2703        return nullptr;
2704      if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy())
2705        return ConstantFP::get(
2706            Ty->getContext(),
2707            APFloat((float)std::pow((float)Op1V.convertToDouble(),
2708                                    (int)Op2C->getZExtValue())));
2709      if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy())
2710        return ConstantFP::get(
2711            Ty->getContext(),
2712            APFloat((float)std::pow((float)Op1V.convertToDouble(),
2713                                    (int)Op2C->getZExtValue())));
2714      if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy())
2715        return ConstantFP::get(
2716            Ty->getContext(),
2717            APFloat((double)std::pow(Op1V.convertToDouble(),
2718                                     (int)Op2C->getZExtValue())));
2719    }
2720    return nullptr;
2721  }
2722
2723  if (Operands[0]->getType()->isIntegerTy() &&
2724      Operands[1]->getType()->isIntegerTy()) {
2725    const APInt *C0, *C1;
2726    if (!getConstIntOrUndef(Operands[0], C0) ||
2727        !getConstIntOrUndef(Operands[1], C1))
2728      return nullptr;
2729
2730    switch (IntrinsicID) {
2731    default: break;
2732    case Intrinsic::smax:
2733    case Intrinsic::smin:
2734    case Intrinsic::umax:
2735    case Intrinsic::umin:
2736      // This is the same as for binary ops - poison propagates.
2737      // TODO: Poison handling should be consolidated.
2738      if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
2739        return PoisonValue::get(Ty);
2740
2741      if (!C0 && !C1)
2742        return UndefValue::get(Ty);
2743      if (!C0 || !C1)
2744        return MinMaxIntrinsic::getSaturationPoint(IntrinsicID, Ty);
2745      return ConstantInt::get(
2746          Ty, ICmpInst::compare(*C0, *C1,
2747                                MinMaxIntrinsic::getPredicate(IntrinsicID))
2748                  ? *C0
2749                  : *C1);
2750
2751    case Intrinsic::usub_with_overflow:
2752    case Intrinsic::ssub_with_overflow:
2753      // X - undef -> { 0, false }
2754      // undef - X -> { 0, false }
2755      if (!C0 || !C1)
2756        return Constant::getNullValue(Ty);
2757      [[fallthrough]];
2758    case Intrinsic::uadd_with_overflow:
2759    case Intrinsic::sadd_with_overflow:
2760      // X + undef -> { -1, false }
2761      // undef + x -> { -1, false }
2762      if (!C0 || !C1) {
2763        return ConstantStruct::get(
2764            cast<StructType>(Ty),
2765            {Constant::getAllOnesValue(Ty->getStructElementType(0)),
2766             Constant::getNullValue(Ty->getStructElementType(1))});
2767      }
2768      [[fallthrough]];
2769    case Intrinsic::smul_with_overflow:
2770    case Intrinsic::umul_with_overflow: {
2771      // undef * X -> { 0, false }
2772      // X * undef -> { 0, false }
2773      if (!C0 || !C1)
2774        return Constant::getNullValue(Ty);
2775
2776      APInt Res;
2777      bool Overflow;
2778      switch (IntrinsicID) {
2779      default: llvm_unreachable("Invalid case");
2780      case Intrinsic::sadd_with_overflow:
2781        Res = C0->sadd_ov(*C1, Overflow);
2782        break;
2783      case Intrinsic::uadd_with_overflow:
2784        Res = C0->uadd_ov(*C1, Overflow);
2785        break;
2786      case Intrinsic::ssub_with_overflow:
2787        Res = C0->ssub_ov(*C1, Overflow);
2788        break;
2789      case Intrinsic::usub_with_overflow:
2790        Res = C0->usub_ov(*C1, Overflow);
2791        break;
2792      case Intrinsic::smul_with_overflow:
2793        Res = C0->smul_ov(*C1, Overflow);
2794        break;
2795      case Intrinsic::umul_with_overflow:
2796        Res = C0->umul_ov(*C1, Overflow);
2797        break;
2798      }
2799      Constant *Ops[] = {
2800        ConstantInt::get(Ty->getContext(), Res),
2801        ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)
2802      };
2803      return ConstantStruct::get(cast<StructType>(Ty), Ops);
2804    }
2805    case Intrinsic::uadd_sat:
2806    case Intrinsic::sadd_sat:
2807      // This is the same as for binary ops - poison propagates.
2808      // TODO: Poison handling should be consolidated.
2809      if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
2810        return PoisonValue::get(Ty);
2811
2812      if (!C0 && !C1)
2813        return UndefValue::get(Ty);
2814      if (!C0 || !C1)
2815        return Constant::getAllOnesValue(Ty);
2816      if (IntrinsicID == Intrinsic::uadd_sat)
2817        return ConstantInt::get(Ty, C0->uadd_sat(*C1));
2818      else
2819        return ConstantInt::get(Ty, C0->sadd_sat(*C1));
2820    case Intrinsic::usub_sat:
2821    case Intrinsic::ssub_sat:
2822      // This is the same as for binary ops - poison propagates.
2823      // TODO: Poison handling should be consolidated.
2824      if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
2825        return PoisonValue::get(Ty);
2826
2827      if (!C0 && !C1)
2828        return UndefValue::get(Ty);
2829      if (!C0 || !C1)
2830        return Constant::getNullValue(Ty);
2831      if (IntrinsicID == Intrinsic::usub_sat)
2832        return ConstantInt::get(Ty, C0->usub_sat(*C1));
2833      else
2834        return ConstantInt::get(Ty, C0->ssub_sat(*C1));
2835    case Intrinsic::cttz:
2836    case Intrinsic::ctlz:
2837      assert(C1 && "Must be constant int");
2838
2839      // cttz(0, 1) and ctlz(0, 1) are poison.
2840      if (C1->isOne() && (!C0 || C0->isZero()))
2841        return PoisonValue::get(Ty);
2842      if (!C0)
2843        return Constant::getNullValue(Ty);
2844      if (IntrinsicID == Intrinsic::cttz)
2845        return ConstantInt::get(Ty, C0->countr_zero());
2846      else
2847        return ConstantInt::get(Ty, C0->countl_zero());
2848
2849    case Intrinsic::abs:
2850      assert(C1 && "Must be constant int");
2851      assert((C1->isOne() || C1->isZero()) && "Must be 0 or 1");
2852
2853      // Undef or minimum val operand with poison min --> undef
2854      if (C1->isOne() && (!C0 || C0->isMinSignedValue()))
2855        return UndefValue::get(Ty);
2856
2857      // Undef operand with no poison min --> 0 (sign bit must be clear)
2858      if (!C0)
2859        return Constant::getNullValue(Ty);
2860
2861      return ConstantInt::get(Ty, C0->abs());
2862    case Intrinsic::amdgcn_wave_reduce_umin:
2863    case Intrinsic::amdgcn_wave_reduce_umax:
2864      return dyn_cast<Constant>(Operands[0]);
2865    }
2866
2867    return nullptr;
2868  }
2869
2870  // Support ConstantVector in case we have an Undef in the top.
2871  if ((isa<ConstantVector>(Operands[0]) ||
2872       isa<ConstantDataVector>(Operands[0])) &&
2873      // Check for default rounding mode.
2874      // FIXME: Support other rounding modes?
2875      isa<ConstantInt>(Operands[1]) &&
2876      cast<ConstantInt>(Operands[1])->getValue() == 4) {
2877    auto *Op = cast<Constant>(Operands[0]);
2878    switch (IntrinsicID) {
2879    default: break;
2880    case Intrinsic::x86_avx512_vcvtss2si32:
2881    case Intrinsic::x86_avx512_vcvtss2si64:
2882    case Intrinsic::x86_avx512_vcvtsd2si32:
2883    case Intrinsic::x86_avx512_vcvtsd2si64:
2884      if (ConstantFP *FPOp =
2885              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2886        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2887                                           /*roundTowardZero=*/false, Ty,
2888                                           /*IsSigned*/true);
2889      break;
2890    case Intrinsic::x86_avx512_vcvtss2usi32:
2891    case Intrinsic::x86_avx512_vcvtss2usi64:
2892    case Intrinsic::x86_avx512_vcvtsd2usi32:
2893    case Intrinsic::x86_avx512_vcvtsd2usi64:
2894      if (ConstantFP *FPOp =
2895              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2896        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2897                                           /*roundTowardZero=*/false, Ty,
2898                                           /*IsSigned*/false);
2899      break;
2900    case Intrinsic::x86_avx512_cvttss2si:
2901    case Intrinsic::x86_avx512_cvttss2si64:
2902    case Intrinsic::x86_avx512_cvttsd2si:
2903    case Intrinsic::x86_avx512_cvttsd2si64:
2904      if (ConstantFP *FPOp =
2905              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2906        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2907                                           /*roundTowardZero=*/true, Ty,
2908                                           /*IsSigned*/true);
2909      break;
2910    case Intrinsic::x86_avx512_cvttss2usi:
2911    case Intrinsic::x86_avx512_cvttss2usi64:
2912    case Intrinsic::x86_avx512_cvttsd2usi:
2913    case Intrinsic::x86_avx512_cvttsd2usi64:
2914      if (ConstantFP *FPOp =
2915              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2916        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2917                                           /*roundTowardZero=*/true, Ty,
2918                                           /*IsSigned*/false);
2919      break;
2920    }
2921  }
2922  return nullptr;
2923}
2924
2925static APFloat ConstantFoldAMDGCNCubeIntrinsic(Intrinsic::ID IntrinsicID,
2926                                               const APFloat &S0,
2927                                               const APFloat &S1,
2928                                               const APFloat &S2) {
2929  unsigned ID;
2930  const fltSemantics &Sem = S0.getSemantics();
2931  APFloat MA(Sem), SC(Sem), TC(Sem);
2932  if (abs(S2) >= abs(S0) && abs(S2) >= abs(S1)) {
2933    if (S2.isNegative() && S2.isNonZero() && !S2.isNaN()) {
2934      // S2 < 0
2935      ID = 5;
2936      SC = -S0;
2937    } else {
2938      ID = 4;
2939      SC = S0;
2940    }
2941    MA = S2;
2942    TC = -S1;
2943  } else if (abs(S1) >= abs(S0)) {
2944    if (S1.isNegative() && S1.isNonZero() && !S1.isNaN()) {
2945      // S1 < 0
2946      ID = 3;
2947      TC = -S2;
2948    } else {
2949      ID = 2;
2950      TC = S2;
2951    }
2952    MA = S1;
2953    SC = S0;
2954  } else {
2955    if (S0.isNegative() && S0.isNonZero() && !S0.isNaN()) {
2956      // S0 < 0
2957      ID = 1;
2958      SC = S2;
2959    } else {
2960      ID = 0;
2961      SC = -S2;
2962    }
2963    MA = S0;
2964    TC = -S1;
2965  }
2966  switch (IntrinsicID) {
2967  default:
2968    llvm_unreachable("unhandled amdgcn cube intrinsic");
2969  case Intrinsic::amdgcn_cubeid:
2970    return APFloat(Sem, ID);
2971  case Intrinsic::amdgcn_cubema:
2972    return MA + MA;
2973  case Intrinsic::amdgcn_cubesc:
2974    return SC;
2975  case Intrinsic::amdgcn_cubetc:
2976    return TC;
2977  }
2978}
2979
2980static Constant *ConstantFoldAMDGCNPermIntrinsic(ArrayRef<Constant *> Operands,
2981                                                 Type *Ty) {
2982  const APInt *C0, *C1, *C2;
2983  if (!getConstIntOrUndef(Operands[0], C0) ||
2984      !getConstIntOrUndef(Operands[1], C1) ||
2985      !getConstIntOrUndef(Operands[2], C2))
2986    return nullptr;
2987
2988  if (!C2)
2989    return UndefValue::get(Ty);
2990
2991  APInt Val(32, 0);
2992  unsigned NumUndefBytes = 0;
2993  for (unsigned I = 0; I < 32; I += 8) {
2994    unsigned Sel = C2->extractBitsAsZExtValue(8, I);
2995    unsigned B = 0;
2996
2997    if (Sel >= 13)
2998      B = 0xff;
2999    else if (Sel == 12)
3000      B = 0x00;
3001    else {
3002      const APInt *Src = ((Sel & 10) == 10 || (Sel & 12) == 4) ? C0 : C1;
3003      if (!Src)
3004        ++NumUndefBytes;
3005      else if (Sel < 8)
3006        B = Src->extractBitsAsZExtValue(8, (Sel & 3) * 8);
3007      else
3008        B = Src->extractBitsAsZExtValue(1, (Sel & 1) ? 31 : 15) * 0xff;
3009    }
3010
3011    Val.insertBits(B, I, 8);
3012  }
3013
3014  if (NumUndefBytes == 4)
3015    return UndefValue::get(Ty);
3016
3017  return ConstantInt::get(Ty, Val);
3018}
3019
3020static Constant *ConstantFoldScalarCall3(StringRef Name,
3021                                         Intrinsic::ID IntrinsicID,
3022                                         Type *Ty,
3023                                         ArrayRef<Constant *> Operands,
3024                                         const TargetLibraryInfo *TLI,
3025                                         const CallBase *Call) {
3026  assert(Operands.size() == 3 && "Wrong number of operands.");
3027
3028  if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
3029    if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
3030      if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
3031        const APFloat &C1 = Op1->getValueAPF();
3032        const APFloat &C2 = Op2->getValueAPF();
3033        const APFloat &C3 = Op3->getValueAPF();
3034
3035        if (const auto *ConstrIntr = dyn_cast<ConstrainedFPIntrinsic>(Call)) {
3036          RoundingMode RM = getEvaluationRoundingMode(ConstrIntr);
3037          APFloat Res = C1;
3038          APFloat::opStatus St;
3039          switch (IntrinsicID) {
3040          default:
3041            return nullptr;
3042          case Intrinsic::experimental_constrained_fma:
3043          case Intrinsic::experimental_constrained_fmuladd:
3044            St = Res.fusedMultiplyAdd(C2, C3, RM);
3045            break;
3046          }
3047          if (mayFoldConstrained(
3048                  const_cast<ConstrainedFPIntrinsic *>(ConstrIntr), St))
3049            return ConstantFP::get(Ty->getContext(), Res);
3050          return nullptr;
3051        }
3052
3053        switch (IntrinsicID) {
3054        default: break;
3055        case Intrinsic::amdgcn_fma_legacy: {
3056          // The legacy behaviour is that multiplying +/- 0.0 by anything, even
3057          // NaN or infinity, gives +0.0.
3058          if (C1.isZero() || C2.isZero()) {
3059            // It's tempting to just return C3 here, but that would give the
3060            // wrong result if C3 was -0.0.
3061            return ConstantFP::get(Ty->getContext(), APFloat(0.0f) + C3);
3062          }
3063          [[fallthrough]];
3064        }
3065        case Intrinsic::fma:
3066        case Intrinsic::fmuladd: {
3067          APFloat V = C1;
3068          V.fusedMultiplyAdd(C2, C3, APFloat::rmNearestTiesToEven);
3069          return ConstantFP::get(Ty->getContext(), V);
3070        }
3071        case Intrinsic::amdgcn_cubeid:
3072        case Intrinsic::amdgcn_cubema:
3073        case Intrinsic::amdgcn_cubesc:
3074        case Intrinsic::amdgcn_cubetc: {
3075          APFloat V = ConstantFoldAMDGCNCubeIntrinsic(IntrinsicID, C1, C2, C3);
3076          return ConstantFP::get(Ty->getContext(), V);
3077        }
3078        }
3079      }
3080    }
3081  }
3082
3083  if (IntrinsicID == Intrinsic::smul_fix ||
3084      IntrinsicID == Intrinsic::smul_fix_sat) {
3085    // poison * C -> poison
3086    // C * poison -> poison
3087    if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
3088      return PoisonValue::get(Ty);
3089
3090    const APInt *C0, *C1;
3091    if (!getConstIntOrUndef(Operands[0], C0) ||
3092        !getConstIntOrUndef(Operands[1], C1))
3093      return nullptr;
3094
3095    // undef * C -> 0
3096    // C * undef -> 0
3097    if (!C0 || !C1)
3098      return Constant::getNullValue(Ty);
3099
3100    // This code performs rounding towards negative infinity in case the result
3101    // cannot be represented exactly for the given scale. Targets that do care
3102    // about rounding should use a target hook for specifying how rounding
3103    // should be done, and provide their own folding to be consistent with
3104    // rounding. This is the same approach as used by
3105    // DAGTypeLegalizer::ExpandIntRes_MULFIX.
3106    unsigned Scale = cast<ConstantInt>(Operands[2])->getZExtValue();
3107    unsigned Width = C0->getBitWidth();
3108    assert(Scale < Width && "Illegal scale.");
3109    unsigned ExtendedWidth = Width * 2;
3110    APInt Product =
3111        (C0->sext(ExtendedWidth) * C1->sext(ExtendedWidth)).ashr(Scale);
3112    if (IntrinsicID == Intrinsic::smul_fix_sat) {
3113      APInt Max = APInt::getSignedMaxValue(Width).sext(ExtendedWidth);
3114      APInt Min = APInt::getSignedMinValue(Width).sext(ExtendedWidth);
3115      Product = APIntOps::smin(Product, Max);
3116      Product = APIntOps::smax(Product, Min);
3117    }
3118    return ConstantInt::get(Ty->getContext(), Product.sextOrTrunc(Width));
3119  }
3120
3121  if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
3122    const APInt *C0, *C1, *C2;
3123    if (!getConstIntOrUndef(Operands[0], C0) ||
3124        !getConstIntOrUndef(Operands[1], C1) ||
3125        !getConstIntOrUndef(Operands[2], C2))
3126      return nullptr;
3127
3128    bool IsRight = IntrinsicID == Intrinsic::fshr;
3129    if (!C2)
3130      return Operands[IsRight ? 1 : 0];
3131    if (!C0 && !C1)
3132      return UndefValue::get(Ty);
3133
3134    // The shift amount is interpreted as modulo the bitwidth. If the shift
3135    // amount is effectively 0, avoid UB due to oversized inverse shift below.
3136    unsigned BitWidth = C2->getBitWidth();
3137    unsigned ShAmt = C2->urem(BitWidth);
3138    if (!ShAmt)
3139      return Operands[IsRight ? 1 : 0];
3140
3141    // (C0 << ShlAmt) | (C1 >> LshrAmt)
3142    unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
3143    unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
3144    if (!C0)
3145      return ConstantInt::get(Ty, C1->lshr(LshrAmt));
3146    if (!C1)
3147      return ConstantInt::get(Ty, C0->shl(ShlAmt));
3148    return ConstantInt::get(Ty, C0->shl(ShlAmt) | C1->lshr(LshrAmt));
3149  }
3150
3151  if (IntrinsicID == Intrinsic::amdgcn_perm)
3152    return ConstantFoldAMDGCNPermIntrinsic(Operands, Ty);
3153
3154  return nullptr;
3155}
3156
3157static Constant *ConstantFoldScalarCall(StringRef Name,
3158                                        Intrinsic::ID IntrinsicID,
3159                                        Type *Ty,
3160                                        ArrayRef<Constant *> Operands,
3161                                        const TargetLibraryInfo *TLI,
3162                                        const CallBase *Call) {
3163  if (Operands.size() == 1)
3164    return ConstantFoldScalarCall1(Name, IntrinsicID, Ty, Operands, TLI, Call);
3165
3166  if (Operands.size() == 2)
3167    return ConstantFoldScalarCall2(Name, IntrinsicID, Ty, Operands, TLI, Call);
3168
3169  if (Operands.size() == 3)
3170    return ConstantFoldScalarCall3(Name, IntrinsicID, Ty, Operands, TLI, Call);
3171
3172  return nullptr;
3173}
3174
3175static Constant *ConstantFoldFixedVectorCall(
3176    StringRef Name, Intrinsic::ID IntrinsicID, FixedVectorType *FVTy,
3177    ArrayRef<Constant *> Operands, const DataLayout &DL,
3178    const TargetLibraryInfo *TLI, const CallBase *Call) {
3179  SmallVector<Constant *, 4> Result(FVTy->getNumElements());
3180  SmallVector<Constant *, 4> Lane(Operands.size());
3181  Type *Ty = FVTy->getElementType();
3182
3183  switch (IntrinsicID) {
3184  case Intrinsic::masked_load: {
3185    auto *SrcPtr = Operands[0];
3186    auto *Mask = Operands[2];
3187    auto *Passthru = Operands[3];
3188
3189    Constant *VecData = ConstantFoldLoadFromConstPtr(SrcPtr, FVTy, DL);
3190
3191    SmallVector<Constant *, 32> NewElements;
3192    for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
3193      auto *MaskElt = Mask->getAggregateElement(I);
3194      if (!MaskElt)
3195        break;
3196      auto *PassthruElt = Passthru->getAggregateElement(I);
3197      auto *VecElt = VecData ? VecData->getAggregateElement(I) : nullptr;
3198      if (isa<UndefValue>(MaskElt)) {
3199        if (PassthruElt)
3200          NewElements.push_back(PassthruElt);
3201        else if (VecElt)
3202          NewElements.push_back(VecElt);
3203        else
3204          return nullptr;
3205      }
3206      if (MaskElt->isNullValue()) {
3207        if (!PassthruElt)
3208          return nullptr;
3209        NewElements.push_back(PassthruElt);
3210      } else if (MaskElt->isOneValue()) {
3211        if (!VecElt)
3212          return nullptr;
3213        NewElements.push_back(VecElt);
3214      } else {
3215        return nullptr;
3216      }
3217    }
3218    if (NewElements.size() != FVTy->getNumElements())
3219      return nullptr;
3220    return ConstantVector::get(NewElements);
3221  }
3222  case Intrinsic::arm_mve_vctp8:
3223  case Intrinsic::arm_mve_vctp16:
3224  case Intrinsic::arm_mve_vctp32:
3225  case Intrinsic::arm_mve_vctp64: {
3226    if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
3227      unsigned Lanes = FVTy->getNumElements();
3228      uint64_t Limit = Op->getZExtValue();
3229
3230      SmallVector<Constant *, 16> NCs;
3231      for (unsigned i = 0; i < Lanes; i++) {
3232        if (i < Limit)
3233          NCs.push_back(ConstantInt::getTrue(Ty));
3234        else
3235          NCs.push_back(ConstantInt::getFalse(Ty));
3236      }
3237      return ConstantVector::get(NCs);
3238    }
3239    return nullptr;
3240  }
3241  case Intrinsic::get_active_lane_mask: {
3242    auto *Op0 = dyn_cast<ConstantInt>(Operands[0]);
3243    auto *Op1 = dyn_cast<ConstantInt>(Operands[1]);
3244    if (Op0 && Op1) {
3245      unsigned Lanes = FVTy->getNumElements();
3246      uint64_t Base = Op0->getZExtValue();
3247      uint64_t Limit = Op1->getZExtValue();
3248
3249      SmallVector<Constant *, 16> NCs;
3250      for (unsigned i = 0; i < Lanes; i++) {
3251        if (Base + i < Limit)
3252          NCs.push_back(ConstantInt::getTrue(Ty));
3253        else
3254          NCs.push_back(ConstantInt::getFalse(Ty));
3255      }
3256      return ConstantVector::get(NCs);
3257    }
3258    return nullptr;
3259  }
3260  default:
3261    break;
3262  }
3263
3264  for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
3265    // Gather a column of constants.
3266    for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
3267      // Some intrinsics use a scalar type for certain arguments.
3268      if (isVectorIntrinsicWithScalarOpAtArg(IntrinsicID, J)) {
3269        Lane[J] = Operands[J];
3270        continue;
3271      }
3272
3273      Constant *Agg = Operands[J]->getAggregateElement(I);
3274      if (!Agg)
3275        return nullptr;
3276
3277      Lane[J] = Agg;
3278    }
3279
3280    // Use the regular scalar folding to simplify this column.
3281    Constant *Folded =
3282        ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, Call);
3283    if (!Folded)
3284      return nullptr;
3285    Result[I] = Folded;
3286  }
3287
3288  return ConstantVector::get(Result);
3289}
3290
3291static Constant *ConstantFoldScalableVectorCall(
3292    StringRef Name, Intrinsic::ID IntrinsicID, ScalableVectorType *SVTy,
3293    ArrayRef<Constant *> Operands, const DataLayout &DL,
3294    const TargetLibraryInfo *TLI, const CallBase *Call) {
3295  switch (IntrinsicID) {
3296  case Intrinsic::aarch64_sve_convert_from_svbool: {
3297    auto *Src = dyn_cast<Constant>(Operands[0]);
3298    if (!Src || !Src->isNullValue())
3299      break;
3300
3301    return ConstantInt::getFalse(SVTy);
3302  }
3303  default:
3304    break;
3305  }
3306  return nullptr;
3307}
3308
3309static std::pair<Constant *, Constant *>
3310ConstantFoldScalarFrexpCall(Constant *Op, Type *IntTy) {
3311  if (isa<PoisonValue>(Op))
3312    return {Op, PoisonValue::get(IntTy)};
3313
3314  auto *ConstFP = dyn_cast<ConstantFP>(Op);
3315  if (!ConstFP)
3316    return {};
3317
3318  const APFloat &U = ConstFP->getValueAPF();
3319  int FrexpExp;
3320  APFloat FrexpMant = frexp(U, FrexpExp, APFloat::rmNearestTiesToEven);
3321  Constant *Result0 = ConstantFP::get(ConstFP->getType(), FrexpMant);
3322
3323  // The exponent is an "unspecified value" for inf/nan. We use zero to avoid
3324  // using undef.
3325  Constant *Result1 = FrexpMant.isFinite() ? ConstantInt::get(IntTy, FrexpExp)
3326                                           : ConstantInt::getNullValue(IntTy);
3327  return {Result0, Result1};
3328}
3329
3330/// Handle intrinsics that return tuples, which may be tuples of vectors.
3331static Constant *
3332ConstantFoldStructCall(StringRef Name, Intrinsic::ID IntrinsicID,
3333                       StructType *StTy, ArrayRef<Constant *> Operands,
3334                       const DataLayout &DL, const TargetLibraryInfo *TLI,
3335                       const CallBase *Call) {
3336
3337  switch (IntrinsicID) {
3338  case Intrinsic::frexp: {
3339    Type *Ty0 = StTy->getContainedType(0);
3340    Type *Ty1 = StTy->getContainedType(1)->getScalarType();
3341
3342    if (auto *FVTy0 = dyn_cast<FixedVectorType>(Ty0)) {
3343      SmallVector<Constant *, 4> Results0(FVTy0->getNumElements());
3344      SmallVector<Constant *, 4> Results1(FVTy0->getNumElements());
3345
3346      for (unsigned I = 0, E = FVTy0->getNumElements(); I != E; ++I) {
3347        Constant *Lane = Operands[0]->getAggregateElement(I);
3348        std::tie(Results0[I], Results1[I]) =
3349            ConstantFoldScalarFrexpCall(Lane, Ty1);
3350        if (!Results0[I])
3351          return nullptr;
3352      }
3353
3354      return ConstantStruct::get(StTy, ConstantVector::get(Results0),
3355                                 ConstantVector::get(Results1));
3356    }
3357
3358    auto [Result0, Result1] = ConstantFoldScalarFrexpCall(Operands[0], Ty1);
3359    if (!Result0)
3360      return nullptr;
3361    return ConstantStruct::get(StTy, Result0, Result1);
3362  }
3363  default:
3364    // TODO: Constant folding of vector intrinsics that fall through here does
3365    // not work (e.g. overflow intrinsics)
3366    return ConstantFoldScalarCall(Name, IntrinsicID, StTy, Operands, TLI, Call);
3367  }
3368
3369  return nullptr;
3370}
3371
3372} // end anonymous namespace
3373
3374Constant *llvm::ConstantFoldCall(const CallBase *Call, Function *F,
3375                                 ArrayRef<Constant *> Operands,
3376                                 const TargetLibraryInfo *TLI) {
3377  if (Call->isNoBuiltin())
3378    return nullptr;
3379  if (!F->hasName())
3380    return nullptr;
3381
3382  // If this is not an intrinsic and not recognized as a library call, bail out.
3383  Intrinsic::ID IID = F->getIntrinsicID();
3384  if (IID == Intrinsic::not_intrinsic) {
3385    if (!TLI)
3386      return nullptr;
3387    LibFunc LibF;
3388    if (!TLI->getLibFunc(*F, LibF))
3389      return nullptr;
3390  }
3391
3392  StringRef Name = F->getName();
3393  Type *Ty = F->getReturnType();
3394  if (auto *FVTy = dyn_cast<FixedVectorType>(Ty))
3395    return ConstantFoldFixedVectorCall(
3396        Name, IID, FVTy, Operands, F->getParent()->getDataLayout(), TLI, Call);
3397
3398  if (auto *SVTy = dyn_cast<ScalableVectorType>(Ty))
3399    return ConstantFoldScalableVectorCall(
3400        Name, IID, SVTy, Operands, F->getParent()->getDataLayout(), TLI, Call);
3401
3402  if (auto *StTy = dyn_cast<StructType>(Ty))
3403    return ConstantFoldStructCall(Name, IID, StTy, Operands,
3404                                  F->getParent()->getDataLayout(), TLI, Call);
3405
3406  // TODO: If this is a library function, we already discovered that above,
3407  //       so we should pass the LibFunc, not the name (and it might be better
3408  //       still to separate intrinsic handling from libcalls).
3409  return ConstantFoldScalarCall(Name, IID, Ty, Operands, TLI, Call);
3410}
3411
3412bool llvm::isMathLibCallNoop(const CallBase *Call,
3413                             const TargetLibraryInfo *TLI) {
3414  // FIXME: Refactor this code; this duplicates logic in LibCallsShrinkWrap
3415  // (and to some extent ConstantFoldScalarCall).
3416  if (Call->isNoBuiltin() || Call->isStrictFP())
3417    return false;
3418  Function *F = Call->getCalledFunction();
3419  if (!F)
3420    return false;
3421
3422  LibFunc Func;
3423  if (!TLI || !TLI->getLibFunc(*F, Func))
3424    return false;
3425
3426  if (Call->arg_size() == 1) {
3427    if (ConstantFP *OpC = dyn_cast<ConstantFP>(Call->getArgOperand(0))) {
3428      const APFloat &Op = OpC->getValueAPF();
3429      switch (Func) {
3430      case LibFunc_logl:
3431      case LibFunc_log:
3432      case LibFunc_logf:
3433      case LibFunc_log2l:
3434      case LibFunc_log2:
3435      case LibFunc_log2f:
3436      case LibFunc_log10l:
3437      case LibFunc_log10:
3438      case LibFunc_log10f:
3439        return Op.isNaN() || (!Op.isZero() && !Op.isNegative());
3440
3441      case LibFunc_expl:
3442      case LibFunc_exp:
3443      case LibFunc_expf:
3444        // FIXME: These boundaries are slightly conservative.
3445        if (OpC->getType()->isDoubleTy())
3446          return !(Op < APFloat(-745.0) || Op > APFloat(709.0));
3447        if (OpC->getType()->isFloatTy())
3448          return !(Op < APFloat(-103.0f) || Op > APFloat(88.0f));
3449        break;
3450
3451      case LibFunc_exp2l:
3452      case LibFunc_exp2:
3453      case LibFunc_exp2f:
3454        // FIXME: These boundaries are slightly conservative.
3455        if (OpC->getType()->isDoubleTy())
3456          return !(Op < APFloat(-1074.0) || Op > APFloat(1023.0));
3457        if (OpC->getType()->isFloatTy())
3458          return !(Op < APFloat(-149.0f) || Op > APFloat(127.0f));
3459        break;
3460
3461      case LibFunc_sinl:
3462      case LibFunc_sin:
3463      case LibFunc_sinf:
3464      case LibFunc_cosl:
3465      case LibFunc_cos:
3466      case LibFunc_cosf:
3467        return !Op.isInfinity();
3468
3469      case LibFunc_tanl:
3470      case LibFunc_tan:
3471      case LibFunc_tanf: {
3472        // FIXME: Stop using the host math library.
3473        // FIXME: The computation isn't done in the right precision.
3474        Type *Ty = OpC->getType();
3475        if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy())
3476          return ConstantFoldFP(tan, OpC->getValueAPF(), Ty) != nullptr;
3477        break;
3478      }
3479
3480      case LibFunc_atan:
3481      case LibFunc_atanf:
3482      case LibFunc_atanl:
3483        // Per POSIX, this MAY fail if Op is denormal. We choose not failing.
3484        return true;
3485
3486
3487      case LibFunc_asinl:
3488      case LibFunc_asin:
3489      case LibFunc_asinf:
3490      case LibFunc_acosl:
3491      case LibFunc_acos:
3492      case LibFunc_acosf:
3493        return !(Op < APFloat(Op.getSemantics(), "-1") ||
3494                 Op > APFloat(Op.getSemantics(), "1"));
3495
3496      case LibFunc_sinh:
3497      case LibFunc_cosh:
3498      case LibFunc_sinhf:
3499      case LibFunc_coshf:
3500      case LibFunc_sinhl:
3501      case LibFunc_coshl:
3502        // FIXME: These boundaries are slightly conservative.
3503        if (OpC->getType()->isDoubleTy())
3504          return !(Op < APFloat(-710.0) || Op > APFloat(710.0));
3505        if (OpC->getType()->isFloatTy())
3506          return !(Op < APFloat(-89.0f) || Op > APFloat(89.0f));
3507        break;
3508
3509      case LibFunc_sqrtl:
3510      case LibFunc_sqrt:
3511      case LibFunc_sqrtf:
3512        return Op.isNaN() || Op.isZero() || !Op.isNegative();
3513
3514      // FIXME: Add more functions: sqrt_finite, atanh, expm1, log1p,
3515      // maybe others?
3516      default:
3517        break;
3518      }
3519    }
3520  }
3521
3522  if (Call->arg_size() == 2) {
3523    ConstantFP *Op0C = dyn_cast<ConstantFP>(Call->getArgOperand(0));
3524    ConstantFP *Op1C = dyn_cast<ConstantFP>(Call->getArgOperand(1));
3525    if (Op0C && Op1C) {
3526      const APFloat &Op0 = Op0C->getValueAPF();
3527      const APFloat &Op1 = Op1C->getValueAPF();
3528
3529      switch (Func) {
3530      case LibFunc_powl:
3531      case LibFunc_pow:
3532      case LibFunc_powf: {
3533        // FIXME: Stop using the host math library.
3534        // FIXME: The computation isn't done in the right precision.
3535        Type *Ty = Op0C->getType();
3536        if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
3537          if (Ty == Op1C->getType())
3538            return ConstantFoldBinaryFP(pow, Op0, Op1, Ty) != nullptr;
3539        }
3540        break;
3541      }
3542
3543      case LibFunc_fmodl:
3544      case LibFunc_fmod:
3545      case LibFunc_fmodf:
3546      case LibFunc_remainderl:
3547      case LibFunc_remainder:
3548      case LibFunc_remainderf:
3549        return Op0.isNaN() || Op1.isNaN() ||
3550               (!Op0.isInfinity() && !Op1.isZero());
3551
3552      case LibFunc_atan2:
3553      case LibFunc_atan2f:
3554      case LibFunc_atan2l:
3555        // Although IEEE-754 says atan2(+/-0.0, +/-0.0) are well-defined, and
3556        // GLIBC and MSVC do not appear to raise an error on those, we
3557        // cannot rely on that behavior. POSIX and C11 say that a domain error
3558        // may occur, so allow for that possibility.
3559        return !Op0.isZero() || !Op1.isZero();
3560
3561      default:
3562        break;
3563      }
3564    }
3565  }
3566
3567  return false;
3568}
3569
3570void TargetFolder::anchor() {}
3571