1//===- llvm/CodeGen/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file describes how to lower LLVM code to machine code.  This has two
11/// main components:
12///
13///  1. Which ValueTypes are natively supported by the target.
14///  2. Which operations are supported for supported ValueTypes.
15///  3. Cost thresholds for alternative implementations of certain operations.
16///
17/// In addition it has a few other components, like information about FP
18/// immediates.
19///
20//===----------------------------------------------------------------------===//
21
22#ifndef LLVM_CODEGEN_TARGETLOWERING_H
23#define LLVM_CODEGEN_TARGETLOWERING_H
24
25#include "llvm/ADT/APInt.h"
26#include "llvm/ADT/ArrayRef.h"
27#include "llvm/ADT/DenseMap.h"
28#include "llvm/ADT/SmallVector.h"
29#include "llvm/ADT/StringRef.h"
30#include "llvm/CodeGen/DAGCombine.h"
31#include "llvm/CodeGen/ISDOpcodes.h"
32#include "llvm/CodeGen/LowLevelTypeUtils.h"
33#include "llvm/CodeGen/MachineRegisterInfo.h"
34#include "llvm/CodeGen/MachineValueType.h"
35#include "llvm/CodeGen/RuntimeLibcalls.h"
36#include "llvm/CodeGen/SelectionDAG.h"
37#include "llvm/CodeGen/SelectionDAGNodes.h"
38#include "llvm/CodeGen/TargetCallingConv.h"
39#include "llvm/CodeGen/ValueTypes.h"
40#include "llvm/IR/Attributes.h"
41#include "llvm/IR/CallingConv.h"
42#include "llvm/IR/DataLayout.h"
43#include "llvm/IR/DerivedTypes.h"
44#include "llvm/IR/Function.h"
45#include "llvm/IR/InlineAsm.h"
46#include "llvm/IR/Instruction.h"
47#include "llvm/IR/Instructions.h"
48#include "llvm/IR/Type.h"
49#include "llvm/Support/Alignment.h"
50#include "llvm/Support/AtomicOrdering.h"
51#include "llvm/Support/Casting.h"
52#include "llvm/Support/ErrorHandling.h"
53#include <algorithm>
54#include <cassert>
55#include <climits>
56#include <cstdint>
57#include <iterator>
58#include <map>
59#include <string>
60#include <utility>
61#include <vector>
62
63namespace llvm {
64
65class AssumptionCache;
66class CCState;
67class CCValAssign;
68enum class ComplexDeinterleavingOperation;
69enum class ComplexDeinterleavingRotation;
70class Constant;
71class FastISel;
72class FunctionLoweringInfo;
73class GlobalValue;
74class Loop;
75class GISelKnownBits;
76class IntrinsicInst;
77class IRBuilderBase;
78struct KnownBits;
79class LLVMContext;
80class MachineBasicBlock;
81class MachineFunction;
82class MachineInstr;
83class MachineJumpTableInfo;
84class MachineLoop;
85class MachineRegisterInfo;
86class MCContext;
87class MCExpr;
88class Module;
89class ProfileSummaryInfo;
90class TargetLibraryInfo;
91class TargetMachine;
92class TargetRegisterClass;
93class TargetRegisterInfo;
94class TargetTransformInfo;
95class Value;
96
97namespace Sched {
98
99enum Preference {
100  None,        // No preference
101  Source,      // Follow source order.
102  RegPressure, // Scheduling for lowest register pressure.
103  Hybrid,      // Scheduling for both latency and register pressure.
104  ILP,         // Scheduling for ILP in low register pressure mode.
105  VLIW,        // Scheduling for VLIW targets.
106  Fast,        // Fast suboptimal list scheduling
107  Linearize    // Linearize DAG, no scheduling
108};
109
110} // end namespace Sched
111
112// MemOp models a memory operation, either memset or memcpy/memmove.
113struct MemOp {
114private:
115  // Shared
116  uint64_t Size;
117  bool DstAlignCanChange; // true if destination alignment can satisfy any
118                          // constraint.
119  Align DstAlign;         // Specified alignment of the memory operation.
120
121  bool AllowOverlap;
122  // memset only
123  bool IsMemset;   // If setthis memory operation is a memset.
124  bool ZeroMemset; // If set clears out memory with zeros.
125  // memcpy only
126  bool MemcpyStrSrc; // Indicates whether the memcpy source is an in-register
127                     // constant so it does not need to be loaded.
128  Align SrcAlign;    // Inferred alignment of the source or default value if the
129                     // memory operation does not need to load the value.
130public:
131  static MemOp Copy(uint64_t Size, bool DstAlignCanChange, Align DstAlign,
132                    Align SrcAlign, bool IsVolatile,
133                    bool MemcpyStrSrc = false) {
134    MemOp Op;
135    Op.Size = Size;
136    Op.DstAlignCanChange = DstAlignCanChange;
137    Op.DstAlign = DstAlign;
138    Op.AllowOverlap = !IsVolatile;
139    Op.IsMemset = false;
140    Op.ZeroMemset = false;
141    Op.MemcpyStrSrc = MemcpyStrSrc;
142    Op.SrcAlign = SrcAlign;
143    return Op;
144  }
145
146  static MemOp Set(uint64_t Size, bool DstAlignCanChange, Align DstAlign,
147                   bool IsZeroMemset, bool IsVolatile) {
148    MemOp Op;
149    Op.Size = Size;
150    Op.DstAlignCanChange = DstAlignCanChange;
151    Op.DstAlign = DstAlign;
152    Op.AllowOverlap = !IsVolatile;
153    Op.IsMemset = true;
154    Op.ZeroMemset = IsZeroMemset;
155    Op.MemcpyStrSrc = false;
156    return Op;
157  }
158
159  uint64_t size() const { return Size; }
160  Align getDstAlign() const {
161    assert(!DstAlignCanChange);
162    return DstAlign;
163  }
164  bool isFixedDstAlign() const { return !DstAlignCanChange; }
165  bool allowOverlap() const { return AllowOverlap; }
166  bool isMemset() const { return IsMemset; }
167  bool isMemcpy() const { return !IsMemset; }
168  bool isMemcpyWithFixedDstAlign() const {
169    return isMemcpy() && !DstAlignCanChange;
170  }
171  bool isZeroMemset() const { return isMemset() && ZeroMemset; }
172  bool isMemcpyStrSrc() const {
173    assert(isMemcpy() && "Must be a memcpy");
174    return MemcpyStrSrc;
175  }
176  Align getSrcAlign() const {
177    assert(isMemcpy() && "Must be a memcpy");
178    return SrcAlign;
179  }
180  bool isSrcAligned(Align AlignCheck) const {
181    return isMemset() || llvm::isAligned(AlignCheck, SrcAlign.value());
182  }
183  bool isDstAligned(Align AlignCheck) const {
184    return DstAlignCanChange || llvm::isAligned(AlignCheck, DstAlign.value());
185  }
186  bool isAligned(Align AlignCheck) const {
187    return isSrcAligned(AlignCheck) && isDstAligned(AlignCheck);
188  }
189};
190
191/// This base class for TargetLowering contains the SelectionDAG-independent
192/// parts that can be used from the rest of CodeGen.
193class TargetLoweringBase {
194public:
195  /// This enum indicates whether operations are valid for a target, and if not,
196  /// what action should be used to make them valid.
197  enum LegalizeAction : uint8_t {
198    Legal,      // The target natively supports this operation.
199    Promote,    // This operation should be executed in a larger type.
200    Expand,     // Try to expand this to other ops, otherwise use a libcall.
201    LibCall,    // Don't try to expand this to other ops, always use a libcall.
202    Custom      // Use the LowerOperation hook to implement custom lowering.
203  };
204
205  /// This enum indicates whether a types are legal for a target, and if not,
206  /// what action should be used to make them valid.
207  enum LegalizeTypeAction : uint8_t {
208    TypeLegal,           // The target natively supports this type.
209    TypePromoteInteger,  // Replace this integer with a larger one.
210    TypeExpandInteger,   // Split this integer into two of half the size.
211    TypeSoftenFloat,     // Convert this float to a same size integer type.
212    TypeExpandFloat,     // Split this float into two of half the size.
213    TypeScalarizeVector, // Replace this one-element vector with its element.
214    TypeSplitVector,     // Split this vector into two of half the size.
215    TypeWidenVector,     // This vector should be widened into a larger vector.
216    TypePromoteFloat,    // Replace this float with a larger one.
217    TypeSoftPromoteHalf, // Soften half to i16 and use float to do arithmetic.
218    TypeScalarizeScalableVector, // This action is explicitly left unimplemented.
219                                 // While it is theoretically possible to
220                                 // legalize operations on scalable types with a
221                                 // loop that handles the vscale * #lanes of the
222                                 // vector, this is non-trivial at SelectionDAG
223                                 // level and these types are better to be
224                                 // widened or promoted.
225  };
226
227  /// LegalizeKind holds the legalization kind that needs to happen to EVT
228  /// in order to type-legalize it.
229  using LegalizeKind = std::pair<LegalizeTypeAction, EVT>;
230
231  /// Enum that describes how the target represents true/false values.
232  enum BooleanContent {
233    UndefinedBooleanContent,    // Only bit 0 counts, the rest can hold garbage.
234    ZeroOrOneBooleanContent,        // All bits zero except for bit 0.
235    ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
236  };
237
238  /// Enum that describes what type of support for selects the target has.
239  enum SelectSupportKind {
240    ScalarValSelect,      // The target supports scalar selects (ex: cmov).
241    ScalarCondVectorVal,  // The target supports selects with a scalar condition
242                          // and vector values (ex: cmov).
243    VectorMaskSelect      // The target supports vector selects with a vector
244                          // mask (ex: x86 blends).
245  };
246
247  /// Enum that specifies what an atomic load/AtomicRMWInst is expanded
248  /// to, if at all. Exists because different targets have different levels of
249  /// support for these atomic instructions, and also have different options
250  /// w.r.t. what they should expand to.
251  enum class AtomicExpansionKind {
252    None,    // Don't expand the instruction.
253    CastToInteger,    // Cast the atomic instruction to another type, e.g. from
254                      // floating-point to integer type.
255    LLSC,    // Expand the instruction into loadlinked/storeconditional; used
256             // by ARM/AArch64.
257    LLOnly,  // Expand the (load) instruction into just a load-linked, which has
258             // greater atomic guarantees than a normal load.
259    CmpXChg, // Expand the instruction into cmpxchg; used by at least X86.
260    MaskedIntrinsic,  // Use a target-specific intrinsic for the LL/SC loop.
261    BitTestIntrinsic, // Use a target-specific intrinsic for special bit
262                      // operations; used by X86.
263    CmpArithIntrinsic,// Use a target-specific intrinsic for special compare
264                      // operations; used by X86.
265    Expand,           // Generic expansion in terms of other atomic operations.
266
267    // Rewrite to a non-atomic form for use in a known non-preemptible
268    // environment.
269    NotAtomic
270  };
271
272  /// Enum that specifies when a multiplication should be expanded.
273  enum class MulExpansionKind {
274    Always,            // Always expand the instruction.
275    OnlyLegalOrCustom, // Only expand when the resulting instructions are legal
276                       // or custom.
277  };
278
279  /// Enum that specifies when a float negation is beneficial.
280  enum class NegatibleCost {
281    Cheaper = 0,    // Negated expression is cheaper.
282    Neutral = 1,    // Negated expression has the same cost.
283    Expensive = 2   // Negated expression is more expensive.
284  };
285
286  /// Enum of different potentially desirable ways to fold (and/or (setcc ...),
287  /// (setcc ...)).
288  enum AndOrSETCCFoldKind : uint8_t {
289    None = 0,   // No fold is preferable.
290    AddAnd = 1, // Fold with `Add` op and `And` op is preferable.
291    NotAnd = 2, // Fold with `Not` op and `And` op is preferable.
292    ABS = 4,    // Fold with `llvm.abs` op is preferable.
293  };
294
295  class ArgListEntry {
296  public:
297    Value *Val = nullptr;
298    SDValue Node = SDValue();
299    Type *Ty = nullptr;
300    bool IsSExt : 1;
301    bool IsZExt : 1;
302    bool IsInReg : 1;
303    bool IsSRet : 1;
304    bool IsNest : 1;
305    bool IsByVal : 1;
306    bool IsByRef : 1;
307    bool IsInAlloca : 1;
308    bool IsPreallocated : 1;
309    bool IsReturned : 1;
310    bool IsSwiftSelf : 1;
311    bool IsSwiftAsync : 1;
312    bool IsSwiftError : 1;
313    bool IsCFGuardTarget : 1;
314    MaybeAlign Alignment = std::nullopt;
315    Type *IndirectType = nullptr;
316
317    ArgListEntry()
318        : IsSExt(false), IsZExt(false), IsInReg(false), IsSRet(false),
319          IsNest(false), IsByVal(false), IsByRef(false), IsInAlloca(false),
320          IsPreallocated(false), IsReturned(false), IsSwiftSelf(false),
321          IsSwiftAsync(false), IsSwiftError(false), IsCFGuardTarget(false) {}
322
323    void setAttributes(const CallBase *Call, unsigned ArgIdx);
324  };
325  using ArgListTy = std::vector<ArgListEntry>;
326
327  virtual void markLibCallAttributes(MachineFunction *MF, unsigned CC,
328                                     ArgListTy &Args) const {};
329
330  static ISD::NodeType getExtendForContent(BooleanContent Content) {
331    switch (Content) {
332    case UndefinedBooleanContent:
333      // Extend by adding rubbish bits.
334      return ISD::ANY_EXTEND;
335    case ZeroOrOneBooleanContent:
336      // Extend by adding zero bits.
337      return ISD::ZERO_EXTEND;
338    case ZeroOrNegativeOneBooleanContent:
339      // Extend by copying the sign bit.
340      return ISD::SIGN_EXTEND;
341    }
342    llvm_unreachable("Invalid content kind");
343  }
344
345  explicit TargetLoweringBase(const TargetMachine &TM);
346  TargetLoweringBase(const TargetLoweringBase &) = delete;
347  TargetLoweringBase &operator=(const TargetLoweringBase &) = delete;
348  virtual ~TargetLoweringBase() = default;
349
350  /// Return true if the target support strict float operation
351  bool isStrictFPEnabled() const {
352    return IsStrictFPEnabled;
353  }
354
355protected:
356  /// Initialize all of the actions to default values.
357  void initActions();
358
359public:
360  const TargetMachine &getTargetMachine() const { return TM; }
361
362  virtual bool useSoftFloat() const { return false; }
363
364  /// Return the pointer type for the given address space, defaults to
365  /// the pointer type from the data layout.
366  /// FIXME: The default needs to be removed once all the code is updated.
367  virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const {
368    return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
369  }
370
371  /// Return the in-memory pointer type for the given address space, defaults to
372  /// the pointer type from the data layout.
373  /// FIXME: The default needs to be removed once all the code is updated.
374  virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS = 0) const {
375    return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
376  }
377
378  /// Return the type for frame index, which is determined by
379  /// the alloca address space specified through the data layout.
380  MVT getFrameIndexTy(const DataLayout &DL) const {
381    return getPointerTy(DL, DL.getAllocaAddrSpace());
382  }
383
384  /// Return the type for code pointers, which is determined by the program
385  /// address space specified through the data layout.
386  MVT getProgramPointerTy(const DataLayout &DL) const {
387    return getPointerTy(DL, DL.getProgramAddressSpace());
388  }
389
390  /// Return the type for operands of fence.
391  /// TODO: Let fence operands be of i32 type and remove this.
392  virtual MVT getFenceOperandTy(const DataLayout &DL) const {
393    return getPointerTy(DL);
394  }
395
396  /// Return the type to use for a scalar shift opcode, given the shifted amount
397  /// type. Targets should return a legal type if the input type is legal.
398  /// Targets can return a type that is too small if the input type is illegal.
399  virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const;
400
401  /// Returns the type for the shift amount of a shift opcode. For vectors,
402  /// returns the input type. For scalars, behavior depends on \p LegalTypes. If
403  /// \p LegalTypes is true, calls getScalarShiftAmountTy, otherwise uses
404  /// pointer type. If getScalarShiftAmountTy or pointer type cannot represent
405  /// all possible shift amounts, returns MVT::i32. In general, \p LegalTypes
406  /// should be set to true for calls during type legalization and after type
407  /// legalization has been completed.
408  EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL,
409                       bool LegalTypes = true) const;
410
411  /// Return the preferred type to use for a shift opcode, given the shifted
412  /// amount type is \p ShiftValueTy.
413  LLVM_READONLY
414  virtual LLT getPreferredShiftAmountTy(LLT ShiftValueTy) const {
415    return ShiftValueTy;
416  }
417
418  /// Returns the type to be used for the index operand of:
419  /// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
420  /// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR
421  virtual MVT getVectorIdxTy(const DataLayout &DL) const {
422    return getPointerTy(DL);
423  }
424
425  /// Returns the type to be used for the EVL/AVL operand of VP nodes:
426  /// ISD::VP_ADD, ISD::VP_SUB, etc. It must be a legal scalar integer type,
427  /// and must be at least as large as i32. The EVL is implicitly zero-extended
428  /// to any larger type.
429  virtual MVT getVPExplicitVectorLengthTy() const { return MVT::i32; }
430
431  /// This callback is used to inspect load/store instructions and add
432  /// target-specific MachineMemOperand flags to them.  The default
433  /// implementation does nothing.
434  virtual MachineMemOperand::Flags getTargetMMOFlags(const Instruction &I) const {
435    return MachineMemOperand::MONone;
436  }
437
438  /// This callback is used to inspect load/store SDNode.
439  /// The default implementation does nothing.
440  virtual MachineMemOperand::Flags
441  getTargetMMOFlags(const MemSDNode &Node) const {
442    return MachineMemOperand::MONone;
443  }
444
445  MachineMemOperand::Flags
446  getLoadMemOperandFlags(const LoadInst &LI, const DataLayout &DL,
447                         AssumptionCache *AC = nullptr,
448                         const TargetLibraryInfo *LibInfo = nullptr) const;
449  MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI,
450                                                   const DataLayout &DL) const;
451  MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI,
452                                                    const DataLayout &DL) const;
453
454  virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
455    return true;
456  }
457
458  /// Return true if the @llvm.get.active.lane.mask intrinsic should be expanded
459  /// using generic code in SelectionDAGBuilder.
460  virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const {
461    return true;
462  }
463
464  virtual bool shouldExpandGetVectorLength(EVT CountVT, unsigned VF,
465                                           bool IsScalable) const {
466    return true;
467  }
468
469  /// Return true if the @llvm.experimental.cttz.elts intrinsic should be
470  /// expanded using generic code in SelectionDAGBuilder.
471  virtual bool shouldExpandCttzElements(EVT VT) const { return true; }
472
473  // Return true if op(vecreduce(x), vecreduce(y)) should be reassociated to
474  // vecreduce(op(x, y)) for the reduction opcode RedOpc.
475  virtual bool shouldReassociateReduction(unsigned RedOpc, EVT VT) const {
476    return true;
477  }
478
479  /// Return true if it is profitable to convert a select of FP constants into
480  /// a constant pool load whose address depends on the select condition. The
481  /// parameter may be used to differentiate a select with FP compare from
482  /// integer compare.
483  virtual bool reduceSelectOfFPConstantLoads(EVT CmpOpVT) const {
484    return true;
485  }
486
487  /// Return true if multiple condition registers are available.
488  bool hasMultipleConditionRegisters() const {
489    return HasMultipleConditionRegisters;
490  }
491
492  /// Return true if the target has BitExtract instructions.
493  bool hasExtractBitsInsn() const { return HasExtractBitsInsn; }
494
495  /// Return the preferred vector type legalization action.
496  virtual TargetLoweringBase::LegalizeTypeAction
497  getPreferredVectorAction(MVT VT) const {
498    // The default action for one element vectors is to scalarize
499    if (VT.getVectorElementCount().isScalar())
500      return TypeScalarizeVector;
501    // The default action for an odd-width vector is to widen.
502    if (!VT.isPow2VectorType())
503      return TypeWidenVector;
504    // The default action for other vectors is to promote
505    return TypePromoteInteger;
506  }
507
508  // Return true if the half type should be passed around as i16, but promoted
509  // to float around arithmetic. The default behavior is to pass around as
510  // float and convert around loads/stores/bitcasts and other places where
511  // the size matters.
512  virtual bool softPromoteHalfType() const { return false; }
513
514  // There are two general methods for expanding a BUILD_VECTOR node:
515  //  1. Use SCALAR_TO_VECTOR on the defined scalar values and then shuffle
516  //     them together.
517  //  2. Build the vector on the stack and then load it.
518  // If this function returns true, then method (1) will be used, subject to
519  // the constraint that all of the necessary shuffles are legal (as determined
520  // by isShuffleMaskLegal). If this function returns false, then method (2) is
521  // always used. The vector type, and the number of defined values, are
522  // provided.
523  virtual bool
524  shouldExpandBuildVectorWithShuffles(EVT /* VT */,
525                                      unsigned DefinedValues) const {
526    return DefinedValues < 3;
527  }
528
529  /// Return true if integer divide is usually cheaper than a sequence of
530  /// several shifts, adds, and multiplies for this target.
531  /// The definition of "cheaper" may depend on whether we're optimizing
532  /// for speed or for size.
533  virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const { return false; }
534
535  /// Return true if the target can handle a standalone remainder operation.
536  virtual bool hasStandaloneRem(EVT VT) const {
537    return true;
538  }
539
540  /// Return true if SQRT(X) shouldn't be replaced with X*RSQRT(X).
541  virtual bool isFsqrtCheap(SDValue X, SelectionDAG &DAG) const {
542    // Default behavior is to replace SQRT(X) with X*RSQRT(X).
543    return false;
544  }
545
546  /// Reciprocal estimate status values used by the functions below.
547  enum ReciprocalEstimate : int {
548    Unspecified = -1,
549    Disabled = 0,
550    Enabled = 1
551  };
552
553  /// Return a ReciprocalEstimate enum value for a square root of the given type
554  /// based on the function's attributes. If the operation is not overridden by
555  /// the function's attributes, "Unspecified" is returned and target defaults
556  /// are expected to be used for instruction selection.
557  int getRecipEstimateSqrtEnabled(EVT VT, MachineFunction &MF) const;
558
559  /// Return a ReciprocalEstimate enum value for a division of the given type
560  /// based on the function's attributes. If the operation is not overridden by
561  /// the function's attributes, "Unspecified" is returned and target defaults
562  /// are expected to be used for instruction selection.
563  int getRecipEstimateDivEnabled(EVT VT, MachineFunction &MF) const;
564
565  /// Return the refinement step count for a square root of the given type based
566  /// on the function's attributes. If the operation is not overridden by
567  /// the function's attributes, "Unspecified" is returned and target defaults
568  /// are expected to be used for instruction selection.
569  int getSqrtRefinementSteps(EVT VT, MachineFunction &MF) const;
570
571  /// Return the refinement step count for a division of the given type based
572  /// on the function's attributes. If the operation is not overridden by
573  /// the function's attributes, "Unspecified" is returned and target defaults
574  /// are expected to be used for instruction selection.
575  int getDivRefinementSteps(EVT VT, MachineFunction &MF) const;
576
577  /// Returns true if target has indicated at least one type should be bypassed.
578  bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }
579
580  /// Returns map of slow types for division or remainder with corresponding
581  /// fast types
582  const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
583    return BypassSlowDivWidths;
584  }
585
586  /// Return true only if vscale must be a power of two.
587  virtual bool isVScaleKnownToBeAPowerOfTwo() const { return false; }
588
589  /// Return true if Flow Control is an expensive operation that should be
590  /// avoided.
591  bool isJumpExpensive() const { return JumpIsExpensive; }
592
593  /// Return true if selects are only cheaper than branches if the branch is
594  /// unlikely to be predicted right.
595  bool isPredictableSelectExpensive() const {
596    return PredictableSelectIsExpensive;
597  }
598
599  virtual bool fallBackToDAGISel(const Instruction &Inst) const {
600    return false;
601  }
602
603  /// Return true if the following transform is beneficial:
604  /// fold (conv (load x)) -> (load (conv*)x)
605  /// On architectures that don't natively support some vector loads
606  /// efficiently, casting the load to a smaller vector of larger types and
607  /// loading is more efficient, however, this can be undone by optimizations in
608  /// dag combiner.
609  virtual bool isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
610                                       const SelectionDAG &DAG,
611                                       const MachineMemOperand &MMO) const;
612
613  /// Return true if the following transform is beneficial:
614  /// (store (y (conv x)), y*)) -> (store x, (x*))
615  virtual bool isStoreBitCastBeneficial(EVT StoreVT, EVT BitcastVT,
616                                        const SelectionDAG &DAG,
617                                        const MachineMemOperand &MMO) const {
618    // Default to the same logic as loads.
619    return isLoadBitCastBeneficial(StoreVT, BitcastVT, DAG, MMO);
620  }
621
622  /// Return true if it is expected to be cheaper to do a store of vector
623  /// constant with the given size and type for the address space than to
624  /// store the individual scalar element constants.
625  virtual bool storeOfVectorConstantIsCheap(bool IsZero, EVT MemVT,
626                                            unsigned NumElem,
627                                            unsigned AddrSpace) const {
628    return IsZero;
629  }
630
631  /// Allow store merging for the specified type after legalization in addition
632  /// to before legalization. This may transform stores that do not exist
633  /// earlier (for example, stores created from intrinsics).
634  virtual bool mergeStoresAfterLegalization(EVT MemVT) const {
635    return true;
636  }
637
638  /// Returns if it's reasonable to merge stores to MemVT size.
639  virtual bool canMergeStoresTo(unsigned AS, EVT MemVT,
640                                const MachineFunction &MF) const {
641    return true;
642  }
643
644  /// Return true if it is cheap to speculate a call to intrinsic cttz.
645  virtual bool isCheapToSpeculateCttz(Type *Ty) const {
646    return false;
647  }
648
649  /// Return true if it is cheap to speculate a call to intrinsic ctlz.
650  virtual bool isCheapToSpeculateCtlz(Type *Ty) const {
651    return false;
652  }
653
654  /// Return true if ctlz instruction is fast.
655  virtual bool isCtlzFast() const {
656    return false;
657  }
658
659  /// Return true if ctpop instruction is fast.
660  virtual bool isCtpopFast(EVT VT) const {
661    return isOperationLegal(ISD::CTPOP, VT);
662  }
663
664  /// Return the maximum number of "x & (x - 1)" operations that can be done
665  /// instead of deferring to a custom CTPOP.
666  virtual unsigned getCustomCtpopCost(EVT VT, ISD::CondCode Cond) const {
667    return 1;
668  }
669
670  /// Return true if instruction generated for equality comparison is folded
671  /// with instruction generated for signed comparison.
672  virtual bool isEqualityCmpFoldedWithSignedCmp() const { return true; }
673
674  /// Return true if the heuristic to prefer icmp eq zero should be used in code
675  /// gen prepare.
676  virtual bool preferZeroCompareBranch() const { return false; }
677
678  /// Return true if it is cheaper to split the store of a merged int val
679  /// from a pair of smaller values into multiple stores.
680  virtual bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const {
681    return false;
682  }
683
684  /// Return if the target supports combining a
685  /// chain like:
686  /// \code
687  ///   %andResult = and %val1, #mask
688  ///   %icmpResult = icmp %andResult, 0
689  /// \endcode
690  /// into a single machine instruction of a form like:
691  /// \code
692  ///   cc = test %register, #mask
693  /// \endcode
694  virtual bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
695    return false;
696  }
697
698  /// Return true if it is valid to merge the TargetMMOFlags in two SDNodes.
699  virtual bool
700  areTwoSDNodeTargetMMOFlagsMergeable(const MemSDNode &NodeX,
701                                      const MemSDNode &NodeY) const {
702    return true;
703  }
704
705  /// Use bitwise logic to make pairs of compares more efficient. For example:
706  /// and (seteq A, B), (seteq C, D) --> seteq (or (xor A, B), (xor C, D)), 0
707  /// This should be true when it takes more than one instruction to lower
708  /// setcc (cmp+set on x86 scalar), when bitwise ops are faster than logic on
709  /// condition bits (crand on PowerPC), and/or when reducing cmp+br is a win.
710  virtual bool convertSetCCLogicToBitwiseLogic(EVT VT) const {
711    return false;
712  }
713
714  /// Return the preferred operand type if the target has a quick way to compare
715  /// integer values of the given size. Assume that any legal integer type can
716  /// be compared efficiently. Targets may override this to allow illegal wide
717  /// types to return a vector type if there is support to compare that type.
718  virtual MVT hasFastEqualityCompare(unsigned NumBits) const {
719    MVT VT = MVT::getIntegerVT(NumBits);
720    return isTypeLegal(VT) ? VT : MVT::INVALID_SIMPLE_VALUE_TYPE;
721  }
722
723  /// Return true if the target should transform:
724  /// (X & Y) == Y ---> (~X & Y) == 0
725  /// (X & Y) != Y ---> (~X & Y) != 0
726  ///
727  /// This may be profitable if the target has a bitwise and-not operation that
728  /// sets comparison flags. A target may want to limit the transformation based
729  /// on the type of Y or if Y is a constant.
730  ///
731  /// Note that the transform will not occur if Y is known to be a power-of-2
732  /// because a mask and compare of a single bit can be handled by inverting the
733  /// predicate, for example:
734  /// (X & 8) == 8 ---> (X & 8) != 0
735  virtual bool hasAndNotCompare(SDValue Y) const {
736    return false;
737  }
738
739  /// Return true if the target has a bitwise and-not operation:
740  /// X = ~A & B
741  /// This can be used to simplify select or other instructions.
742  virtual bool hasAndNot(SDValue X) const {
743    // If the target has the more complex version of this operation, assume that
744    // it has this operation too.
745    return hasAndNotCompare(X);
746  }
747
748  /// Return true if the target has a bit-test instruction:
749  ///   (X & (1 << Y)) ==/!= 0
750  /// This knowledge can be used to prevent breaking the pattern,
751  /// or creating it if it could be recognized.
752  virtual bool hasBitTest(SDValue X, SDValue Y) const { return false; }
753
754  /// There are two ways to clear extreme bits (either low or high):
755  /// Mask:    x &  (-1 << y)  (the instcombine canonical form)
756  /// Shifts:  x >> y << y
757  /// Return true if the variant with 2 variable shifts is preferred.
758  /// Return false if there is no preference.
759  virtual bool shouldFoldMaskToVariableShiftPair(SDValue X) const {
760    // By default, let's assume that no one prefers shifts.
761    return false;
762  }
763
764  /// Return true if it is profitable to fold a pair of shifts into a mask.
765  /// This is usually true on most targets. But some targets, like Thumb1,
766  /// have immediate shift instructions, but no immediate "and" instruction;
767  /// this makes the fold unprofitable.
768  virtual bool shouldFoldConstantShiftPairToMask(const SDNode *N,
769                                                 CombineLevel Level) const {
770    return true;
771  }
772
773  /// Should we tranform the IR-optimal check for whether given truncation
774  /// down into KeptBits would be truncating or not:
775  ///   (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits)
776  /// Into it's more traditional form:
777  ///   ((%x << C) a>> C) dstcond %x
778  /// Return true if we should transform.
779  /// Return false if there is no preference.
780  virtual bool shouldTransformSignedTruncationCheck(EVT XVT,
781                                                    unsigned KeptBits) const {
782    // By default, let's assume that no one prefers shifts.
783    return false;
784  }
785
786  /// Given the pattern
787  ///   (X & (C l>>/<< Y)) ==/!= 0
788  /// return true if it should be transformed into:
789  ///   ((X <</l>> Y) & C) ==/!= 0
790  /// WARNING: if 'X' is a constant, the fold may deadlock!
791  /// FIXME: we could avoid passing XC, but we can't use isConstOrConstSplat()
792  ///        here because it can end up being not linked in.
793  virtual bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
794      SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
795      unsigned OldShiftOpcode, unsigned NewShiftOpcode,
796      SelectionDAG &DAG) const {
797    if (hasBitTest(X, Y)) {
798      // One interesting pattern that we'd want to form is 'bit test':
799      //   ((1 << Y) & C) ==/!= 0
800      // But we also need to be careful not to try to reverse that fold.
801
802      // Is this '1 << Y' ?
803      if (OldShiftOpcode == ISD::SHL && CC->isOne())
804        return false; // Keep the 'bit test' pattern.
805
806      // Will it be '1 << Y' after the transform ?
807      if (XC && NewShiftOpcode == ISD::SHL && XC->isOne())
808        return true; // Do form the 'bit test' pattern.
809    }
810
811    // If 'X' is a constant, and we transform, then we will immediately
812    // try to undo the fold, thus causing endless combine loop.
813    // So by default, let's assume everyone prefers the fold
814    // iff 'X' is not a constant.
815    return !XC;
816  }
817
818  // Return true if its desirable to perform the following transform:
819  // (fmul C, (uitofp Pow2))
820  //     -> (bitcast_to_FP (add (bitcast_to_INT C), Log2(Pow2) << mantissa))
821  // (fdiv C, (uitofp Pow2))
822  //     -> (bitcast_to_FP (sub (bitcast_to_INT C), Log2(Pow2) << mantissa))
823  //
824  // This is only queried after we have verified the transform will be bitwise
825  // equals.
826  //
827  // SDNode *N      : The FDiv/FMul node we want to transform.
828  // SDValue FPConst: The Float constant operand in `N`.
829  // SDValue IntPow2: The Integer power of 2 operand in `N`.
830  virtual bool optimizeFMulOrFDivAsShiftAddBitcast(SDNode *N, SDValue FPConst,
831                                                   SDValue IntPow2) const {
832    // Default to avoiding fdiv which is often very expensive.
833    return N->getOpcode() == ISD::FDIV;
834  }
835
836  // Given:
837  //    (icmp eq/ne (and X, C0), (shift X, C1))
838  // or
839  //    (icmp eq/ne X, (rotate X, CPow2))
840
841  // If C0 is a mask or shifted mask and the shift amt (C1) isolates the
842  // remaining bits (i.e something like `(x64 & UINT32_MAX) == (x64 >> 32)`)
843  // Do we prefer the shift to be shift-right, shift-left, or rotate.
844  // Note: Its only valid to convert the rotate version to the shift version iff
845  // the shift-amt (`C1`) is a power of 2 (including 0).
846  // If ShiftOpc (current Opcode) is returned, do nothing.
847  virtual unsigned preferedOpcodeForCmpEqPiecesOfOperand(
848      EVT VT, unsigned ShiftOpc, bool MayTransformRotate,
849      const APInt &ShiftOrRotateAmt,
850      const std::optional<APInt> &AndMask) const {
851    return ShiftOpc;
852  }
853
854  /// These two forms are equivalent:
855  ///   sub %y, (xor %x, -1)
856  ///   add (add %x, 1), %y
857  /// The variant with two add's is IR-canonical.
858  /// Some targets may prefer one to the other.
859  virtual bool preferIncOfAddToSubOfNot(EVT VT) const {
860    // By default, let's assume that everyone prefers the form with two add's.
861    return true;
862  }
863
864  // By default prefer folding (abs (sub nsw x, y)) -> abds(x, y). Some targets
865  // may want to avoid this to prevent loss of sub_nsw pattern.
866  virtual bool preferABDSToABSWithNSW(EVT VT) const {
867    return true;
868  }
869
870  // Return true if the target wants to transform Op(Splat(X)) -> Splat(Op(X))
871  virtual bool preferScalarizeSplat(SDNode *N) const { return true; }
872
873  // Return true if the target wants to transform:
874  // (TruncVT truncate(sext_in_reg(VT X, ExtVT))
875  //  -> (TruncVT sext_in_reg(truncate(VT X), ExtVT))
876  // Some targets might prefer pre-sextinreg to improve truncation/saturation.
877  virtual bool preferSextInRegOfTruncate(EVT TruncVT, EVT VT, EVT ExtVT) const {
878    return true;
879  }
880
881  /// Return true if the target wants to use the optimization that
882  /// turns ext(promotableInst1(...(promotableInstN(load)))) into
883  /// promotedInst1(...(promotedInstN(ext(load)))).
884  bool enableExtLdPromotion() const { return EnableExtLdPromotion; }
885
886  /// Return true if the target can combine store(extractelement VectorTy,
887  /// Idx).
888  /// \p Cost[out] gives the cost of that transformation when this is true.
889  virtual bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
890                                         unsigned &Cost) const {
891    return false;
892  }
893
894  /// Return true if the target shall perform extract vector element and store
895  /// given that the vector is known to be splat of constant.
896  /// \p Index[out] gives the index of the vector element to be extracted when
897  /// this is true.
898  virtual bool shallExtractConstSplatVectorElementToStore(
899      Type *VectorTy, unsigned ElemSizeInBits, unsigned &Index) const {
900    return false;
901  }
902
903  /// Return true if inserting a scalar into a variable element of an undef
904  /// vector is more efficiently handled by splatting the scalar instead.
905  virtual bool shouldSplatInsEltVarIndex(EVT) const {
906    return false;
907  }
908
909  /// Return true if target always benefits from combining into FMA for a
910  /// given value type. This must typically return false on targets where FMA
911  /// takes more cycles to execute than FADD.
912  virtual bool enableAggressiveFMAFusion(EVT VT) const { return false; }
913
914  /// Return true if target always benefits from combining into FMA for a
915  /// given value type. This must typically return false on targets where FMA
916  /// takes more cycles to execute than FADD.
917  virtual bool enableAggressiveFMAFusion(LLT Ty) const { return false; }
918
919  /// Return the ValueType of the result of SETCC operations.
920  virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
921                                 EVT VT) const;
922
923  /// Return the ValueType for comparison libcalls. Comparison libcalls include
924  /// floating point comparison calls, and Ordered/Unordered check calls on
925  /// floating point numbers.
926  virtual
927  MVT::SimpleValueType getCmpLibcallReturnType() const;
928
929  /// For targets without i1 registers, this gives the nature of the high-bits
930  /// of boolean values held in types wider than i1.
931  ///
932  /// "Boolean values" are special true/false values produced by nodes like
933  /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
934  /// Not to be confused with general values promoted from i1.  Some cpus
935  /// distinguish between vectors of boolean and scalars; the isVec parameter
936  /// selects between the two kinds.  For example on X86 a scalar boolean should
937  /// be zero extended from i1, while the elements of a vector of booleans
938  /// should be sign extended from i1.
939  ///
940  /// Some cpus also treat floating point types the same way as they treat
941  /// vectors instead of the way they treat scalars.
942  BooleanContent getBooleanContents(bool isVec, bool isFloat) const {
943    if (isVec)
944      return BooleanVectorContents;
945    return isFloat ? BooleanFloatContents : BooleanContents;
946  }
947
948  BooleanContent getBooleanContents(EVT Type) const {
949    return getBooleanContents(Type.isVector(), Type.isFloatingPoint());
950  }
951
952  /// Promote the given target boolean to a target boolean of the given type.
953  /// A target boolean is an integer value, not necessarily of type i1, the bits
954  /// of which conform to getBooleanContents.
955  ///
956  /// ValVT is the type of values that produced the boolean.
957  SDValue promoteTargetBoolean(SelectionDAG &DAG, SDValue Bool,
958                               EVT ValVT) const {
959    SDLoc dl(Bool);
960    EVT BoolVT =
961        getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), ValVT);
962    ISD::NodeType ExtendCode = getExtendForContent(getBooleanContents(ValVT));
963    return DAG.getNode(ExtendCode, dl, BoolVT, Bool);
964  }
965
966  /// Return target scheduling preference.
967  Sched::Preference getSchedulingPreference() const {
968    return SchedPreferenceInfo;
969  }
970
971  /// Some scheduler, e.g. hybrid, can switch to different scheduling heuristics
972  /// for different nodes. This function returns the preference (or none) for
973  /// the given node.
974  virtual Sched::Preference getSchedulingPreference(SDNode *) const {
975    return Sched::None;
976  }
977
978  /// Return the register class that should be used for the specified value
979  /// type.
980  virtual const TargetRegisterClass *getRegClassFor(MVT VT, bool isDivergent = false) const {
981    (void)isDivergent;
982    const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
983    assert(RC && "This value type is not natively supported!");
984    return RC;
985  }
986
987  /// Allows target to decide about the register class of the
988  /// specific value that is live outside the defining block.
989  /// Returns true if the value needs uniform register class.
990  virtual bool requiresUniformRegister(MachineFunction &MF,
991                                       const Value *) const {
992    return false;
993  }
994
995  /// Return the 'representative' register class for the specified value
996  /// type.
997  ///
998  /// The 'representative' register class is the largest legal super-reg
999  /// register class for the register class of the value type.  For example, on
1000  /// i386 the rep register class for i8, i16, and i32 are GR32; while the rep
1001  /// register class is GR64 on x86_64.
1002  virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
1003    const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
1004    return RC;
1005  }
1006
1007  /// Return the cost of the 'representative' register class for the specified
1008  /// value type.
1009  virtual uint8_t getRepRegClassCostFor(MVT VT) const {
1010    return RepRegClassCostForVT[VT.SimpleTy];
1011  }
1012
1013  /// Return the preferred strategy to legalize tihs SHIFT instruction, with
1014  /// \p ExpansionFactor being the recursion depth - how many expansion needed.
1015  enum class ShiftLegalizationStrategy {
1016    ExpandToParts,
1017    ExpandThroughStack,
1018    LowerToLibcall
1019  };
1020  virtual ShiftLegalizationStrategy
1021  preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N,
1022                                     unsigned ExpansionFactor) const {
1023    if (ExpansionFactor == 1)
1024      return ShiftLegalizationStrategy::ExpandToParts;
1025    return ShiftLegalizationStrategy::ExpandThroughStack;
1026  }
1027
1028  /// Return true if the target has native support for the specified value type.
1029  /// This means that it has a register that directly holds it without
1030  /// promotions or expansions.
1031  bool isTypeLegal(EVT VT) const {
1032    assert(!VT.isSimple() ||
1033           (unsigned)VT.getSimpleVT().SimpleTy < std::size(RegClassForVT));
1034    return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr;
1035  }
1036
1037  class ValueTypeActionImpl {
1038    /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
1039    /// that indicates how instruction selection should deal with the type.
1040    LegalizeTypeAction ValueTypeActions[MVT::VALUETYPE_SIZE];
1041
1042  public:
1043    ValueTypeActionImpl() {
1044      std::fill(std::begin(ValueTypeActions), std::end(ValueTypeActions),
1045                TypeLegal);
1046    }
1047
1048    LegalizeTypeAction getTypeAction(MVT VT) const {
1049      return ValueTypeActions[VT.SimpleTy];
1050    }
1051
1052    void setTypeAction(MVT VT, LegalizeTypeAction Action) {
1053      ValueTypeActions[VT.SimpleTy] = Action;
1054    }
1055  };
1056
1057  const ValueTypeActionImpl &getValueTypeActions() const {
1058    return ValueTypeActions;
1059  }
1060
1061  /// Return pair that represents the legalization kind (first) that needs to
1062  /// happen to EVT (second) in order to type-legalize it.
1063  ///
1064  /// First: how we should legalize values of this type, either it is already
1065  /// legal (return 'Legal') or we need to promote it to a larger type (return
1066  /// 'Promote'), or we need to expand it into multiple registers of smaller
1067  /// integer type (return 'Expand').  'Custom' is not an option.
1068  ///
1069  /// Second: for types supported by the target, this is an identity function.
1070  /// For types that must be promoted to larger types, this returns the larger
1071  /// type to promote to.  For integer types that are larger than the largest
1072  /// integer register, this contains one step in the expansion to get to the
1073  /// smaller register. For illegal floating point types, this returns the
1074  /// integer type to transform to.
1075  LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const;
1076
1077  /// Return how we should legalize values of this type, either it is already
1078  /// legal (return 'Legal') or we need to promote it to a larger type (return
1079  /// 'Promote'), or we need to expand it into multiple registers of smaller
1080  /// integer type (return 'Expand').  'Custom' is not an option.
1081  LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const {
1082    return getTypeConversion(Context, VT).first;
1083  }
1084  LegalizeTypeAction getTypeAction(MVT VT) const {
1085    return ValueTypeActions.getTypeAction(VT);
1086  }
1087
1088  /// For types supported by the target, this is an identity function.  For
1089  /// types that must be promoted to larger types, this returns the larger type
1090  /// to promote to.  For integer types that are larger than the largest integer
1091  /// register, this contains one step in the expansion to get to the smaller
1092  /// register. For illegal floating point types, this returns the integer type
1093  /// to transform to.
1094  virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
1095    return getTypeConversion(Context, VT).second;
1096  }
1097
1098  /// For types supported by the target, this is an identity function.  For
1099  /// types that must be expanded (i.e. integer types that are larger than the
1100  /// largest integer register or illegal floating point types), this returns
1101  /// the largest legal type it will be expanded to.
1102  EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
1103    assert(!VT.isVector());
1104    while (true) {
1105      switch (getTypeAction(Context, VT)) {
1106      case TypeLegal:
1107        return VT;
1108      case TypeExpandInteger:
1109        VT = getTypeToTransformTo(Context, VT);
1110        break;
1111      default:
1112        llvm_unreachable("Type is not legal nor is it to be expanded!");
1113      }
1114    }
1115  }
1116
1117  /// Vector types are broken down into some number of legal first class types.
1118  /// For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or SSE1, or 8
1119  /// promoted EVT::f64 values with the X86 FP stack.  Similarly, EVT::v2i64
1120  /// turns into 4 EVT::i32 values with both PPC and X86.
1121  ///
1122  /// This method returns the number of registers needed, and the VT for each
1123  /// register.  It also returns the VT and quantity of the intermediate values
1124  /// before they are promoted/expanded.
1125  unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
1126                                  EVT &IntermediateVT,
1127                                  unsigned &NumIntermediates,
1128                                  MVT &RegisterVT) const;
1129
1130  /// Certain targets such as MIPS require that some types such as vectors are
1131  /// always broken down into scalars in some contexts. This occurs even if the
1132  /// vector type is legal.
1133  virtual unsigned getVectorTypeBreakdownForCallingConv(
1134      LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
1135      unsigned &NumIntermediates, MVT &RegisterVT) const {
1136    return getVectorTypeBreakdown(Context, VT, IntermediateVT, NumIntermediates,
1137                                  RegisterVT);
1138  }
1139
1140  struct IntrinsicInfo {
1141    unsigned     opc = 0;          // target opcode
1142    EVT          memVT;            // memory VT
1143
1144    // value representing memory location
1145    PointerUnion<const Value *, const PseudoSourceValue *> ptrVal;
1146
1147    // Fallback address space for use if ptrVal is nullptr. std::nullopt means
1148    // unknown address space.
1149    std::optional<unsigned> fallbackAddressSpace;
1150
1151    int          offset = 0;       // offset off of ptrVal
1152    uint64_t     size = 0;         // the size of the memory location
1153                                   // (taken from memVT if zero)
1154    MaybeAlign align = Align(1);   // alignment
1155
1156    MachineMemOperand::Flags flags = MachineMemOperand::MONone;
1157    IntrinsicInfo() = default;
1158  };
1159
1160  /// Given an intrinsic, checks if on the target the intrinsic will need to map
1161  /// to a MemIntrinsicNode (touches memory). If this is the case, it returns
1162  /// true and store the intrinsic information into the IntrinsicInfo that was
1163  /// passed to the function.
1164  virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
1165                                  MachineFunction &,
1166                                  unsigned /*Intrinsic*/) const {
1167    return false;
1168  }
1169
1170  /// Returns true if the target can instruction select the specified FP
1171  /// immediate natively. If false, the legalizer will materialize the FP
1172  /// immediate as a load from a constant pool.
1173  virtual bool isFPImmLegal(const APFloat & /*Imm*/, EVT /*VT*/,
1174                            bool ForCodeSize = false) const {
1175    return false;
1176  }
1177
1178  /// Targets can use this to indicate that they only support *some*
1179  /// VECTOR_SHUFFLE operations, those with specific masks.  By default, if a
1180  /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to be
1181  /// legal.
1182  virtual bool isShuffleMaskLegal(ArrayRef<int> /*Mask*/, EVT /*VT*/) const {
1183    return true;
1184  }
1185
1186  /// Returns true if the operation can trap for the value type.
1187  ///
1188  /// VT must be a legal type. By default, we optimistically assume most
1189  /// operations don't trap except for integer divide and remainder.
1190  virtual bool canOpTrap(unsigned Op, EVT VT) const;
1191
1192  /// Similar to isShuffleMaskLegal. Targets can use this to indicate if there
1193  /// is a suitable VECTOR_SHUFFLE that can be used to replace a VAND with a
1194  /// constant pool entry.
1195  virtual bool isVectorClearMaskLegal(ArrayRef<int> /*Mask*/,
1196                                      EVT /*VT*/) const {
1197    return false;
1198  }
1199
1200  /// How to legalize this custom operation?
1201  virtual LegalizeAction getCustomOperationAction(SDNode &Op) const {
1202    return Legal;
1203  }
1204
1205  /// Return how this operation should be treated: either it is legal, needs to
1206  /// be promoted to a larger size, needs to be expanded to some other code
1207  /// sequence, or the target has a custom expander for it.
1208  LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
1209    if (VT.isExtended()) return Expand;
1210    // If a target-specific SDNode requires legalization, require the target
1211    // to provide custom legalization for it.
1212    if (Op >= std::size(OpActions[0]))
1213      return Custom;
1214    return OpActions[(unsigned)VT.getSimpleVT().SimpleTy][Op];
1215  }
1216
1217  /// Custom method defined by each target to indicate if an operation which
1218  /// may require a scale is supported natively by the target.
1219  /// If not, the operation is illegal.
1220  virtual bool isSupportedFixedPointOperation(unsigned Op, EVT VT,
1221                                              unsigned Scale) const {
1222    return false;
1223  }
1224
1225  /// Some fixed point operations may be natively supported by the target but
1226  /// only for specific scales. This method allows for checking
1227  /// if the width is supported by the target for a given operation that may
1228  /// depend on scale.
1229  LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT,
1230                                              unsigned Scale) const {
1231    auto Action = getOperationAction(Op, VT);
1232    if (Action != Legal)
1233      return Action;
1234
1235    // This operation is supported in this type but may only work on specific
1236    // scales.
1237    bool Supported;
1238    switch (Op) {
1239    default:
1240      llvm_unreachable("Unexpected fixed point operation.");
1241    case ISD::SMULFIX:
1242    case ISD::SMULFIXSAT:
1243    case ISD::UMULFIX:
1244    case ISD::UMULFIXSAT:
1245    case ISD::SDIVFIX:
1246    case ISD::SDIVFIXSAT:
1247    case ISD::UDIVFIX:
1248    case ISD::UDIVFIXSAT:
1249      Supported = isSupportedFixedPointOperation(Op, VT, Scale);
1250      break;
1251    }
1252
1253    return Supported ? Action : Expand;
1254  }
1255
1256  // If Op is a strict floating-point operation, return the result
1257  // of getOperationAction for the equivalent non-strict operation.
1258  LegalizeAction getStrictFPOperationAction(unsigned Op, EVT VT) const {
1259    unsigned EqOpc;
1260    switch (Op) {
1261      default: llvm_unreachable("Unexpected FP pseudo-opcode");
1262#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
1263      case ISD::STRICT_##DAGN: EqOpc = ISD::DAGN; break;
1264#define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
1265      case ISD::STRICT_##DAGN: EqOpc = ISD::SETCC; break;
1266#include "llvm/IR/ConstrainedOps.def"
1267    }
1268
1269    return getOperationAction(EqOpc, VT);
1270  }
1271
1272  /// Return true if the specified operation is legal on this target or can be
1273  /// made legal with custom lowering. This is used to help guide high-level
1274  /// lowering decisions. LegalOnly is an optional convenience for code paths
1275  /// traversed pre and post legalisation.
1276  bool isOperationLegalOrCustom(unsigned Op, EVT VT,
1277                                bool LegalOnly = false) const {
1278    if (LegalOnly)
1279      return isOperationLegal(Op, VT);
1280
1281    return (VT == MVT::Other || isTypeLegal(VT)) &&
1282      (getOperationAction(Op, VT) == Legal ||
1283       getOperationAction(Op, VT) == Custom);
1284  }
1285
1286  /// Return true if the specified operation is legal on this target or can be
1287  /// made legal using promotion. This is used to help guide high-level lowering
1288  /// decisions. LegalOnly is an optional convenience for code paths traversed
1289  /// pre and post legalisation.
1290  bool isOperationLegalOrPromote(unsigned Op, EVT VT,
1291                                 bool LegalOnly = false) const {
1292    if (LegalOnly)
1293      return isOperationLegal(Op, VT);
1294
1295    return (VT == MVT::Other || isTypeLegal(VT)) &&
1296      (getOperationAction(Op, VT) == Legal ||
1297       getOperationAction(Op, VT) == Promote);
1298  }
1299
1300  /// Return true if the specified operation is legal on this target or can be
1301  /// made legal with custom lowering or using promotion. This is used to help
1302  /// guide high-level lowering decisions. LegalOnly is an optional convenience
1303  /// for code paths traversed pre and post legalisation.
1304  bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT,
1305                                         bool LegalOnly = false) const {
1306    if (LegalOnly)
1307      return isOperationLegal(Op, VT);
1308
1309    return (VT == MVT::Other || isTypeLegal(VT)) &&
1310      (getOperationAction(Op, VT) == Legal ||
1311       getOperationAction(Op, VT) == Custom ||
1312       getOperationAction(Op, VT) == Promote);
1313  }
1314
1315  /// Return true if the operation uses custom lowering, regardless of whether
1316  /// the type is legal or not.
1317  bool isOperationCustom(unsigned Op, EVT VT) const {
1318    return getOperationAction(Op, VT) == Custom;
1319  }
1320
1321  /// Return true if lowering to a jump table is allowed.
1322  virtual bool areJTsAllowed(const Function *Fn) const {
1323    if (Fn->getFnAttribute("no-jump-tables").getValueAsBool())
1324      return false;
1325
1326    return isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
1327           isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
1328  }
1329
1330  /// Check whether the range [Low,High] fits in a machine word.
1331  bool rangeFitsInWord(const APInt &Low, const APInt &High,
1332                       const DataLayout &DL) const {
1333    // FIXME: Using the pointer type doesn't seem ideal.
1334    uint64_t BW = DL.getIndexSizeInBits(0u);
1335    uint64_t Range = (High - Low).getLimitedValue(UINT64_MAX - 1) + 1;
1336    return Range <= BW;
1337  }
1338
1339  /// Return true if lowering to a jump table is suitable for a set of case
1340  /// clusters which may contain \p NumCases cases, \p Range range of values.
1341  virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases,
1342                                      uint64_t Range, ProfileSummaryInfo *PSI,
1343                                      BlockFrequencyInfo *BFI) const;
1344
1345  /// Returns preferred type for switch condition.
1346  virtual MVT getPreferredSwitchConditionType(LLVMContext &Context,
1347                                              EVT ConditionVT) const;
1348
1349  /// Return true if lowering to a bit test is suitable for a set of case
1350  /// clusters which contains \p NumDests unique destinations, \p Low and
1351  /// \p High as its lowest and highest case values, and expects \p NumCmps
1352  /// case value comparisons. Check if the number of destinations, comparison
1353  /// metric, and range are all suitable.
1354  bool isSuitableForBitTests(unsigned NumDests, unsigned NumCmps,
1355                             const APInt &Low, const APInt &High,
1356                             const DataLayout &DL) const {
1357    // FIXME: I don't think NumCmps is the correct metric: a single case and a
1358    // range of cases both require only one branch to lower. Just looking at the
1359    // number of clusters and destinations should be enough to decide whether to
1360    // build bit tests.
1361
1362    // To lower a range with bit tests, the range must fit the bitwidth of a
1363    // machine word.
1364    if (!rangeFitsInWord(Low, High, DL))
1365      return false;
1366
1367    // Decide whether it's profitable to lower this range with bit tests. Each
1368    // destination requires a bit test and branch, and there is an overall range
1369    // check branch. For a small number of clusters, separate comparisons might
1370    // be cheaper, and for many destinations, splitting the range might be
1371    // better.
1372    return (NumDests == 1 && NumCmps >= 3) || (NumDests == 2 && NumCmps >= 5) ||
1373           (NumDests == 3 && NumCmps >= 6);
1374  }
1375
1376  /// Return true if the specified operation is illegal on this target or
1377  /// unlikely to be made legal with custom lowering. This is used to help guide
1378  /// high-level lowering decisions.
1379  bool isOperationExpand(unsigned Op, EVT VT) const {
1380    return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
1381  }
1382
1383  /// Return true if the specified operation is legal on this target.
1384  bool isOperationLegal(unsigned Op, EVT VT) const {
1385    return (VT == MVT::Other || isTypeLegal(VT)) &&
1386           getOperationAction(Op, VT) == Legal;
1387  }
1388
1389  /// Return how this load with extension should be treated: either it is legal,
1390  /// needs to be promoted to a larger size, needs to be expanded to some other
1391  /// code sequence, or the target has a custom expander for it.
1392  LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT,
1393                                  EVT MemVT) const {
1394    if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
1395    unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
1396    unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
1397    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::VALUETYPE_SIZE &&
1398           MemI < MVT::VALUETYPE_SIZE && "Table isn't big enough!");
1399    unsigned Shift = 4 * ExtType;
1400    return (LegalizeAction)((LoadExtActions[ValI][MemI] >> Shift) & 0xf);
1401  }
1402
1403  /// Return true if the specified load with extension is legal on this target.
1404  bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const {
1405    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal;
1406  }
1407
1408  /// Return true if the specified load with extension is legal or custom
1409  /// on this target.
1410  bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const {
1411    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal ||
1412           getLoadExtAction(ExtType, ValVT, MemVT) == Custom;
1413  }
1414
1415  /// Return how this store with truncation should be treated: either it is
1416  /// legal, needs to be promoted to a larger size, needs to be expanded to some
1417  /// other code sequence, or the target has a custom expander for it.
1418  LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const {
1419    if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
1420    unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
1421    unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
1422    assert(ValI < MVT::VALUETYPE_SIZE && MemI < MVT::VALUETYPE_SIZE &&
1423           "Table isn't big enough!");
1424    return TruncStoreActions[ValI][MemI];
1425  }
1426
1427  /// Return true if the specified store with truncation is legal on this
1428  /// target.
1429  bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
1430    return isTypeLegal(ValVT) && getTruncStoreAction(ValVT, MemVT) == Legal;
1431  }
1432
1433  /// Return true if the specified store with truncation has solution on this
1434  /// target.
1435  bool isTruncStoreLegalOrCustom(EVT ValVT, EVT MemVT) const {
1436    return isTypeLegal(ValVT) &&
1437      (getTruncStoreAction(ValVT, MemVT) == Legal ||
1438       getTruncStoreAction(ValVT, MemVT) == Custom);
1439  }
1440
1441  virtual bool canCombineTruncStore(EVT ValVT, EVT MemVT,
1442                                    bool LegalOnly) const {
1443    if (LegalOnly)
1444      return isTruncStoreLegal(ValVT, MemVT);
1445
1446    return isTruncStoreLegalOrCustom(ValVT, MemVT);
1447  }
1448
1449  /// Return how the indexed load should be treated: either it is legal, needs
1450  /// to be promoted to a larger size, needs to be expanded to some other code
1451  /// sequence, or the target has a custom expander for it.
1452  LegalizeAction getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
1453    return getIndexedModeAction(IdxMode, VT, IMAB_Load);
1454  }
1455
1456  /// Return true if the specified indexed load is legal on this target.
1457  bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
1458    return VT.isSimple() &&
1459      (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
1460       getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
1461  }
1462
1463  /// Return how the indexed store should be treated: either it is legal, needs
1464  /// to be promoted to a larger size, needs to be expanded to some other code
1465  /// sequence, or the target has a custom expander for it.
1466  LegalizeAction getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
1467    return getIndexedModeAction(IdxMode, VT, IMAB_Store);
1468  }
1469
1470  /// Return true if the specified indexed load is legal on this target.
1471  bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
1472    return VT.isSimple() &&
1473      (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
1474       getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
1475  }
1476
1477  /// Return how the indexed load should be treated: either it is legal, needs
1478  /// to be promoted to a larger size, needs to be expanded to some other code
1479  /// sequence, or the target has a custom expander for it.
1480  LegalizeAction getIndexedMaskedLoadAction(unsigned IdxMode, MVT VT) const {
1481    return getIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad);
1482  }
1483
1484  /// Return true if the specified indexed load is legal on this target.
1485  bool isIndexedMaskedLoadLegal(unsigned IdxMode, EVT VT) const {
1486    return VT.isSimple() &&
1487           (getIndexedMaskedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
1488            getIndexedMaskedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
1489  }
1490
1491  /// Return how the indexed store should be treated: either it is legal, needs
1492  /// to be promoted to a larger size, needs to be expanded to some other code
1493  /// sequence, or the target has a custom expander for it.
1494  LegalizeAction getIndexedMaskedStoreAction(unsigned IdxMode, MVT VT) const {
1495    return getIndexedModeAction(IdxMode, VT, IMAB_MaskedStore);
1496  }
1497
1498  /// Return true if the specified indexed load is legal on this target.
1499  bool isIndexedMaskedStoreLegal(unsigned IdxMode, EVT VT) const {
1500    return VT.isSimple() &&
1501           (getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
1502            getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
1503  }
1504
1505  /// Returns true if the index type for a masked gather/scatter requires
1506  /// extending
1507  virtual bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const { return false; }
1508
1509  // Returns true if Extend can be folded into the index of a masked gathers/scatters
1510  // on this target.
1511  virtual bool shouldRemoveExtendFromGSIndex(SDValue Extend, EVT DataVT) const {
1512    return false;
1513  }
1514
1515  // Return true if the target supports a scatter/gather instruction with
1516  // indices which are scaled by the particular value.  Note that all targets
1517  // must by definition support scale of 1.
1518  virtual bool isLegalScaleForGatherScatter(uint64_t Scale,
1519                                            uint64_t ElemSize) const {
1520    // MGATHER/MSCATTER are only required to support scaling by one or by the
1521    // element size.
1522    if (Scale != ElemSize && Scale != 1)
1523      return false;
1524    return true;
1525  }
1526
1527  /// Return how the condition code should be treated: either it is legal, needs
1528  /// to be expanded to some other code sequence, or the target has a custom
1529  /// expander for it.
1530  LegalizeAction
1531  getCondCodeAction(ISD::CondCode CC, MVT VT) const {
1532    assert((unsigned)CC < std::size(CondCodeActions) &&
1533           ((unsigned)VT.SimpleTy >> 3) < std::size(CondCodeActions[0]) &&
1534           "Table isn't big enough!");
1535    // See setCondCodeAction for how this is encoded.
1536    uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
1537    uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 3];
1538    LegalizeAction Action = (LegalizeAction) ((Value >> Shift) & 0xF);
1539    assert(Action != Promote && "Can't promote condition code!");
1540    return Action;
1541  }
1542
1543  /// Return true if the specified condition code is legal on this target.
1544  bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
1545    return getCondCodeAction(CC, VT) == Legal;
1546  }
1547
1548  /// Return true if the specified condition code is legal or custom on this
1549  /// target.
1550  bool isCondCodeLegalOrCustom(ISD::CondCode CC, MVT VT) const {
1551    return getCondCodeAction(CC, VT) == Legal ||
1552           getCondCodeAction(CC, VT) == Custom;
1553  }
1554
1555  /// If the action for this operation is to promote, this method returns the
1556  /// ValueType to promote to.
1557  MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
1558    assert(getOperationAction(Op, VT) == Promote &&
1559           "This operation isn't promoted!");
1560
1561    // See if this has an explicit type specified.
1562    std::map<std::pair<unsigned, MVT::SimpleValueType>,
1563             MVT::SimpleValueType>::const_iterator PTTI =
1564      PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
1565    if (PTTI != PromoteToType.end()) return PTTI->second;
1566
1567    assert((VT.isInteger() || VT.isFloatingPoint()) &&
1568           "Cannot autopromote this type, add it with AddPromotedToType.");
1569
1570    MVT NVT = VT;
1571    do {
1572      NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1);
1573      assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
1574             "Didn't find type to promote to!");
1575    } while (!isTypeLegal(NVT) ||
1576              getOperationAction(Op, NVT) == Promote);
1577    return NVT;
1578  }
1579
1580  virtual EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty,
1581                                     bool AllowUnknown = false) const {
1582    return getValueType(DL, Ty, AllowUnknown);
1583  }
1584
1585  /// Return the EVT corresponding to this LLVM type.  This is fixed by the LLVM
1586  /// operations except for the pointer size.  If AllowUnknown is true, this
1587  /// will return MVT::Other for types with no EVT counterpart (e.g. structs),
1588  /// otherwise it will assert.
1589  EVT getValueType(const DataLayout &DL, Type *Ty,
1590                   bool AllowUnknown = false) const {
1591    // Lower scalar pointers to native pointer types.
1592    if (auto *PTy = dyn_cast<PointerType>(Ty))
1593      return getPointerTy(DL, PTy->getAddressSpace());
1594
1595    if (auto *VTy = dyn_cast<VectorType>(Ty)) {
1596      Type *EltTy = VTy->getElementType();
1597      // Lower vectors of pointers to native pointer types.
1598      if (auto *PTy = dyn_cast<PointerType>(EltTy)) {
1599        EVT PointerTy(getPointerTy(DL, PTy->getAddressSpace()));
1600        EltTy = PointerTy.getTypeForEVT(Ty->getContext());
1601      }
1602      return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(EltTy, false),
1603                              VTy->getElementCount());
1604    }
1605
1606    return EVT::getEVT(Ty, AllowUnknown);
1607  }
1608
1609  EVT getMemValueType(const DataLayout &DL, Type *Ty,
1610                      bool AllowUnknown = false) const {
1611    // Lower scalar pointers to native pointer types.
1612    if (auto *PTy = dyn_cast<PointerType>(Ty))
1613      return getPointerMemTy(DL, PTy->getAddressSpace());
1614
1615    if (auto *VTy = dyn_cast<VectorType>(Ty)) {
1616      Type *EltTy = VTy->getElementType();
1617      if (auto *PTy = dyn_cast<PointerType>(EltTy)) {
1618        EVT PointerTy(getPointerMemTy(DL, PTy->getAddressSpace()));
1619        EltTy = PointerTy.getTypeForEVT(Ty->getContext());
1620      }
1621      return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(EltTy, false),
1622                              VTy->getElementCount());
1623    }
1624
1625    return getValueType(DL, Ty, AllowUnknown);
1626  }
1627
1628
1629  /// Return the MVT corresponding to this LLVM type. See getValueType.
1630  MVT getSimpleValueType(const DataLayout &DL, Type *Ty,
1631                         bool AllowUnknown = false) const {
1632    return getValueType(DL, Ty, AllowUnknown).getSimpleVT();
1633  }
1634
1635  /// Return the desired alignment for ByVal or InAlloca aggregate function
1636  /// arguments in the caller parameter area.  This is the actual alignment, not
1637  /// its logarithm.
1638  virtual uint64_t getByValTypeAlignment(Type *Ty, const DataLayout &DL) const;
1639
1640  /// Return the type of registers that this ValueType will eventually require.
1641  MVT getRegisterType(MVT VT) const {
1642    assert((unsigned)VT.SimpleTy < std::size(RegisterTypeForVT));
1643    return RegisterTypeForVT[VT.SimpleTy];
1644  }
1645
1646  /// Return the type of registers that this ValueType will eventually require.
1647  MVT getRegisterType(LLVMContext &Context, EVT VT) const {
1648    if (VT.isSimple())
1649      return getRegisterType(VT.getSimpleVT());
1650    if (VT.isVector()) {
1651      EVT VT1;
1652      MVT RegisterVT;
1653      unsigned NumIntermediates;
1654      (void)getVectorTypeBreakdown(Context, VT, VT1,
1655                                   NumIntermediates, RegisterVT);
1656      return RegisterVT;
1657    }
1658    if (VT.isInteger()) {
1659      return getRegisterType(Context, getTypeToTransformTo(Context, VT));
1660    }
1661    llvm_unreachable("Unsupported extended type!");
1662  }
1663
1664  /// Return the number of registers that this ValueType will eventually
1665  /// require.
1666  ///
1667  /// This is one for any types promoted to live in larger registers, but may be
1668  /// more than one for types (like i64) that are split into pieces.  For types
1669  /// like i140, which are first promoted then expanded, it is the number of
1670  /// registers needed to hold all the bits of the original type.  For an i140
1671  /// on a 32 bit machine this means 5 registers.
1672  ///
1673  /// RegisterVT may be passed as a way to override the default settings, for
1674  /// instance with i128 inline assembly operands on SystemZ.
1675  virtual unsigned
1676  getNumRegisters(LLVMContext &Context, EVT VT,
1677                  std::optional<MVT> RegisterVT = std::nullopt) const {
1678    if (VT.isSimple()) {
1679      assert((unsigned)VT.getSimpleVT().SimpleTy <
1680             std::size(NumRegistersForVT));
1681      return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
1682    }
1683    if (VT.isVector()) {
1684      EVT VT1;
1685      MVT VT2;
1686      unsigned NumIntermediates;
1687      return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
1688    }
1689    if (VT.isInteger()) {
1690      unsigned BitWidth = VT.getSizeInBits();
1691      unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
1692      return (BitWidth + RegWidth - 1) / RegWidth;
1693    }
1694    llvm_unreachable("Unsupported extended type!");
1695  }
1696
1697  /// Certain combinations of ABIs, Targets and features require that types
1698  /// are legal for some operations and not for other operations.
1699  /// For MIPS all vector types must be passed through the integer register set.
1700  virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context,
1701                                            CallingConv::ID CC, EVT VT) const {
1702    return getRegisterType(Context, VT);
1703  }
1704
1705  /// Certain targets require unusual breakdowns of certain types. For MIPS,
1706  /// this occurs when a vector type is used, as vector are passed through the
1707  /// integer register set.
1708  virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context,
1709                                                 CallingConv::ID CC,
1710                                                 EVT VT) const {
1711    return getNumRegisters(Context, VT);
1712  }
1713
1714  /// Certain targets have context sensitive alignment requirements, where one
1715  /// type has the alignment requirement of another type.
1716  virtual Align getABIAlignmentForCallingConv(Type *ArgTy,
1717                                              const DataLayout &DL) const {
1718    return DL.getABITypeAlign(ArgTy);
1719  }
1720
1721  /// If true, then instruction selection should seek to shrink the FP constant
1722  /// of the specified type to a smaller type in order to save space and / or
1723  /// reduce runtime.
1724  virtual bool ShouldShrinkFPConstant(EVT) const { return true; }
1725
1726  /// Return true if it is profitable to reduce a load to a smaller type.
1727  /// Example: (i16 (trunc (i32 (load x))) -> i16 load x
1728  virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
1729                                     EVT NewVT) const {
1730    // By default, assume that it is cheaper to extract a subvector from a wide
1731    // vector load rather than creating multiple narrow vector loads.
1732    if (NewVT.isVector() && !Load->hasOneUse())
1733      return false;
1734
1735    return true;
1736  }
1737
1738  /// Return true (the default) if it is profitable to remove a sext_inreg(x)
1739  /// where the sext is redundant, and use x directly.
1740  virtual bool shouldRemoveRedundantExtend(SDValue Op) const { return true; }
1741
1742  /// When splitting a value of the specified type into parts, does the Lo
1743  /// or Hi part come first?  This usually follows the endianness, except
1744  /// for ppcf128, where the Hi part always comes first.
1745  bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const {
1746    return DL.isBigEndian() || VT == MVT::ppcf128;
1747  }
1748
1749  /// If true, the target has custom DAG combine transformations that it can
1750  /// perform for the specified node.
1751  bool hasTargetDAGCombine(ISD::NodeType NT) const {
1752    assert(unsigned(NT >> 3) < std::size(TargetDAGCombineArray));
1753    return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
1754  }
1755
1756  unsigned getGatherAllAliasesMaxDepth() const {
1757    return GatherAllAliasesMaxDepth;
1758  }
1759
1760  /// Returns the size of the platform's va_list object.
1761  virtual unsigned getVaListSizeInBits(const DataLayout &DL) const {
1762    return getPointerTy(DL).getSizeInBits();
1763  }
1764
1765  /// Get maximum # of store operations permitted for llvm.memset
1766  ///
1767  /// This function returns the maximum number of store operations permitted
1768  /// to replace a call to llvm.memset. The value is set by the target at the
1769  /// performance threshold for such a replacement. If OptSize is true,
1770  /// return the limit for functions that have OptSize attribute.
1771  unsigned getMaxStoresPerMemset(bool OptSize) const {
1772    return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset;
1773  }
1774
1775  /// Get maximum # of store operations permitted for llvm.memcpy
1776  ///
1777  /// This function returns the maximum number of store operations permitted
1778  /// to replace a call to llvm.memcpy. The value is set by the target at the
1779  /// performance threshold for such a replacement. If OptSize is true,
1780  /// return the limit for functions that have OptSize attribute.
1781  unsigned getMaxStoresPerMemcpy(bool OptSize) const {
1782    return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy;
1783  }
1784
1785  /// \brief Get maximum # of store operations to be glued together
1786  ///
1787  /// This function returns the maximum number of store operations permitted
1788  /// to glue together during lowering of llvm.memcpy. The value is set by
1789  //  the target at the performance threshold for such a replacement.
1790  virtual unsigned getMaxGluedStoresPerMemcpy() const {
1791    return MaxGluedStoresPerMemcpy;
1792  }
1793
1794  /// Get maximum # of load operations permitted for memcmp
1795  ///
1796  /// This function returns the maximum number of load operations permitted
1797  /// to replace a call to memcmp. The value is set by the target at the
1798  /// performance threshold for such a replacement. If OptSize is true,
1799  /// return the limit for functions that have OptSize attribute.
1800  unsigned getMaxExpandSizeMemcmp(bool OptSize) const {
1801    return OptSize ? MaxLoadsPerMemcmpOptSize : MaxLoadsPerMemcmp;
1802  }
1803
1804  /// Get maximum # of store operations permitted for llvm.memmove
1805  ///
1806  /// This function returns the maximum number of store operations permitted
1807  /// to replace a call to llvm.memmove. The value is set by the target at the
1808  /// performance threshold for such a replacement. If OptSize is true,
1809  /// return the limit for functions that have OptSize attribute.
1810  unsigned getMaxStoresPerMemmove(bool OptSize) const {
1811    return OptSize ? MaxStoresPerMemmoveOptSize : MaxStoresPerMemmove;
1812  }
1813
1814  /// Determine if the target supports unaligned memory accesses.
1815  ///
1816  /// This function returns true if the target allows unaligned memory accesses
1817  /// of the specified type in the given address space. If true, it also returns
1818  /// a relative speed of the unaligned memory access in the last argument by
1819  /// reference. The higher the speed number the faster the operation comparing
1820  /// to a number returned by another such call. This is used, for example, in
1821  /// situations where an array copy/move/set is converted to a sequence of
1822  /// store operations. Its use helps to ensure that such replacements don't
1823  /// generate code that causes an alignment error (trap) on the target machine.
1824  virtual bool allowsMisalignedMemoryAccesses(
1825      EVT, unsigned AddrSpace = 0, Align Alignment = Align(1),
1826      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1827      unsigned * /*Fast*/ = nullptr) const {
1828    return false;
1829  }
1830
1831  /// LLT handling variant.
1832  virtual bool allowsMisalignedMemoryAccesses(
1833      LLT, unsigned AddrSpace = 0, Align Alignment = Align(1),
1834      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1835      unsigned * /*Fast*/ = nullptr) const {
1836    return false;
1837  }
1838
1839  /// This function returns true if the memory access is aligned or if the
1840  /// target allows this specific unaligned memory access. If the access is
1841  /// allowed, the optional final parameter returns a relative speed of the
1842  /// access (as defined by the target).
1843  bool allowsMemoryAccessForAlignment(
1844      LLVMContext &Context, const DataLayout &DL, EVT VT,
1845      unsigned AddrSpace = 0, Align Alignment = Align(1),
1846      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1847      unsigned *Fast = nullptr) const;
1848
1849  /// Return true if the memory access of this type is aligned or if the target
1850  /// allows this specific unaligned access for the given MachineMemOperand.
1851  /// If the access is allowed, the optional final parameter returns a relative
1852  /// speed of the access (as defined by the target).
1853  bool allowsMemoryAccessForAlignment(LLVMContext &Context,
1854                                      const DataLayout &DL, EVT VT,
1855                                      const MachineMemOperand &MMO,
1856                                      unsigned *Fast = nullptr) const;
1857
1858  /// Return true if the target supports a memory access of this type for the
1859  /// given address space and alignment. If the access is allowed, the optional
1860  /// final parameter returns the relative speed of the access (as defined by
1861  /// the target).
1862  virtual bool
1863  allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
1864                     unsigned AddrSpace = 0, Align Alignment = Align(1),
1865                     MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1866                     unsigned *Fast = nullptr) const;
1867
1868  /// Return true if the target supports a memory access of this type for the
1869  /// given MachineMemOperand. If the access is allowed, the optional
1870  /// final parameter returns the relative access speed (as defined by the
1871  /// target).
1872  bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
1873                          const MachineMemOperand &MMO,
1874                          unsigned *Fast = nullptr) const;
1875
1876  /// LLT handling variant.
1877  bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, LLT Ty,
1878                          const MachineMemOperand &MMO,
1879                          unsigned *Fast = nullptr) const;
1880
1881  /// Returns the target specific optimal type for load and store operations as
1882  /// a result of memset, memcpy, and memmove lowering.
1883  /// It returns EVT::Other if the type should be determined using generic
1884  /// target-independent logic.
1885  virtual EVT
1886  getOptimalMemOpType(const MemOp &Op,
1887                      const AttributeList & /*FuncAttributes*/) const {
1888    return MVT::Other;
1889  }
1890
1891  /// LLT returning variant.
1892  virtual LLT
1893  getOptimalMemOpLLT(const MemOp &Op,
1894                     const AttributeList & /*FuncAttributes*/) const {
1895    return LLT();
1896  }
1897
1898  /// Returns true if it's safe to use load / store of the specified type to
1899  /// expand memcpy / memset inline.
1900  ///
1901  /// This is mostly true for all types except for some special cases. For
1902  /// example, on X86 targets without SSE2 f64 load / store are done with fldl /
1903  /// fstpl which also does type conversion. Note the specified type doesn't
1904  /// have to be legal as the hook is used before type legalization.
1905  virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; }
1906
1907  /// Return lower limit for number of blocks in a jump table.
1908  virtual unsigned getMinimumJumpTableEntries() const;
1909
1910  /// Return lower limit of the density in a jump table.
1911  unsigned getMinimumJumpTableDensity(bool OptForSize) const;
1912
1913  /// Return upper limit for number of entries in a jump table.
1914  /// Zero if no limit.
1915  unsigned getMaximumJumpTableSize() const;
1916
1917  virtual bool isJumpTableRelative() const;
1918
1919  /// If a physical register, this specifies the register that
1920  /// llvm.savestack/llvm.restorestack should save and restore.
1921  Register getStackPointerRegisterToSaveRestore() const {
1922    return StackPointerRegisterToSaveRestore;
1923  }
1924
1925  /// If a physical register, this returns the register that receives the
1926  /// exception address on entry to an EH pad.
1927  virtual Register
1928  getExceptionPointerRegister(const Constant *PersonalityFn) const {
1929    return Register();
1930  }
1931
1932  /// If a physical register, this returns the register that receives the
1933  /// exception typeid on entry to a landing pad.
1934  virtual Register
1935  getExceptionSelectorRegister(const Constant *PersonalityFn) const {
1936    return Register();
1937  }
1938
1939  virtual bool needsFixedCatchObjects() const {
1940    report_fatal_error("Funclet EH is not implemented for this target");
1941  }
1942
1943  /// Return the minimum stack alignment of an argument.
1944  Align getMinStackArgumentAlignment() const {
1945    return MinStackArgumentAlignment;
1946  }
1947
1948  /// Return the minimum function alignment.
1949  Align getMinFunctionAlignment() const { return MinFunctionAlignment; }
1950
1951  /// Return the preferred function alignment.
1952  Align getPrefFunctionAlignment() const { return PrefFunctionAlignment; }
1953
1954  /// Return the preferred loop alignment.
1955  virtual Align getPrefLoopAlignment(MachineLoop *ML = nullptr) const;
1956
1957  /// Return the maximum amount of bytes allowed to be emitted when padding for
1958  /// alignment
1959  virtual unsigned
1960  getMaxPermittedBytesForAlignment(MachineBasicBlock *MBB) const;
1961
1962  /// Should loops be aligned even when the function is marked OptSize (but not
1963  /// MinSize).
1964  virtual bool alignLoopsWithOptSize() const { return false; }
1965
1966  /// If the target has a standard location for the stack protector guard,
1967  /// returns the address of that location. Otherwise, returns nullptr.
1968  /// DEPRECATED: please override useLoadStackGuardNode and customize
1969  ///             LOAD_STACK_GUARD, or customize \@llvm.stackguard().
1970  virtual Value *getIRStackGuard(IRBuilderBase &IRB) const;
1971
1972  /// Inserts necessary declarations for SSP (stack protection) purpose.
1973  /// Should be used only when getIRStackGuard returns nullptr.
1974  virtual void insertSSPDeclarations(Module &M) const;
1975
1976  /// Return the variable that's previously inserted by insertSSPDeclarations,
1977  /// if any, otherwise return nullptr. Should be used only when
1978  /// getIRStackGuard returns nullptr.
1979  virtual Value *getSDagStackGuard(const Module &M) const;
1980
1981  /// If this function returns true, stack protection checks should XOR the
1982  /// frame pointer (or whichever pointer is used to address locals) into the
1983  /// stack guard value before checking it. getIRStackGuard must return nullptr
1984  /// if this returns true.
1985  virtual bool useStackGuardXorFP() const { return false; }
1986
1987  /// If the target has a standard stack protection check function that
1988  /// performs validation and error handling, returns the function. Otherwise,
1989  /// returns nullptr. Must be previously inserted by insertSSPDeclarations.
1990  /// Should be used only when getIRStackGuard returns nullptr.
1991  virtual Function *getSSPStackGuardCheck(const Module &M) const;
1992
1993protected:
1994  Value *getDefaultSafeStackPointerLocation(IRBuilderBase &IRB,
1995                                            bool UseTLS) const;
1996
1997public:
1998  /// Returns the target-specific address of the unsafe stack pointer.
1999  virtual Value *getSafeStackPointerLocation(IRBuilderBase &IRB) const;
2000
2001  /// Returns the name of the symbol used to emit stack probes or the empty
2002  /// string if not applicable.
2003  virtual bool hasStackProbeSymbol(const MachineFunction &MF) const { return false; }
2004
2005  virtual bool hasInlineStackProbe(const MachineFunction &MF) const { return false; }
2006
2007  virtual StringRef getStackProbeSymbolName(const MachineFunction &MF) const {
2008    return "";
2009  }
2010
2011  /// Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g. we
2012  /// are happy to sink it into basic blocks. A cast may be free, but not
2013  /// necessarily a no-op. e.g. a free truncate from a 64-bit to 32-bit pointer.
2014  virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const;
2015
2016  /// Return true if the pointer arguments to CI should be aligned by aligning
2017  /// the object whose address is being passed. If so then MinSize is set to the
2018  /// minimum size the object must be to be aligned and PrefAlign is set to the
2019  /// preferred alignment.
2020  virtual bool shouldAlignPointerArgs(CallInst * /*CI*/, unsigned & /*MinSize*/,
2021                                      Align & /*PrefAlign*/) const {
2022    return false;
2023  }
2024
2025  //===--------------------------------------------------------------------===//
2026  /// \name Helpers for TargetTransformInfo implementations
2027  /// @{
2028
2029  /// Get the ISD node that corresponds to the Instruction class opcode.
2030  int InstructionOpcodeToISD(unsigned Opcode) const;
2031
2032  /// @}
2033
2034  //===--------------------------------------------------------------------===//
2035  /// \name Helpers for atomic expansion.
2036  /// @{
2037
2038  /// Returns the maximum atomic operation size (in bits) supported by
2039  /// the backend. Atomic operations greater than this size (as well
2040  /// as ones that are not naturally aligned), will be expanded by
2041  /// AtomicExpandPass into an __atomic_* library call.
2042  unsigned getMaxAtomicSizeInBitsSupported() const {
2043    return MaxAtomicSizeInBitsSupported;
2044  }
2045
2046  /// Returns the size in bits of the maximum div/rem the backend supports.
2047  /// Larger operations will be expanded by ExpandLargeDivRem.
2048  unsigned getMaxDivRemBitWidthSupported() const {
2049    return MaxDivRemBitWidthSupported;
2050  }
2051
2052  /// Returns the size in bits of the maximum larget fp convert the backend
2053  /// supports. Larger operations will be expanded by ExpandLargeFPConvert.
2054  unsigned getMaxLargeFPConvertBitWidthSupported() const {
2055    return MaxLargeFPConvertBitWidthSupported;
2056  }
2057
2058  /// Returns the size of the smallest cmpxchg or ll/sc instruction
2059  /// the backend supports.  Any smaller operations are widened in
2060  /// AtomicExpandPass.
2061  ///
2062  /// Note that *unlike* operations above the maximum size, atomic ops
2063  /// are still natively supported below the minimum; they just
2064  /// require a more complex expansion.
2065  unsigned getMinCmpXchgSizeInBits() const { return MinCmpXchgSizeInBits; }
2066
2067  /// Whether the target supports unaligned atomic operations.
2068  bool supportsUnalignedAtomics() const { return SupportsUnalignedAtomics; }
2069
2070  /// Whether AtomicExpandPass should automatically insert fences and reduce
2071  /// ordering for this atomic. This should be true for most architectures with
2072  /// weak memory ordering. Defaults to false.
2073  virtual bool shouldInsertFencesForAtomic(const Instruction *I) const {
2074    return false;
2075  }
2076
2077  /// Whether AtomicExpandPass should automatically insert a trailing fence
2078  /// without reducing the ordering for this atomic. Defaults to false.
2079  virtual bool
2080  shouldInsertTrailingFenceForAtomicStore(const Instruction *I) const {
2081    return false;
2082  }
2083
2084  /// Perform a load-linked operation on Addr, returning a "Value *" with the
2085  /// corresponding pointee type. This may entail some non-trivial operations to
2086  /// truncate or reconstruct types that will be illegal in the backend. See
2087  /// ARMISelLowering for an example implementation.
2088  virtual Value *emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy,
2089                                Value *Addr, AtomicOrdering Ord) const {
2090    llvm_unreachable("Load linked unimplemented on this target");
2091  }
2092
2093  /// Perform a store-conditional operation to Addr. Return the status of the
2094  /// store. This should be 0 if the store succeeded, non-zero otherwise.
2095  virtual Value *emitStoreConditional(IRBuilderBase &Builder, Value *Val,
2096                                      Value *Addr, AtomicOrdering Ord) const {
2097    llvm_unreachable("Store conditional unimplemented on this target");
2098  }
2099
2100  /// Perform a masked atomicrmw using a target-specific intrinsic. This
2101  /// represents the core LL/SC loop which will be lowered at a late stage by
2102  /// the backend. The target-specific intrinsic returns the loaded value and
2103  /// is not responsible for masking and shifting the result.
2104  virtual Value *emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder,
2105                                              AtomicRMWInst *AI,
2106                                              Value *AlignedAddr, Value *Incr,
2107                                              Value *Mask, Value *ShiftAmt,
2108                                              AtomicOrdering Ord) const {
2109    llvm_unreachable("Masked atomicrmw expansion unimplemented on this target");
2110  }
2111
2112  /// Perform a atomicrmw expansion using a target-specific way. This is
2113  /// expected to be called when masked atomicrmw and bit test atomicrmw don't
2114  /// work, and the target supports another way to lower atomicrmw.
2115  virtual void emitExpandAtomicRMW(AtomicRMWInst *AI) const {
2116    llvm_unreachable(
2117        "Generic atomicrmw expansion unimplemented on this target");
2118  }
2119
2120  /// Perform a bit test atomicrmw using a target-specific intrinsic. This
2121  /// represents the combined bit test intrinsic which will be lowered at a late
2122  /// stage by the backend.
2123  virtual void emitBitTestAtomicRMWIntrinsic(AtomicRMWInst *AI) const {
2124    llvm_unreachable(
2125        "Bit test atomicrmw expansion unimplemented on this target");
2126  }
2127
2128  /// Perform a atomicrmw which the result is only used by comparison, using a
2129  /// target-specific intrinsic. This represents the combined atomic and compare
2130  /// intrinsic which will be lowered at a late stage by the backend.
2131  virtual void emitCmpArithAtomicRMWIntrinsic(AtomicRMWInst *AI) const {
2132    llvm_unreachable(
2133        "Compare arith atomicrmw expansion unimplemented on this target");
2134  }
2135
2136  /// Perform a masked cmpxchg using a target-specific intrinsic. This
2137  /// represents the core LL/SC loop which will be lowered at a late stage by
2138  /// the backend. The target-specific intrinsic returns the loaded value and
2139  /// is not responsible for masking and shifting the result.
2140  virtual Value *emitMaskedAtomicCmpXchgIntrinsic(
2141      IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
2142      Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
2143    llvm_unreachable("Masked cmpxchg expansion unimplemented on this target");
2144  }
2145
2146  //===--------------------------------------------------------------------===//
2147  /// \name KCFI check lowering.
2148  /// @{
2149
2150  virtual MachineInstr *EmitKCFICheck(MachineBasicBlock &MBB,
2151                                      MachineBasicBlock::instr_iterator &MBBI,
2152                                      const TargetInstrInfo *TII) const {
2153    llvm_unreachable("KCFI is not supported on this target");
2154  }
2155
2156  /// @}
2157
2158  /// Inserts in the IR a target-specific intrinsic specifying a fence.
2159  /// It is called by AtomicExpandPass before expanding an
2160  ///   AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad
2161  ///   if shouldInsertFencesForAtomic returns true.
2162  ///
2163  /// Inst is the original atomic instruction, prior to other expansions that
2164  /// may be performed.
2165  ///
2166  /// This function should either return a nullptr, or a pointer to an IR-level
2167  ///   Instruction*. Even complex fence sequences can be represented by a
2168  ///   single Instruction* through an intrinsic to be lowered later.
2169  ///
2170  /// The default implementation emits an IR fence before any release (or
2171  ///   stronger) operation that stores, and after any acquire (or stronger)
2172  ///   operation. This is generally a correct implementation, but backends may
2173  ///   override if they wish to use alternative schemes (e.g. the PowerPC
2174  ///   standard ABI uses a fence before a seq_cst load instead of after a
2175  ///   seq_cst store).
2176  /// @{
2177  virtual Instruction *emitLeadingFence(IRBuilderBase &Builder,
2178                                        Instruction *Inst,
2179                                        AtomicOrdering Ord) const;
2180
2181  virtual Instruction *emitTrailingFence(IRBuilderBase &Builder,
2182                                         Instruction *Inst,
2183                                         AtomicOrdering Ord) const;
2184  /// @}
2185
2186  // Emits code that executes when the comparison result in the ll/sc
2187  // expansion of a cmpxchg instruction is such that the store-conditional will
2188  // not execute.  This makes it possible to balance out the load-linked with
2189  // a dedicated instruction, if desired.
2190  // E.g., on ARM, if ldrex isn't followed by strex, the exclusive monitor would
2191  // be unnecessarily held, except if clrex, inserted by this hook, is executed.
2192  virtual void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const {}
2193
2194  /// Returns true if arguments should be sign-extended in lib calls.
2195  virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
2196    return IsSigned;
2197  }
2198
2199  /// Returns true if arguments should be extended in lib calls.
2200  virtual bool shouldExtendTypeInLibCall(EVT Type) const {
2201    return true;
2202  }
2203
2204  /// Returns how the given (atomic) load should be expanded by the
2205  /// IR-level AtomicExpand pass.
2206  virtual AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const {
2207    return AtomicExpansionKind::None;
2208  }
2209
2210  /// Returns how the given (atomic) load should be cast by the IR-level
2211  /// AtomicExpand pass.
2212  virtual AtomicExpansionKind shouldCastAtomicLoadInIR(LoadInst *LI) const {
2213    if (LI->getType()->isFloatingPointTy())
2214      return AtomicExpansionKind::CastToInteger;
2215    return AtomicExpansionKind::None;
2216  }
2217
2218  /// Returns how the given (atomic) store should be expanded by the IR-level
2219  /// AtomicExpand pass into. For instance AtomicExpansionKind::Expand will try
2220  /// to use an atomicrmw xchg.
2221  virtual AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const {
2222    return AtomicExpansionKind::None;
2223  }
2224
2225  /// Returns how the given (atomic) store should be cast by the IR-level
2226  /// AtomicExpand pass into. For instance AtomicExpansionKind::CastToInteger
2227  /// will try to cast the operands to integer values.
2228  virtual AtomicExpansionKind shouldCastAtomicStoreInIR(StoreInst *SI) const {
2229    if (SI->getValueOperand()->getType()->isFloatingPointTy())
2230      return AtomicExpansionKind::CastToInteger;
2231    return AtomicExpansionKind::None;
2232  }
2233
2234  /// Returns how the given atomic cmpxchg should be expanded by the IR-level
2235  /// AtomicExpand pass.
2236  virtual AtomicExpansionKind
2237  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
2238    return AtomicExpansionKind::None;
2239  }
2240
2241  /// Returns how the IR-level AtomicExpand pass should expand the given
2242  /// AtomicRMW, if at all. Default is to never expand.
2243  virtual AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
2244    return RMW->isFloatingPointOperation() ?
2245      AtomicExpansionKind::CmpXChg : AtomicExpansionKind::None;
2246  }
2247
2248  /// Returns how the given atomic atomicrmw should be cast by the IR-level
2249  /// AtomicExpand pass.
2250  virtual AtomicExpansionKind
2251  shouldCastAtomicRMWIInIR(AtomicRMWInst *RMWI) const {
2252    if (RMWI->getOperation() == AtomicRMWInst::Xchg &&
2253        (RMWI->getValOperand()->getType()->isFloatingPointTy() ||
2254         RMWI->getValOperand()->getType()->isPointerTy()))
2255      return AtomicExpansionKind::CastToInteger;
2256
2257    return AtomicExpansionKind::None;
2258  }
2259
2260  /// On some platforms, an AtomicRMW that never actually modifies the value
2261  /// (such as fetch_add of 0) can be turned into a fence followed by an
2262  /// atomic load. This may sound useless, but it makes it possible for the
2263  /// processor to keep the cacheline shared, dramatically improving
2264  /// performance. And such idempotent RMWs are useful for implementing some
2265  /// kinds of locks, see for example (justification + benchmarks):
2266  /// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
2267  /// This method tries doing that transformation, returning the atomic load if
2268  /// it succeeds, and nullptr otherwise.
2269  /// If shouldExpandAtomicLoadInIR returns true on that load, it will undergo
2270  /// another round of expansion.
2271  virtual LoadInst *
2272  lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
2273    return nullptr;
2274  }
2275
2276  /// Returns how the platform's atomic operations are extended (ZERO_EXTEND,
2277  /// SIGN_EXTEND, or ANY_EXTEND).
2278  virtual ISD::NodeType getExtendForAtomicOps() const {
2279    return ISD::ZERO_EXTEND;
2280  }
2281
2282  /// Returns how the platform's atomic compare and swap expects its comparison
2283  /// value to be extended (ZERO_EXTEND, SIGN_EXTEND, or ANY_EXTEND). This is
2284  /// separate from getExtendForAtomicOps, which is concerned with the
2285  /// sign-extension of the instruction's output, whereas here we are concerned
2286  /// with the sign-extension of the input. For targets with compare-and-swap
2287  /// instructions (or sub-word comparisons in their LL/SC loop expansions),
2288  /// the input can be ANY_EXTEND, but the output will still have a specific
2289  /// extension.
2290  virtual ISD::NodeType getExtendForAtomicCmpSwapArg() const {
2291    return ISD::ANY_EXTEND;
2292  }
2293
2294  /// @}
2295
2296  /// Returns true if we should normalize
2297  /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
2298  /// select(N0|N1, X, Y) => select(N0, select(N1, X, Y, Y)) if it is likely
2299  /// that it saves us from materializing N0 and N1 in an integer register.
2300  /// Targets that are able to perform and/or on flags should return false here.
2301  virtual bool shouldNormalizeToSelectSequence(LLVMContext &Context,
2302                                               EVT VT) const {
2303    // If a target has multiple condition registers, then it likely has logical
2304    // operations on those registers.
2305    if (hasMultipleConditionRegisters())
2306      return false;
2307    // Only do the transform if the value won't be split into multiple
2308    // registers.
2309    LegalizeTypeAction Action = getTypeAction(Context, VT);
2310    return Action != TypeExpandInteger && Action != TypeExpandFloat &&
2311      Action != TypeSplitVector;
2312  }
2313
2314  virtual bool isProfitableToCombineMinNumMaxNum(EVT VT) const { return true; }
2315
2316  /// Return true if a select of constants (select Cond, C1, C2) should be
2317  /// transformed into simple math ops with the condition value. For example:
2318  /// select Cond, C1, C1-1 --> add (zext Cond), C1-1
2319  virtual bool convertSelectOfConstantsToMath(EVT VT) const {
2320    return false;
2321  }
2322
2323  /// Return true if it is profitable to transform an integer
2324  /// multiplication-by-constant into simpler operations like shifts and adds.
2325  /// This may be true if the target does not directly support the
2326  /// multiplication operation for the specified type or the sequence of simpler
2327  /// ops is faster than the multiply.
2328  virtual bool decomposeMulByConstant(LLVMContext &Context,
2329                                      EVT VT, SDValue C) const {
2330    return false;
2331  }
2332
2333  /// Return true if it may be profitable to transform
2334  /// (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2).
2335  /// This may not be true if c1 and c2 can be represented as immediates but
2336  /// c1*c2 cannot, for example.
2337  /// The target should check if c1, c2 and c1*c2 can be represented as
2338  /// immediates, or have to be materialized into registers. If it is not sure
2339  /// about some cases, a default true can be returned to let the DAGCombiner
2340  /// decide.
2341  /// AddNode is (add x, c1), and ConstNode is c2.
2342  virtual bool isMulAddWithConstProfitable(SDValue AddNode,
2343                                           SDValue ConstNode) const {
2344    return true;
2345  }
2346
2347  /// Return true if it is more correct/profitable to use strict FP_TO_INT
2348  /// conversion operations - canonicalizing the FP source value instead of
2349  /// converting all cases and then selecting based on value.
2350  /// This may be true if the target throws exceptions for out of bounds
2351  /// conversions or has fast FP CMOV.
2352  virtual bool shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT,
2353                                        bool IsSigned) const {
2354    return false;
2355  }
2356
2357  /// Return true if it is beneficial to expand an @llvm.powi.* intrinsic.
2358  /// If not optimizing for size, expanding @llvm.powi.* intrinsics is always
2359  /// considered beneficial.
2360  /// If optimizing for size, expansion is only considered beneficial for upto
2361  /// 5 multiplies and a divide (if the exponent is negative).
2362  bool isBeneficialToExpandPowI(int64_t Exponent, bool OptForSize) const {
2363    if (Exponent < 0)
2364      Exponent = -Exponent;
2365    uint64_t E = static_cast<uint64_t>(Exponent);
2366    return !OptForSize || (llvm::popcount(E) + Log2_64(E) < 7);
2367  }
2368
2369  //===--------------------------------------------------------------------===//
2370  // TargetLowering Configuration Methods - These methods should be invoked by
2371  // the derived class constructor to configure this object for the target.
2372  //
2373protected:
2374  /// Specify how the target extends the result of integer and floating point
2375  /// boolean values from i1 to a wider type.  See getBooleanContents.
2376  void setBooleanContents(BooleanContent Ty) {
2377    BooleanContents = Ty;
2378    BooleanFloatContents = Ty;
2379  }
2380
2381  /// Specify how the target extends the result of integer and floating point
2382  /// boolean values from i1 to a wider type.  See getBooleanContents.
2383  void setBooleanContents(BooleanContent IntTy, BooleanContent FloatTy) {
2384    BooleanContents = IntTy;
2385    BooleanFloatContents = FloatTy;
2386  }
2387
2388  /// Specify how the target extends the result of a vector boolean value from a
2389  /// vector of i1 to a wider type.  See getBooleanContents.
2390  void setBooleanVectorContents(BooleanContent Ty) {
2391    BooleanVectorContents = Ty;
2392  }
2393
2394  /// Specify the target scheduling preference.
2395  void setSchedulingPreference(Sched::Preference Pref) {
2396    SchedPreferenceInfo = Pref;
2397  }
2398
2399  /// Indicate the minimum number of blocks to generate jump tables.
2400  void setMinimumJumpTableEntries(unsigned Val);
2401
2402  /// Indicate the maximum number of entries in jump tables.
2403  /// Set to zero to generate unlimited jump tables.
2404  void setMaximumJumpTableSize(unsigned);
2405
2406  /// If set to a physical register, this specifies the register that
2407  /// llvm.savestack/llvm.restorestack should save and restore.
2408  void setStackPointerRegisterToSaveRestore(Register R) {
2409    StackPointerRegisterToSaveRestore = R;
2410  }
2411
2412  /// Tells the code generator that the target has multiple (allocatable)
2413  /// condition registers that can be used to store the results of comparisons
2414  /// for use by selects and conditional branches. With multiple condition
2415  /// registers, the code generator will not aggressively sink comparisons into
2416  /// the blocks of their users.
2417  void setHasMultipleConditionRegisters(bool hasManyRegs = true) {
2418    HasMultipleConditionRegisters = hasManyRegs;
2419  }
2420
2421  /// Tells the code generator that the target has BitExtract instructions.
2422  /// The code generator will aggressively sink "shift"s into the blocks of
2423  /// their users if the users will generate "and" instructions which can be
2424  /// combined with "shift" to BitExtract instructions.
2425  void setHasExtractBitsInsn(bool hasExtractInsn = true) {
2426    HasExtractBitsInsn = hasExtractInsn;
2427  }
2428
2429  /// Tells the code generator not to expand logic operations on comparison
2430  /// predicates into separate sequences that increase the amount of flow
2431  /// control.
2432  void setJumpIsExpensive(bool isExpensive = true);
2433
2434  /// Tells the code generator which bitwidths to bypass.
2435  void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
2436    BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
2437  }
2438
2439  /// Add the specified register class as an available regclass for the
2440  /// specified value type. This indicates the selector can handle values of
2441  /// that class natively.
2442  void addRegisterClass(MVT VT, const TargetRegisterClass *RC) {
2443    assert((unsigned)VT.SimpleTy < std::size(RegClassForVT));
2444    RegClassForVT[VT.SimpleTy] = RC;
2445  }
2446
2447  /// Return the largest legal super-reg register class of the register class
2448  /// for the specified type and its associated "cost".
2449  virtual std::pair<const TargetRegisterClass *, uint8_t>
2450  findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const;
2451
2452  /// Once all of the register classes are added, this allows us to compute
2453  /// derived properties we expose.
2454  void computeRegisterProperties(const TargetRegisterInfo *TRI);
2455
2456  /// Indicate that the specified operation does not work with the specified
2457  /// type and indicate what to do about it. Note that VT may refer to either
2458  /// the type of a result or that of an operand of Op.
2459  void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action) {
2460    assert(Op < std::size(OpActions[0]) && "Table isn't big enough!");
2461    OpActions[(unsigned)VT.SimpleTy][Op] = Action;
2462  }
2463  void setOperationAction(ArrayRef<unsigned> Ops, MVT VT,
2464                          LegalizeAction Action) {
2465    for (auto Op : Ops)
2466      setOperationAction(Op, VT, Action);
2467  }
2468  void setOperationAction(ArrayRef<unsigned> Ops, ArrayRef<MVT> VTs,
2469                          LegalizeAction Action) {
2470    for (auto VT : VTs)
2471      setOperationAction(Ops, VT, Action);
2472  }
2473
2474  /// Indicate that the specified load with extension does not work with the
2475  /// specified type and indicate what to do about it.
2476  void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT,
2477                        LegalizeAction Action) {
2478    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() &&
2479           MemVT.isValid() && "Table isn't big enough!");
2480    assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
2481    unsigned Shift = 4 * ExtType;
2482    LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] &= ~((uint16_t)0xF << Shift);
2483    LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] |= (uint16_t)Action << Shift;
2484  }
2485  void setLoadExtAction(ArrayRef<unsigned> ExtTypes, MVT ValVT, MVT MemVT,
2486                        LegalizeAction Action) {
2487    for (auto ExtType : ExtTypes)
2488      setLoadExtAction(ExtType, ValVT, MemVT, Action);
2489  }
2490  void setLoadExtAction(ArrayRef<unsigned> ExtTypes, MVT ValVT,
2491                        ArrayRef<MVT> MemVTs, LegalizeAction Action) {
2492    for (auto MemVT : MemVTs)
2493      setLoadExtAction(ExtTypes, ValVT, MemVT, Action);
2494  }
2495
2496  /// Indicate that the specified truncating store does not work with the
2497  /// specified type and indicate what to do about it.
2498  void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action) {
2499    assert(ValVT.isValid() && MemVT.isValid() && "Table isn't big enough!");
2500    TruncStoreActions[(unsigned)ValVT.SimpleTy][MemVT.SimpleTy] = Action;
2501  }
2502
2503  /// Indicate that the specified indexed load does or does not work with the
2504  /// specified type and indicate what to do abort it.
2505  ///
2506  /// NOTE: All indexed mode loads are initialized to Expand in
2507  /// TargetLowering.cpp
2508  void setIndexedLoadAction(ArrayRef<unsigned> IdxModes, MVT VT,
2509                            LegalizeAction Action) {
2510    for (auto IdxMode : IdxModes)
2511      setIndexedModeAction(IdxMode, VT, IMAB_Load, Action);
2512  }
2513
2514  void setIndexedLoadAction(ArrayRef<unsigned> IdxModes, ArrayRef<MVT> VTs,
2515                            LegalizeAction Action) {
2516    for (auto VT : VTs)
2517      setIndexedLoadAction(IdxModes, VT, Action);
2518  }
2519
2520  /// Indicate that the specified indexed store does or does not work with the
2521  /// specified type and indicate what to do about it.
2522  ///
2523  /// NOTE: All indexed mode stores are initialized to Expand in
2524  /// TargetLowering.cpp
2525  void setIndexedStoreAction(ArrayRef<unsigned> IdxModes, MVT VT,
2526                             LegalizeAction Action) {
2527    for (auto IdxMode : IdxModes)
2528      setIndexedModeAction(IdxMode, VT, IMAB_Store, Action);
2529  }
2530
2531  void setIndexedStoreAction(ArrayRef<unsigned> IdxModes, ArrayRef<MVT> VTs,
2532                             LegalizeAction Action) {
2533    for (auto VT : VTs)
2534      setIndexedStoreAction(IdxModes, VT, Action);
2535  }
2536
2537  /// Indicate that the specified indexed masked load does or does not work with
2538  /// the specified type and indicate what to do about it.
2539  ///
2540  /// NOTE: All indexed mode masked loads are initialized to Expand in
2541  /// TargetLowering.cpp
2542  void setIndexedMaskedLoadAction(unsigned IdxMode, MVT VT,
2543                                  LegalizeAction Action) {
2544    setIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad, Action);
2545  }
2546
2547  /// Indicate that the specified indexed masked store does or does not work
2548  /// with the specified type and indicate what to do about it.
2549  ///
2550  /// NOTE: All indexed mode masked stores are initialized to Expand in
2551  /// TargetLowering.cpp
2552  void setIndexedMaskedStoreAction(unsigned IdxMode, MVT VT,
2553                                   LegalizeAction Action) {
2554    setIndexedModeAction(IdxMode, VT, IMAB_MaskedStore, Action);
2555  }
2556
2557  /// Indicate that the specified condition code is or isn't supported on the
2558  /// target and indicate what to do about it.
2559  void setCondCodeAction(ArrayRef<ISD::CondCode> CCs, MVT VT,
2560                         LegalizeAction Action) {
2561    for (auto CC : CCs) {
2562      assert(VT.isValid() && (unsigned)CC < std::size(CondCodeActions) &&
2563             "Table isn't big enough!");
2564      assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
2565      /// The lower 3 bits of the SimpleTy index into Nth 4bit set from the
2566      /// 32-bit value and the upper 29 bits index into the second dimension of
2567      /// the array to select what 32-bit value to use.
2568      uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
2569      CondCodeActions[CC][VT.SimpleTy >> 3] &= ~((uint32_t)0xF << Shift);
2570      CondCodeActions[CC][VT.SimpleTy >> 3] |= (uint32_t)Action << Shift;
2571    }
2572  }
2573  void setCondCodeAction(ArrayRef<ISD::CondCode> CCs, ArrayRef<MVT> VTs,
2574                         LegalizeAction Action) {
2575    for (auto VT : VTs)
2576      setCondCodeAction(CCs, VT, Action);
2577  }
2578
2579  /// If Opc/OrigVT is specified as being promoted, the promotion code defaults
2580  /// to trying a larger integer/fp until it can find one that works. If that
2581  /// default is insufficient, this method can be used by the target to override
2582  /// the default.
2583  void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
2584    PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
2585  }
2586
2587  /// Convenience method to set an operation to Promote and specify the type
2588  /// in a single call.
2589  void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
2590    setOperationAction(Opc, OrigVT, Promote);
2591    AddPromotedToType(Opc, OrigVT, DestVT);
2592  }
2593  void setOperationPromotedToType(ArrayRef<unsigned> Ops, MVT OrigVT,
2594                                  MVT DestVT) {
2595    for (auto Op : Ops) {
2596      setOperationAction(Op, OrigVT, Promote);
2597      AddPromotedToType(Op, OrigVT, DestVT);
2598    }
2599  }
2600
2601  /// Targets should invoke this method for each target independent node that
2602  /// they want to provide a custom DAG combiner for by implementing the
2603  /// PerformDAGCombine virtual method.
2604  void setTargetDAGCombine(ArrayRef<ISD::NodeType> NTs) {
2605    for (auto NT : NTs) {
2606      assert(unsigned(NT >> 3) < std::size(TargetDAGCombineArray));
2607      TargetDAGCombineArray[NT >> 3] |= 1 << (NT & 7);
2608    }
2609  }
2610
2611  /// Set the target's minimum function alignment.
2612  void setMinFunctionAlignment(Align Alignment) {
2613    MinFunctionAlignment = Alignment;
2614  }
2615
2616  /// Set the target's preferred function alignment.  This should be set if
2617  /// there is a performance benefit to higher-than-minimum alignment
2618  void setPrefFunctionAlignment(Align Alignment) {
2619    PrefFunctionAlignment = Alignment;
2620  }
2621
2622  /// Set the target's preferred loop alignment. Default alignment is one, it
2623  /// means the target does not care about loop alignment. The target may also
2624  /// override getPrefLoopAlignment to provide per-loop values.
2625  void setPrefLoopAlignment(Align Alignment) { PrefLoopAlignment = Alignment; }
2626  void setMaxBytesForAlignment(unsigned MaxBytes) {
2627    MaxBytesForAlignment = MaxBytes;
2628  }
2629
2630  /// Set the minimum stack alignment of an argument.
2631  void setMinStackArgumentAlignment(Align Alignment) {
2632    MinStackArgumentAlignment = Alignment;
2633  }
2634
2635  /// Set the maximum atomic operation size supported by the
2636  /// backend. Atomic operations greater than this size (as well as
2637  /// ones that are not naturally aligned), will be expanded by
2638  /// AtomicExpandPass into an __atomic_* library call.
2639  void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits) {
2640    MaxAtomicSizeInBitsSupported = SizeInBits;
2641  }
2642
2643  /// Set the size in bits of the maximum div/rem the backend supports.
2644  /// Larger operations will be expanded by ExpandLargeDivRem.
2645  void setMaxDivRemBitWidthSupported(unsigned SizeInBits) {
2646    MaxDivRemBitWidthSupported = SizeInBits;
2647  }
2648
2649  /// Set the size in bits of the maximum fp convert the backend supports.
2650  /// Larger operations will be expanded by ExpandLargeFPConvert.
2651  void setMaxLargeFPConvertBitWidthSupported(unsigned SizeInBits) {
2652    MaxLargeFPConvertBitWidthSupported = SizeInBits;
2653  }
2654
2655  /// Sets the minimum cmpxchg or ll/sc size supported by the backend.
2656  void setMinCmpXchgSizeInBits(unsigned SizeInBits) {
2657    MinCmpXchgSizeInBits = SizeInBits;
2658  }
2659
2660  /// Sets whether unaligned atomic operations are supported.
2661  void setSupportsUnalignedAtomics(bool UnalignedSupported) {
2662    SupportsUnalignedAtomics = UnalignedSupported;
2663  }
2664
2665public:
2666  //===--------------------------------------------------------------------===//
2667  // Addressing mode description hooks (used by LSR etc).
2668  //
2669
2670  /// CodeGenPrepare sinks address calculations into the same BB as Load/Store
2671  /// instructions reading the address. This allows as much computation as
2672  /// possible to be done in the address mode for that operand. This hook lets
2673  /// targets also pass back when this should be done on intrinsics which
2674  /// load/store.
2675  virtual bool getAddrModeArguments(IntrinsicInst * /*I*/,
2676                                    SmallVectorImpl<Value*> &/*Ops*/,
2677                                    Type *&/*AccessTy*/) const {
2678    return false;
2679  }
2680
2681  /// This represents an addressing mode of:
2682  ///    BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
2683  /// If BaseGV is null,  there is no BaseGV.
2684  /// If BaseOffs is zero, there is no base offset.
2685  /// If HasBaseReg is false, there is no base register.
2686  /// If Scale is zero, there is no ScaleReg.  Scale of 1 indicates a reg with
2687  /// no scale.
2688  struct AddrMode {
2689    GlobalValue *BaseGV = nullptr;
2690    int64_t      BaseOffs = 0;
2691    bool         HasBaseReg = false;
2692    int64_t      Scale = 0;
2693    AddrMode() = default;
2694  };
2695
2696  /// Return true if the addressing mode represented by AM is legal for this
2697  /// target, for a load/store of the specified type.
2698  ///
2699  /// The type may be VoidTy, in which case only return true if the addressing
2700  /// mode is legal for a load/store of any legal type.  TODO: Handle
2701  /// pre/postinc as well.
2702  ///
2703  /// If the address space cannot be determined, it will be -1.
2704  ///
2705  /// TODO: Remove default argument
2706  virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
2707                                     Type *Ty, unsigned AddrSpace,
2708                                     Instruction *I = nullptr) const;
2709
2710  /// Return the prefered common base offset.
2711  virtual int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset,
2712                                                 int64_t MaxOffset) const {
2713    return 0;
2714  }
2715
2716  /// Return true if the specified immediate is legal icmp immediate, that is
2717  /// the target has icmp instructions which can compare a register against the
2718  /// immediate without having to materialize the immediate into a register.
2719  virtual bool isLegalICmpImmediate(int64_t) const {
2720    return true;
2721  }
2722
2723  /// Return true if the specified immediate is legal add immediate, that is the
2724  /// target has add instructions which can add a register with the immediate
2725  /// without having to materialize the immediate into a register.
2726  virtual bool isLegalAddImmediate(int64_t) const {
2727    return true;
2728  }
2729
2730  /// Return true if the specified immediate is legal for the value input of a
2731  /// store instruction.
2732  virtual bool isLegalStoreImmediate(int64_t Value) const {
2733    // Default implementation assumes that at least 0 works since it is likely
2734    // that a zero register exists or a zero immediate is allowed.
2735    return Value == 0;
2736  }
2737
2738  /// Return true if it's significantly cheaper to shift a vector by a uniform
2739  /// scalar than by an amount which will vary across each lane. On x86 before
2740  /// AVX2 for example, there is a "psllw" instruction for the former case, but
2741  /// no simple instruction for a general "a << b" operation on vectors.
2742  /// This should also apply to lowering for vector funnel shifts (rotates).
2743  virtual bool isVectorShiftByScalarCheap(Type *Ty) const {
2744    return false;
2745  }
2746
2747  /// Given a shuffle vector SVI representing a vector splat, return a new
2748  /// scalar type of size equal to SVI's scalar type if the new type is more
2749  /// profitable. Returns nullptr otherwise. For example under MVE float splats
2750  /// are converted to integer to prevent the need to move from SPR to GPR
2751  /// registers.
2752  virtual Type* shouldConvertSplatType(ShuffleVectorInst* SVI) const {
2753    return nullptr;
2754  }
2755
2756  /// Given a set in interconnected phis of type 'From' that are loaded/stored
2757  /// or bitcast to type 'To', return true if the set should be converted to
2758  /// 'To'.
2759  virtual bool shouldConvertPhiType(Type *From, Type *To) const {
2760    return (From->isIntegerTy() || From->isFloatingPointTy()) &&
2761           (To->isIntegerTy() || To->isFloatingPointTy());
2762  }
2763
2764  /// Returns true if the opcode is a commutative binary operation.
2765  virtual bool isCommutativeBinOp(unsigned Opcode) const {
2766    // FIXME: This should get its info from the td file.
2767    switch (Opcode) {
2768    case ISD::ADD:
2769    case ISD::SMIN:
2770    case ISD::SMAX:
2771    case ISD::UMIN:
2772    case ISD::UMAX:
2773    case ISD::MUL:
2774    case ISD::MULHU:
2775    case ISD::MULHS:
2776    case ISD::SMUL_LOHI:
2777    case ISD::UMUL_LOHI:
2778    case ISD::FADD:
2779    case ISD::FMUL:
2780    case ISD::AND:
2781    case ISD::OR:
2782    case ISD::XOR:
2783    case ISD::SADDO:
2784    case ISD::UADDO:
2785    case ISD::ADDC:
2786    case ISD::ADDE:
2787    case ISD::SADDSAT:
2788    case ISD::UADDSAT:
2789    case ISD::FMINNUM:
2790    case ISD::FMAXNUM:
2791    case ISD::FMINNUM_IEEE:
2792    case ISD::FMAXNUM_IEEE:
2793    case ISD::FMINIMUM:
2794    case ISD::FMAXIMUM:
2795    case ISD::AVGFLOORS:
2796    case ISD::AVGFLOORU:
2797    case ISD::AVGCEILS:
2798    case ISD::AVGCEILU:
2799    case ISD::ABDS:
2800    case ISD::ABDU:
2801      return true;
2802    default: return false;
2803    }
2804  }
2805
2806  /// Return true if the node is a math/logic binary operator.
2807  virtual bool isBinOp(unsigned Opcode) const {
2808    // A commutative binop must be a binop.
2809    if (isCommutativeBinOp(Opcode))
2810      return true;
2811    // These are non-commutative binops.
2812    switch (Opcode) {
2813    case ISD::SUB:
2814    case ISD::SHL:
2815    case ISD::SRL:
2816    case ISD::SRA:
2817    case ISD::ROTL:
2818    case ISD::ROTR:
2819    case ISD::SDIV:
2820    case ISD::UDIV:
2821    case ISD::SREM:
2822    case ISD::UREM:
2823    case ISD::SSUBSAT:
2824    case ISD::USUBSAT:
2825    case ISD::FSUB:
2826    case ISD::FDIV:
2827    case ISD::FREM:
2828      return true;
2829    default:
2830      return false;
2831    }
2832  }
2833
2834  /// Return true if it's free to truncate a value of type FromTy to type
2835  /// ToTy. e.g. On x86 it's free to truncate a i32 value in register EAX to i16
2836  /// by referencing its sub-register AX.
2837  /// Targets must return false when FromTy <= ToTy.
2838  virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const {
2839    return false;
2840  }
2841
2842  /// Return true if a truncation from FromTy to ToTy is permitted when deciding
2843  /// whether a call is in tail position. Typically this means that both results
2844  /// would be assigned to the same register or stack slot, but it could mean
2845  /// the target performs adequate checks of its own before proceeding with the
2846  /// tail call.  Targets must return false when FromTy <= ToTy.
2847  virtual bool allowTruncateForTailCall(Type *FromTy, Type *ToTy) const {
2848    return false;
2849  }
2850
2851  virtual bool isTruncateFree(EVT FromVT, EVT ToVT) const { return false; }
2852  virtual bool isTruncateFree(LLT FromTy, LLT ToTy, const DataLayout &DL,
2853                              LLVMContext &Ctx) const {
2854    return isTruncateFree(getApproximateEVTForLLT(FromTy, DL, Ctx),
2855                          getApproximateEVTForLLT(ToTy, DL, Ctx));
2856  }
2857
2858  /// Return true if truncating the specific node Val to type VT2 is free.
2859  virtual bool isTruncateFree(SDValue Val, EVT VT2) const {
2860    // Fallback to type matching.
2861    return isTruncateFree(Val.getValueType(), VT2);
2862  }
2863
2864  virtual bool isProfitableToHoist(Instruction *I) const { return true; }
2865
2866  /// Return true if the extension represented by \p I is free.
2867  /// Unlikely the is[Z|FP]ExtFree family which is based on types,
2868  /// this method can use the context provided by \p I to decide
2869  /// whether or not \p I is free.
2870  /// This method extends the behavior of the is[Z|FP]ExtFree family.
2871  /// In other words, if is[Z|FP]Free returns true, then this method
2872  /// returns true as well. The converse is not true.
2873  /// The target can perform the adequate checks by overriding isExtFreeImpl.
2874  /// \pre \p I must be a sign, zero, or fp extension.
2875  bool isExtFree(const Instruction *I) const {
2876    switch (I->getOpcode()) {
2877    case Instruction::FPExt:
2878      if (isFPExtFree(EVT::getEVT(I->getType()),
2879                      EVT::getEVT(I->getOperand(0)->getType())))
2880        return true;
2881      break;
2882    case Instruction::ZExt:
2883      if (isZExtFree(I->getOperand(0)->getType(), I->getType()))
2884        return true;
2885      break;
2886    case Instruction::SExt:
2887      break;
2888    default:
2889      llvm_unreachable("Instruction is not an extension");
2890    }
2891    return isExtFreeImpl(I);
2892  }
2893
2894  /// Return true if \p Load and \p Ext can form an ExtLoad.
2895  /// For example, in AArch64
2896  ///   %L = load i8, i8* %ptr
2897  ///   %E = zext i8 %L to i32
2898  /// can be lowered into one load instruction
2899  ///   ldrb w0, [x0]
2900  bool isExtLoad(const LoadInst *Load, const Instruction *Ext,
2901                 const DataLayout &DL) const {
2902    EVT VT = getValueType(DL, Ext->getType());
2903    EVT LoadVT = getValueType(DL, Load->getType());
2904
2905    // If the load has other users and the truncate is not free, the ext
2906    // probably isn't free.
2907    if (!Load->hasOneUse() && (isTypeLegal(LoadVT) || !isTypeLegal(VT)) &&
2908        !isTruncateFree(Ext->getType(), Load->getType()))
2909      return false;
2910
2911    // Check whether the target supports casts folded into loads.
2912    unsigned LType;
2913    if (isa<ZExtInst>(Ext))
2914      LType = ISD::ZEXTLOAD;
2915    else {
2916      assert(isa<SExtInst>(Ext) && "Unexpected ext type!");
2917      LType = ISD::SEXTLOAD;
2918    }
2919
2920    return isLoadExtLegal(LType, VT, LoadVT);
2921  }
2922
2923  /// Return true if any actual instruction that defines a value of type FromTy
2924  /// implicitly zero-extends the value to ToTy in the result register.
2925  ///
2926  /// The function should return true when it is likely that the truncate can
2927  /// be freely folded with an instruction defining a value of FromTy. If
2928  /// the defining instruction is unknown (because you're looking at a
2929  /// function argument, PHI, etc.) then the target may require an
2930  /// explicit truncate, which is not necessarily free, but this function
2931  /// does not deal with those cases.
2932  /// Targets must return false when FromTy >= ToTy.
2933  virtual bool isZExtFree(Type *FromTy, Type *ToTy) const {
2934    return false;
2935  }
2936
2937  virtual bool isZExtFree(EVT FromTy, EVT ToTy) const { return false; }
2938  virtual bool isZExtFree(LLT FromTy, LLT ToTy, const DataLayout &DL,
2939                          LLVMContext &Ctx) const {
2940    return isZExtFree(getApproximateEVTForLLT(FromTy, DL, Ctx),
2941                      getApproximateEVTForLLT(ToTy, DL, Ctx));
2942  }
2943
2944  /// Return true if zero-extending the specific node Val to type VT2 is free
2945  /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or
2946  /// because it's folded such as X86 zero-extending loads).
2947  virtual bool isZExtFree(SDValue Val, EVT VT2) const {
2948    return isZExtFree(Val.getValueType(), VT2);
2949  }
2950
2951  /// Return true if sign-extension from FromTy to ToTy is cheaper than
2952  /// zero-extension.
2953  virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const {
2954    return false;
2955  }
2956
2957  /// Return true if this constant should be sign extended when promoting to
2958  /// a larger type.
2959  virtual bool signExtendConstant(const ConstantInt *C) const { return false; }
2960
2961  /// Return true if sinking I's operands to the same basic block as I is
2962  /// profitable, e.g. because the operands can be folded into a target
2963  /// instruction during instruction selection. After calling the function
2964  /// \p Ops contains the Uses to sink ordered by dominance (dominating users
2965  /// come first).
2966  virtual bool shouldSinkOperands(Instruction *I,
2967                                  SmallVectorImpl<Use *> &Ops) const {
2968    return false;
2969  }
2970
2971  /// Try to optimize extending or truncating conversion instructions (like
2972  /// zext, trunc, fptoui, uitofp) for the target.
2973  virtual bool
2974  optimizeExtendOrTruncateConversion(Instruction *I, Loop *L,
2975                                     const TargetTransformInfo &TTI) const {
2976    return false;
2977  }
2978
2979  /// Return true if the target supplies and combines to a paired load
2980  /// two loaded values of type LoadedType next to each other in memory.
2981  /// RequiredAlignment gives the minimal alignment constraints that must be met
2982  /// to be able to select this paired load.
2983  ///
2984  /// This information is *not* used to generate actual paired loads, but it is
2985  /// used to generate a sequence of loads that is easier to combine into a
2986  /// paired load.
2987  /// For instance, something like this:
2988  /// a = load i64* addr
2989  /// b = trunc i64 a to i32
2990  /// c = lshr i64 a, 32
2991  /// d = trunc i64 c to i32
2992  /// will be optimized into:
2993  /// b = load i32* addr1
2994  /// d = load i32* addr2
2995  /// Where addr1 = addr2 +/- sizeof(i32).
2996  ///
2997  /// In other words, unless the target performs a post-isel load combining,
2998  /// this information should not be provided because it will generate more
2999  /// loads.
3000  virtual bool hasPairedLoad(EVT /*LoadedType*/,
3001                             Align & /*RequiredAlignment*/) const {
3002    return false;
3003  }
3004
3005  /// Return true if the target has a vector blend instruction.
3006  virtual bool hasVectorBlend() const { return false; }
3007
3008  /// Get the maximum supported factor for interleaved memory accesses.
3009  /// Default to be the minimum interleave factor: 2.
3010  virtual unsigned getMaxSupportedInterleaveFactor() const { return 2; }
3011
3012  /// Lower an interleaved load to target specific intrinsics. Return
3013  /// true on success.
3014  ///
3015  /// \p LI is the vector load instruction.
3016  /// \p Shuffles is the shufflevector list to DE-interleave the loaded vector.
3017  /// \p Indices is the corresponding indices for each shufflevector.
3018  /// \p Factor is the interleave factor.
3019  virtual bool lowerInterleavedLoad(LoadInst *LI,
3020                                    ArrayRef<ShuffleVectorInst *> Shuffles,
3021                                    ArrayRef<unsigned> Indices,
3022                                    unsigned Factor) const {
3023    return false;
3024  }
3025
3026  /// Lower an interleaved store to target specific intrinsics. Return
3027  /// true on success.
3028  ///
3029  /// \p SI is the vector store instruction.
3030  /// \p SVI is the shufflevector to RE-interleave the stored vector.
3031  /// \p Factor is the interleave factor.
3032  virtual bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
3033                                     unsigned Factor) const {
3034    return false;
3035  }
3036
3037  /// Lower a deinterleave intrinsic to a target specific load intrinsic.
3038  /// Return true on success. Currently only supports
3039  /// llvm.experimental.vector.deinterleave2
3040  ///
3041  /// \p DI is the deinterleave intrinsic.
3042  /// \p LI is the accompanying load instruction
3043  virtual bool lowerDeinterleaveIntrinsicToLoad(IntrinsicInst *DI,
3044                                                LoadInst *LI) const {
3045    return false;
3046  }
3047
3048  /// Lower an interleave intrinsic to a target specific store intrinsic.
3049  /// Return true on success. Currently only supports
3050  /// llvm.experimental.vector.interleave2
3051  ///
3052  /// \p II is the interleave intrinsic.
3053  /// \p SI is the accompanying store instruction
3054  virtual bool lowerInterleaveIntrinsicToStore(IntrinsicInst *II,
3055                                               StoreInst *SI) const {
3056    return false;
3057  }
3058
3059  /// Return true if an fpext operation is free (for instance, because
3060  /// single-precision floating-point numbers are implicitly extended to
3061  /// double-precision).
3062  virtual bool isFPExtFree(EVT DestVT, EVT SrcVT) const {
3063    assert(SrcVT.isFloatingPoint() && DestVT.isFloatingPoint() &&
3064           "invalid fpext types");
3065    return false;
3066  }
3067
3068  /// Return true if an fpext operation input to an \p Opcode operation is free
3069  /// (for instance, because half-precision floating-point numbers are
3070  /// implicitly extended to float-precision) for an FMA instruction.
3071  virtual bool isFPExtFoldable(const MachineInstr &MI, unsigned Opcode,
3072                               LLT DestTy, LLT SrcTy) const {
3073    return false;
3074  }
3075
3076  /// Return true if an fpext operation input to an \p Opcode operation is free
3077  /// (for instance, because half-precision floating-point numbers are
3078  /// implicitly extended to float-precision) for an FMA instruction.
3079  virtual bool isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode,
3080                               EVT DestVT, EVT SrcVT) const {
3081    assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
3082           "invalid fpext types");
3083    return isFPExtFree(DestVT, SrcVT);
3084  }
3085
3086  /// Return true if folding a vector load into ExtVal (a sign, zero, or any
3087  /// extend node) is profitable.
3088  virtual bool isVectorLoadExtDesirable(SDValue ExtVal) const { return false; }
3089
3090  /// Return true if an fneg operation is free to the point where it is never
3091  /// worthwhile to replace it with a bitwise operation.
3092  virtual bool isFNegFree(EVT VT) const {
3093    assert(VT.isFloatingPoint());
3094    return false;
3095  }
3096
3097  /// Return true if an fabs operation is free to the point where it is never
3098  /// worthwhile to replace it with a bitwise operation.
3099  virtual bool isFAbsFree(EVT VT) const {
3100    assert(VT.isFloatingPoint());
3101    return false;
3102  }
3103
3104  /// Return true if an FMA operation is faster than a pair of fmul and fadd
3105  /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
3106  /// returns true, otherwise fmuladd is expanded to fmul + fadd.
3107  ///
3108  /// NOTE: This may be called before legalization on types for which FMAs are
3109  /// not legal, but should return true if those types will eventually legalize
3110  /// to types that support FMAs. After legalization, it will only be called on
3111  /// types that support FMAs (via Legal or Custom actions)
3112  virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
3113                                          EVT) const {
3114    return false;
3115  }
3116
3117  /// Return true if an FMA operation is faster than a pair of fmul and fadd
3118  /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
3119  /// returns true, otherwise fmuladd is expanded to fmul + fadd.
3120  ///
3121  /// NOTE: This may be called before legalization on types for which FMAs are
3122  /// not legal, but should return true if those types will eventually legalize
3123  /// to types that support FMAs. After legalization, it will only be called on
3124  /// types that support FMAs (via Legal or Custom actions)
3125  virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
3126                                          LLT) const {
3127    return false;
3128  }
3129
3130  /// IR version
3131  virtual bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *) const {
3132    return false;
3133  }
3134
3135  /// Returns true if \p MI can be combined with another instruction to
3136  /// form TargetOpcode::G_FMAD. \p N may be an TargetOpcode::G_FADD,
3137  /// TargetOpcode::G_FSUB, or an TargetOpcode::G_FMUL which will be
3138  /// distributed into an fadd/fsub.
3139  virtual bool isFMADLegal(const MachineInstr &MI, LLT Ty) const {
3140    assert((MI.getOpcode() == TargetOpcode::G_FADD ||
3141            MI.getOpcode() == TargetOpcode::G_FSUB ||
3142            MI.getOpcode() == TargetOpcode::G_FMUL) &&
3143           "unexpected node in FMAD forming combine");
3144    switch (Ty.getScalarSizeInBits()) {
3145    case 16:
3146      return isOperationLegal(TargetOpcode::G_FMAD, MVT::f16);
3147    case 32:
3148      return isOperationLegal(TargetOpcode::G_FMAD, MVT::f32);
3149    case 64:
3150      return isOperationLegal(TargetOpcode::G_FMAD, MVT::f64);
3151    default:
3152      break;
3153    }
3154
3155    return false;
3156  }
3157
3158  /// Returns true if be combined with to form an ISD::FMAD. \p N may be an
3159  /// ISD::FADD, ISD::FSUB, or an ISD::FMUL which will be distributed into an
3160  /// fadd/fsub.
3161  virtual bool isFMADLegal(const SelectionDAG &DAG, const SDNode *N) const {
3162    assert((N->getOpcode() == ISD::FADD || N->getOpcode() == ISD::FSUB ||
3163            N->getOpcode() == ISD::FMUL) &&
3164           "unexpected node in FMAD forming combine");
3165    return isOperationLegal(ISD::FMAD, N->getValueType(0));
3166  }
3167
3168  // Return true when the decision to generate FMA's (or FMS, FMLA etc) rather
3169  // than FMUL and ADD is delegated to the machine combiner.
3170  virtual bool generateFMAsInMachineCombiner(EVT VT,
3171                                             CodeGenOptLevel OptLevel) const {
3172    return false;
3173  }
3174
3175  /// Return true if it's profitable to narrow operations of type SrcVT to
3176  /// DestVT. e.g. on x86, it's profitable to narrow from i32 to i8 but not from
3177  /// i32 to i16.
3178  virtual bool isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
3179    return false;
3180  }
3181
3182  /// Return true if pulling a binary operation into a select with an identity
3183  /// constant is profitable. This is the inverse of an IR transform.
3184  /// Example: X + (Cond ? Y : 0) --> Cond ? (X + Y) : X
3185  virtual bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode,
3186                                                    EVT VT) const {
3187    return false;
3188  }
3189
3190  /// Return true if it is beneficial to convert a load of a constant to
3191  /// just the constant itself.
3192  /// On some targets it might be more efficient to use a combination of
3193  /// arithmetic instructions to materialize the constant instead of loading it
3194  /// from a constant pool.
3195  virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
3196                                                 Type *Ty) const {
3197    return false;
3198  }
3199
3200  /// Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type
3201  /// from this source type with this index. This is needed because
3202  /// EXTRACT_SUBVECTOR usually has custom lowering that depends on the index of
3203  /// the first element, and only the target knows which lowering is cheap.
3204  virtual bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
3205                                       unsigned Index) const {
3206    return false;
3207  }
3208
3209  /// Try to convert an extract element of a vector binary operation into an
3210  /// extract element followed by a scalar operation.
3211  virtual bool shouldScalarizeBinop(SDValue VecOp) const {
3212    return false;
3213  }
3214
3215  /// Return true if extraction of a scalar element from the given vector type
3216  /// at the given index is cheap. For example, if scalar operations occur on
3217  /// the same register file as vector operations, then an extract element may
3218  /// be a sub-register rename rather than an actual instruction.
3219  virtual bool isExtractVecEltCheap(EVT VT, unsigned Index) const {
3220    return false;
3221  }
3222
3223  /// Try to convert math with an overflow comparison into the corresponding DAG
3224  /// node operation. Targets may want to override this independently of whether
3225  /// the operation is legal/custom for the given type because it may obscure
3226  /// matching of other patterns.
3227  virtual bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
3228                                    bool MathUsed) const {
3229    // TODO: The default logic is inherited from code in CodeGenPrepare.
3230    // The opcode should not make a difference by default?
3231    if (Opcode != ISD::UADDO)
3232      return false;
3233
3234    // Allow the transform as long as we have an integer type that is not
3235    // obviously illegal and unsupported and if the math result is used
3236    // besides the overflow check. On some targets (e.g. SPARC), it is
3237    // not profitable to form on overflow op if the math result has no
3238    // concrete users.
3239    if (VT.isVector())
3240      return false;
3241    return MathUsed && (VT.isSimple() || !isOperationExpand(Opcode, VT));
3242  }
3243
3244  // Return true if it is profitable to use a scalar input to a BUILD_VECTOR
3245  // even if the vector itself has multiple uses.
3246  virtual bool aggressivelyPreferBuildVectorSources(EVT VecVT) const {
3247    return false;
3248  }
3249
3250  // Return true if CodeGenPrepare should consider splitting large offset of a
3251  // GEP to make the GEP fit into the addressing mode and can be sunk into the
3252  // same blocks of its users.
3253  virtual bool shouldConsiderGEPOffsetSplit() const { return false; }
3254
3255  /// Return true if creating a shift of the type by the given
3256  /// amount is not profitable.
3257  virtual bool shouldAvoidTransformToShift(EVT VT, unsigned Amount) const {
3258    return false;
3259  }
3260
3261  // Should we fold (select_cc seteq (and x, y), 0, 0, A) -> (and (sra (shl x))
3262  // A) where y has a single bit set?
3263  virtual bool shouldFoldSelectWithSingleBitTest(EVT VT,
3264                                                 const APInt &AndMask) const {
3265    unsigned ShCt = AndMask.getBitWidth() - 1;
3266    return !shouldAvoidTransformToShift(VT, ShCt);
3267  }
3268
3269  /// Does this target require the clearing of high-order bits in a register
3270  /// passed to the fp16 to fp conversion library function.
3271  virtual bool shouldKeepZExtForFP16Conv() const { return false; }
3272
3273  /// Should we generate fp_to_si_sat and fp_to_ui_sat from type FPVT to type VT
3274  /// from min(max(fptoi)) saturation patterns.
3275  virtual bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const {
3276    return isOperationLegalOrCustom(Op, VT);
3277  }
3278
3279  /// Does this target support complex deinterleaving
3280  virtual bool isComplexDeinterleavingSupported() const { return false; }
3281
3282  /// Does this target support complex deinterleaving with the given operation
3283  /// and type
3284  virtual bool isComplexDeinterleavingOperationSupported(
3285      ComplexDeinterleavingOperation Operation, Type *Ty) const {
3286    return false;
3287  }
3288
3289  /// Create the IR node for the given complex deinterleaving operation.
3290  /// If one cannot be created using all the given inputs, nullptr should be
3291  /// returned.
3292  virtual Value *createComplexDeinterleavingIR(
3293      IRBuilderBase &B, ComplexDeinterleavingOperation OperationType,
3294      ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB,
3295      Value *Accumulator = nullptr) const {
3296    return nullptr;
3297  }
3298
3299  //===--------------------------------------------------------------------===//
3300  // Runtime Library hooks
3301  //
3302
3303  /// Rename the default libcall routine name for the specified libcall.
3304  void setLibcallName(RTLIB::Libcall Call, const char *Name) {
3305    LibcallRoutineNames[Call] = Name;
3306  }
3307  void setLibcallName(ArrayRef<RTLIB::Libcall> Calls, const char *Name) {
3308    for (auto Call : Calls)
3309      setLibcallName(Call, Name);
3310  }
3311
3312  /// Get the libcall routine name for the specified libcall.
3313  const char *getLibcallName(RTLIB::Libcall Call) const {
3314    return LibcallRoutineNames[Call];
3315  }
3316
3317  /// Override the default CondCode to be used to test the result of the
3318  /// comparison libcall against zero.
3319  void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
3320    CmpLibcallCCs[Call] = CC;
3321  }
3322
3323  /// Get the CondCode that's to be used to test the result of the comparison
3324  /// libcall against zero.
3325  ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
3326    return CmpLibcallCCs[Call];
3327  }
3328
3329  /// Set the CallingConv that should be used for the specified libcall.
3330  void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
3331    LibcallCallingConvs[Call] = CC;
3332  }
3333
3334  /// Get the CallingConv that should be used for the specified libcall.
3335  CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
3336    return LibcallCallingConvs[Call];
3337  }
3338
3339  /// Execute target specific actions to finalize target lowering.
3340  /// This is used to set extra flags in MachineFrameInformation and freezing
3341  /// the set of reserved registers.
3342  /// The default implementation just freezes the set of reserved registers.
3343  virtual void finalizeLowering(MachineFunction &MF) const;
3344
3345  //===----------------------------------------------------------------------===//
3346  //  GlobalISel Hooks
3347  //===----------------------------------------------------------------------===//
3348  /// Check whether or not \p MI needs to be moved close to its uses.
3349  virtual bool shouldLocalize(const MachineInstr &MI, const TargetTransformInfo *TTI) const;
3350
3351
3352private:
3353  const TargetMachine &TM;
3354
3355  /// Tells the code generator that the target has multiple (allocatable)
3356  /// condition registers that can be used to store the results of comparisons
3357  /// for use by selects and conditional branches. With multiple condition
3358  /// registers, the code generator will not aggressively sink comparisons into
3359  /// the blocks of their users.
3360  bool HasMultipleConditionRegisters;
3361
3362  /// Tells the code generator that the target has BitExtract instructions.
3363  /// The code generator will aggressively sink "shift"s into the blocks of
3364  /// their users if the users will generate "and" instructions which can be
3365  /// combined with "shift" to BitExtract instructions.
3366  bool HasExtractBitsInsn;
3367
3368  /// Tells the code generator to bypass slow divide or remainder
3369  /// instructions. For example, BypassSlowDivWidths[32,8] tells the code
3370  /// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer
3371  /// div/rem when the operands are positive and less than 256.
3372  DenseMap <unsigned int, unsigned int> BypassSlowDivWidths;
3373
3374  /// Tells the code generator that it shouldn't generate extra flow control
3375  /// instructions and should attempt to combine flow control instructions via
3376  /// predication.
3377  bool JumpIsExpensive;
3378
3379  /// Information about the contents of the high-bits in boolean values held in
3380  /// a type wider than i1. See getBooleanContents.
3381  BooleanContent BooleanContents;
3382
3383  /// Information about the contents of the high-bits in boolean values held in
3384  /// a type wider than i1. See getBooleanContents.
3385  BooleanContent BooleanFloatContents;
3386
3387  /// Information about the contents of the high-bits in boolean vector values
3388  /// when the element type is wider than i1. See getBooleanContents.
3389  BooleanContent BooleanVectorContents;
3390
3391  /// The target scheduling preference: shortest possible total cycles or lowest
3392  /// register usage.
3393  Sched::Preference SchedPreferenceInfo;
3394
3395  /// The minimum alignment that any argument on the stack needs to have.
3396  Align MinStackArgumentAlignment;
3397
3398  /// The minimum function alignment (used when optimizing for size, and to
3399  /// prevent explicitly provided alignment from leading to incorrect code).
3400  Align MinFunctionAlignment;
3401
3402  /// The preferred function alignment (used when alignment unspecified and
3403  /// optimizing for speed).
3404  Align PrefFunctionAlignment;
3405
3406  /// The preferred loop alignment (in log2 bot in bytes).
3407  Align PrefLoopAlignment;
3408  /// The maximum amount of bytes permitted to be emitted for alignment.
3409  unsigned MaxBytesForAlignment;
3410
3411  /// Size in bits of the maximum atomics size the backend supports.
3412  /// Accesses larger than this will be expanded by AtomicExpandPass.
3413  unsigned MaxAtomicSizeInBitsSupported;
3414
3415  /// Size in bits of the maximum div/rem size the backend supports.
3416  /// Larger operations will be expanded by ExpandLargeDivRem.
3417  unsigned MaxDivRemBitWidthSupported;
3418
3419  /// Size in bits of the maximum larget fp convert size the backend
3420  /// supports. Larger operations will be expanded by ExpandLargeFPConvert.
3421  unsigned MaxLargeFPConvertBitWidthSupported;
3422
3423  /// Size in bits of the minimum cmpxchg or ll/sc operation the
3424  /// backend supports.
3425  unsigned MinCmpXchgSizeInBits;
3426
3427  /// This indicates if the target supports unaligned atomic operations.
3428  bool SupportsUnalignedAtomics;
3429
3430  /// If set to a physical register, this specifies the register that
3431  /// llvm.savestack/llvm.restorestack should save and restore.
3432  Register StackPointerRegisterToSaveRestore;
3433
3434  /// This indicates the default register class to use for each ValueType the
3435  /// target supports natively.
3436  const TargetRegisterClass *RegClassForVT[MVT::VALUETYPE_SIZE];
3437  uint16_t NumRegistersForVT[MVT::VALUETYPE_SIZE];
3438  MVT RegisterTypeForVT[MVT::VALUETYPE_SIZE];
3439
3440  /// This indicates the "representative" register class to use for each
3441  /// ValueType the target supports natively. This information is used by the
3442  /// scheduler to track register pressure. By default, the representative
3443  /// register class is the largest legal super-reg register class of the
3444  /// register class of the specified type. e.g. On x86, i8, i16, and i32's
3445  /// representative class would be GR32.
3446  const TargetRegisterClass *RepRegClassForVT[MVT::VALUETYPE_SIZE] = {0};
3447
3448  /// This indicates the "cost" of the "representative" register class for each
3449  /// ValueType. The cost is used by the scheduler to approximate register
3450  /// pressure.
3451  uint8_t RepRegClassCostForVT[MVT::VALUETYPE_SIZE];
3452
3453  /// For any value types we are promoting or expanding, this contains the value
3454  /// type that we are changing to.  For Expanded types, this contains one step
3455  /// of the expand (e.g. i64 -> i32), even if there are multiple steps required
3456  /// (e.g. i64 -> i16).  For types natively supported by the system, this holds
3457  /// the same type (e.g. i32 -> i32).
3458  MVT TransformToType[MVT::VALUETYPE_SIZE];
3459
3460  /// For each operation and each value type, keep a LegalizeAction that
3461  /// indicates how instruction selection should deal with the operation.  Most
3462  /// operations are Legal (aka, supported natively by the target), but
3463  /// operations that are not should be described.  Note that operations on
3464  /// non-legal value types are not described here.
3465  LegalizeAction OpActions[MVT::VALUETYPE_SIZE][ISD::BUILTIN_OP_END];
3466
3467  /// For each load extension type and each value type, keep a LegalizeAction
3468  /// that indicates how instruction selection should deal with a load of a
3469  /// specific value type and extension type. Uses 4-bits to store the action
3470  /// for each of the 4 load ext types.
3471  uint16_t LoadExtActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE];
3472
3473  /// For each value type pair keep a LegalizeAction that indicates whether a
3474  /// truncating store of a specific value type and truncating type is legal.
3475  LegalizeAction TruncStoreActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE];
3476
3477  /// For each indexed mode and each value type, keep a quad of LegalizeAction
3478  /// that indicates how instruction selection should deal with the load /
3479  /// store / maskedload / maskedstore.
3480  ///
3481  /// The first dimension is the value_type for the reference. The second
3482  /// dimension represents the various modes for load store.
3483  uint16_t IndexedModeActions[MVT::VALUETYPE_SIZE][ISD::LAST_INDEXED_MODE];
3484
3485  /// For each condition code (ISD::CondCode) keep a LegalizeAction that
3486  /// indicates how instruction selection should deal with the condition code.
3487  ///
3488  /// Because each CC action takes up 4 bits, we need to have the array size be
3489  /// large enough to fit all of the value types. This can be done by rounding
3490  /// up the MVT::VALUETYPE_SIZE value to the next multiple of 8.
3491  uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::VALUETYPE_SIZE + 7) / 8];
3492
3493  ValueTypeActionImpl ValueTypeActions;
3494
3495private:
3496  /// Targets can specify ISD nodes that they would like PerformDAGCombine
3497  /// callbacks for by calling setTargetDAGCombine(), which sets a bit in this
3498  /// array.
3499  unsigned char
3500  TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];
3501
3502  /// For operations that must be promoted to a specific type, this holds the
3503  /// destination type.  This map should be sparse, so don't hold it as an
3504  /// array.
3505  ///
3506  /// Targets add entries to this map with AddPromotedToType(..), clients access
3507  /// this with getTypeToPromoteTo(..).
3508  std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
3509    PromoteToType;
3510
3511  /// Stores the name each libcall.
3512  const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL + 1];
3513
3514  /// The ISD::CondCode that should be used to test the result of each of the
3515  /// comparison libcall against zero.
3516  ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];
3517
3518  /// Stores the CallingConv that should be used for each libcall.
3519  CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL];
3520
3521  /// Set default libcall names and calling conventions.
3522  void InitLibcalls(const Triple &TT);
3523
3524  /// The bits of IndexedModeActions used to store the legalisation actions
3525  /// We store the data as   | ML | MS |  L |  S | each taking 4 bits.
3526  enum IndexedModeActionsBits {
3527    IMAB_Store = 0,
3528    IMAB_Load = 4,
3529    IMAB_MaskedStore = 8,
3530    IMAB_MaskedLoad = 12
3531  };
3532
3533  void setIndexedModeAction(unsigned IdxMode, MVT VT, unsigned Shift,
3534                            LegalizeAction Action) {
3535    assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
3536           (unsigned)Action < 0xf && "Table isn't big enough!");
3537    unsigned Ty = (unsigned)VT.SimpleTy;
3538    IndexedModeActions[Ty][IdxMode] &= ~(0xf << Shift);
3539    IndexedModeActions[Ty][IdxMode] |= ((uint16_t)Action) << Shift;
3540  }
3541
3542  LegalizeAction getIndexedModeAction(unsigned IdxMode, MVT VT,
3543                                      unsigned Shift) const {
3544    assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
3545           "Table isn't big enough!");
3546    unsigned Ty = (unsigned)VT.SimpleTy;
3547    return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] >> Shift) & 0xf);
3548  }
3549
3550protected:
3551  /// Return true if the extension represented by \p I is free.
3552  /// \pre \p I is a sign, zero, or fp extension and
3553  ///      is[Z|FP]ExtFree of the related types is not true.
3554  virtual bool isExtFreeImpl(const Instruction *I) const { return false; }
3555
3556  /// Depth that GatherAllAliases should continue looking for chain
3557  /// dependencies when trying to find a more preferable chain. As an
3558  /// approximation, this should be more than the number of consecutive stores
3559  /// expected to be merged.
3560  unsigned GatherAllAliasesMaxDepth;
3561
3562  /// \brief Specify maximum number of store instructions per memset call.
3563  ///
3564  /// When lowering \@llvm.memset this field specifies the maximum number of
3565  /// store operations that may be substituted for the call to memset. Targets
3566  /// must set this value based on the cost threshold for that target. Targets
3567  /// should assume that the memset will be done using as many of the largest
3568  /// store operations first, followed by smaller ones, if necessary, per
3569  /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
3570  /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
3571  /// store.  This only applies to setting a constant array of a constant size.
3572  unsigned MaxStoresPerMemset;
3573  /// Likewise for functions with the OptSize attribute.
3574  unsigned MaxStoresPerMemsetOptSize;
3575
3576  /// \brief Specify maximum number of store instructions per memcpy call.
3577  ///
3578  /// When lowering \@llvm.memcpy this field specifies the maximum number of
3579  /// store operations that may be substituted for a call to memcpy. Targets
3580  /// must set this value based on the cost threshold for that target. Targets
3581  /// should assume that the memcpy will be done using as many of the largest
3582  /// store operations first, followed by smaller ones, if necessary, per
3583  /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
3584  /// with 32-bit alignment would result in one 4-byte store, a one 2-byte store
3585  /// and one 1-byte store. This only applies to copying a constant array of
3586  /// constant size.
3587  unsigned MaxStoresPerMemcpy;
3588  /// Likewise for functions with the OptSize attribute.
3589  unsigned MaxStoresPerMemcpyOptSize;
3590  /// \brief Specify max number of store instructions to glue in inlined memcpy.
3591  ///
3592  /// When memcpy is inlined based on MaxStoresPerMemcpy, specify maximum number
3593  /// of store instructions to keep together. This helps in pairing and
3594  //  vectorization later on.
3595  unsigned MaxGluedStoresPerMemcpy = 0;
3596
3597  /// \brief Specify maximum number of load instructions per memcmp call.
3598  ///
3599  /// When lowering \@llvm.memcmp this field specifies the maximum number of
3600  /// pairs of load operations that may be substituted for a call to memcmp.
3601  /// Targets must set this value based on the cost threshold for that target.
3602  /// Targets should assume that the memcmp will be done using as many of the
3603  /// largest load operations first, followed by smaller ones, if necessary, per
3604  /// alignment restrictions. For example, loading 7 bytes on a 32-bit machine
3605  /// with 32-bit alignment would result in one 4-byte load, a one 2-byte load
3606  /// and one 1-byte load. This only applies to copying a constant array of
3607  /// constant size.
3608  unsigned MaxLoadsPerMemcmp;
3609  /// Likewise for functions with the OptSize attribute.
3610  unsigned MaxLoadsPerMemcmpOptSize;
3611
3612  /// \brief Specify maximum number of store instructions per memmove call.
3613  ///
3614  /// When lowering \@llvm.memmove this field specifies the maximum number of
3615  /// store instructions that may be substituted for a call to memmove. Targets
3616  /// must set this value based on the cost threshold for that target. Targets
3617  /// should assume that the memmove will be done using as many of the largest
3618  /// store operations first, followed by smaller ones, if necessary, per
3619  /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
3620  /// with 8-bit alignment would result in nine 1-byte stores.  This only
3621  /// applies to copying a constant array of constant size.
3622  unsigned MaxStoresPerMemmove;
3623  /// Likewise for functions with the OptSize attribute.
3624  unsigned MaxStoresPerMemmoveOptSize;
3625
3626  /// Tells the code generator that select is more expensive than a branch if
3627  /// the branch is usually predicted right.
3628  bool PredictableSelectIsExpensive;
3629
3630  /// \see enableExtLdPromotion.
3631  bool EnableExtLdPromotion;
3632
3633  /// Return true if the value types that can be represented by the specified
3634  /// register class are all legal.
3635  bool isLegalRC(const TargetRegisterInfo &TRI,
3636                 const TargetRegisterClass &RC) const;
3637
3638  /// Replace/modify any TargetFrameIndex operands with a targte-dependent
3639  /// sequence of memory operands that is recognized by PrologEpilogInserter.
3640  MachineBasicBlock *emitPatchPoint(MachineInstr &MI,
3641                                    MachineBasicBlock *MBB) const;
3642
3643  bool IsStrictFPEnabled;
3644};
3645
3646/// This class defines information used to lower LLVM code to legal SelectionDAG
3647/// operators that the target instruction selector can accept natively.
3648///
3649/// This class also defines callbacks that targets must implement to lower
3650/// target-specific constructs to SelectionDAG operators.
3651class TargetLowering : public TargetLoweringBase {
3652public:
3653  struct DAGCombinerInfo;
3654  struct MakeLibCallOptions;
3655
3656  TargetLowering(const TargetLowering &) = delete;
3657  TargetLowering &operator=(const TargetLowering &) = delete;
3658
3659  explicit TargetLowering(const TargetMachine &TM);
3660
3661  bool isPositionIndependent() const;
3662
3663  virtual bool isSDNodeSourceOfDivergence(const SDNode *N,
3664                                          FunctionLoweringInfo *FLI,
3665                                          UniformityInfo *UA) const {
3666    return false;
3667  }
3668
3669  // Lets target to control the following reassociation of operands: (op (op x,
3670  // c1), y) -> (op (op x, y), c1) where N0 is (op x, c1) and N1 is y. By
3671  // default consider profitable any case where N0 has single use.  This
3672  // behavior reflects the condition replaced by this target hook call in the
3673  // DAGCombiner.  Any particular target can implement its own heuristic to
3674  // restrict common combiner.
3675  virtual bool isReassocProfitable(SelectionDAG &DAG, SDValue N0,
3676                                   SDValue N1) const {
3677    return N0.hasOneUse();
3678  }
3679
3680  // Lets target to control the following reassociation of operands: (op (op x,
3681  // c1), y) -> (op (op x, y), c1) where N0 is (op x, c1) and N1 is y. By
3682  // default consider profitable any case where N0 has single use.  This
3683  // behavior reflects the condition replaced by this target hook call in the
3684  // combiner.  Any particular target can implement its own heuristic to
3685  // restrict common combiner.
3686  virtual bool isReassocProfitable(MachineRegisterInfo &MRI, Register N0,
3687                                   Register N1) const {
3688    return MRI.hasOneNonDBGUse(N0);
3689  }
3690
3691  virtual bool isSDNodeAlwaysUniform(const SDNode * N) const {
3692    return false;
3693  }
3694
3695  /// Returns true by value, base pointer and offset pointer and addressing mode
3696  /// by reference if the node's address can be legally represented as
3697  /// pre-indexed load / store address.
3698  virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
3699                                         SDValue &/*Offset*/,
3700                                         ISD::MemIndexedMode &/*AM*/,
3701                                         SelectionDAG &/*DAG*/) const {
3702    return false;
3703  }
3704
3705  /// Returns true by value, base pointer and offset pointer and addressing mode
3706  /// by reference if this node can be combined with a load / store to form a
3707  /// post-indexed load / store.
3708  virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
3709                                          SDValue &/*Base*/,
3710                                          SDValue &/*Offset*/,
3711                                          ISD::MemIndexedMode &/*AM*/,
3712                                          SelectionDAG &/*DAG*/) const {
3713    return false;
3714  }
3715
3716  /// Returns true if the specified base+offset is a legal indexed addressing
3717  /// mode for this target. \p MI is the load or store instruction that is being
3718  /// considered for transformation.
3719  virtual bool isIndexingLegal(MachineInstr &MI, Register Base, Register Offset,
3720                               bool IsPre, MachineRegisterInfo &MRI) const {
3721    return false;
3722  }
3723
3724  /// Return the entry encoding for a jump table in the current function.  The
3725  /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
3726  virtual unsigned getJumpTableEncoding() const;
3727
3728  virtual const MCExpr *
3729  LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
3730                            const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
3731                            MCContext &/*Ctx*/) const {
3732    llvm_unreachable("Need to implement this hook if target has custom JTIs");
3733  }
3734
3735  /// Returns relocation base for the given PIC jumptable.
3736  virtual SDValue getPICJumpTableRelocBase(SDValue Table,
3737                                           SelectionDAG &DAG) const;
3738
3739  /// This returns the relocation base for the given PIC jumptable, the same as
3740  /// getPICJumpTableRelocBase, but as an MCExpr.
3741  virtual const MCExpr *
3742  getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
3743                               unsigned JTI, MCContext &Ctx) const;
3744
3745  /// Return true if folding a constant offset with the given GlobalAddress is
3746  /// legal.  It is frequently not legal in PIC relocation models.
3747  virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
3748
3749  /// On x86, return true if the operand with index OpNo is a CALL or JUMP
3750  /// instruction, which can use either a memory constraint or an address
3751  /// constraint. -fasm-blocks "__asm call foo" lowers to
3752  /// call void asm sideeffect inteldialect "call ${0:P}", "*m..."
3753  ///
3754  /// This function is used by a hack to choose the address constraint,
3755  /// lowering to a direct call.
3756  virtual bool
3757  isInlineAsmTargetBranch(const SmallVectorImpl<StringRef> &AsmStrs,
3758                          unsigned OpNo) const {
3759    return false;
3760  }
3761
3762  bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
3763                            SDValue &Chain) const;
3764
3765  void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS,
3766                           SDValue &NewRHS, ISD::CondCode &CCCode,
3767                           const SDLoc &DL, const SDValue OldLHS,
3768                           const SDValue OldRHS) const;
3769
3770  void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS,
3771                           SDValue &NewRHS, ISD::CondCode &CCCode,
3772                           const SDLoc &DL, const SDValue OldLHS,
3773                           const SDValue OldRHS, SDValue &Chain,
3774                           bool IsSignaling = false) const;
3775
3776  /// Returns a pair of (return value, chain).
3777  /// It is an error to pass RTLIB::UNKNOWN_LIBCALL as \p LC.
3778  std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC,
3779                                          EVT RetVT, ArrayRef<SDValue> Ops,
3780                                          MakeLibCallOptions CallOptions,
3781                                          const SDLoc &dl,
3782                                          SDValue Chain = SDValue()) const;
3783
3784  /// Check whether parameters to a call that are passed in callee saved
3785  /// registers are the same as from the calling function.  This needs to be
3786  /// checked for tail call eligibility.
3787  bool parametersInCSRMatch(const MachineRegisterInfo &MRI,
3788      const uint32_t *CallerPreservedMask,
3789      const SmallVectorImpl<CCValAssign> &ArgLocs,
3790      const SmallVectorImpl<SDValue> &OutVals) const;
3791
3792  //===--------------------------------------------------------------------===//
3793  // TargetLowering Optimization Methods
3794  //
3795
3796  /// A convenience struct that encapsulates a DAG, and two SDValues for
3797  /// returning information from TargetLowering to its clients that want to
3798  /// combine.
3799  struct TargetLoweringOpt {
3800    SelectionDAG &DAG;
3801    bool LegalTys;
3802    bool LegalOps;
3803    SDValue Old;
3804    SDValue New;
3805
3806    explicit TargetLoweringOpt(SelectionDAG &InDAG,
3807                               bool LT, bool LO) :
3808      DAG(InDAG), LegalTys(LT), LegalOps(LO) {}
3809
3810    bool LegalTypes() const { return LegalTys; }
3811    bool LegalOperations() const { return LegalOps; }
3812
3813    bool CombineTo(SDValue O, SDValue N) {
3814      Old = O;
3815      New = N;
3816      return true;
3817    }
3818  };
3819
3820  /// Determines the optimal series of memory ops to replace the memset / memcpy.
3821  /// Return true if the number of memory ops is below the threshold (Limit).
3822  /// Note that this is always the case when Limit is ~0.
3823  /// It returns the types of the sequence of memory ops to perform
3824  /// memset / memcpy by reference.
3825  virtual bool
3826  findOptimalMemOpLowering(std::vector<EVT> &MemOps, unsigned Limit,
3827                           const MemOp &Op, unsigned DstAS, unsigned SrcAS,
3828                           const AttributeList &FuncAttributes) const;
3829
3830  /// Check to see if the specified operand of the specified instruction is a
3831  /// constant integer.  If so, check to see if there are any bits set in the
3832  /// constant that are not demanded.  If so, shrink the constant and return
3833  /// true.
3834  bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
3835                              const APInt &DemandedElts,
3836                              TargetLoweringOpt &TLO) const;
3837
3838  /// Helper wrapper around ShrinkDemandedConstant, demanding all elements.
3839  bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
3840                              TargetLoweringOpt &TLO) const;
3841
3842  // Target hook to do target-specific const optimization, which is called by
3843  // ShrinkDemandedConstant. This function should return true if the target
3844  // doesn't want ShrinkDemandedConstant to further optimize the constant.
3845  virtual bool targetShrinkDemandedConstant(SDValue Op,
3846                                            const APInt &DemandedBits,
3847                                            const APInt &DemandedElts,
3848                                            TargetLoweringOpt &TLO) const {
3849    return false;
3850  }
3851
3852  /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
3853  /// This uses isTruncateFree/isZExtFree and ANY_EXTEND for the widening cast,
3854  /// but it could be generalized for targets with other types of implicit
3855  /// widening casts.
3856  bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth,
3857                        const APInt &DemandedBits,
3858                        TargetLoweringOpt &TLO) const;
3859
3860  /// Look at Op.  At this point, we know that only the DemandedBits bits of the
3861  /// result of Op are ever used downstream.  If we can use this information to
3862  /// simplify Op, create a new simplified DAG node and return true, returning
3863  /// the original and new nodes in Old and New.  Otherwise, analyze the
3864  /// expression and return a mask of KnownOne and KnownZero bits for the
3865  /// expression (used to simplify the caller).  The KnownZero/One bits may only
3866  /// be accurate for those bits in the Demanded masks.
3867  /// \p AssumeSingleUse When this parameter is true, this function will
3868  ///    attempt to simplify \p Op even if there are multiple uses.
3869  ///    Callers are responsible for correctly updating the DAG based on the
3870  ///    results of this function, because simply replacing TLO.Old
3871  ///    with TLO.New will be incorrect when this parameter is true and TLO.Old
3872  ///    has multiple uses.
3873  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
3874                            const APInt &DemandedElts, KnownBits &Known,
3875                            TargetLoweringOpt &TLO, unsigned Depth = 0,
3876                            bool AssumeSingleUse = false) const;
3877
3878  /// Helper wrapper around SimplifyDemandedBits, demanding all elements.
3879  /// Adds Op back to the worklist upon success.
3880  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
3881                            KnownBits &Known, TargetLoweringOpt &TLO,
3882                            unsigned Depth = 0,
3883                            bool AssumeSingleUse = false) const;
3884
3885  /// Helper wrapper around SimplifyDemandedBits.
3886  /// Adds Op back to the worklist upon success.
3887  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
3888                            DAGCombinerInfo &DCI) const;
3889
3890  /// Helper wrapper around SimplifyDemandedBits.
3891  /// Adds Op back to the worklist upon success.
3892  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
3893                            const APInt &DemandedElts,
3894                            DAGCombinerInfo &DCI) const;
3895
3896  /// More limited version of SimplifyDemandedBits that can be used to "look
3897  /// through" ops that don't contribute to the DemandedBits/DemandedElts -
3898  /// bitwise ops etc.
3899  SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits,
3900                                          const APInt &DemandedElts,
3901                                          SelectionDAG &DAG,
3902                                          unsigned Depth = 0) const;
3903
3904  /// Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all
3905  /// elements.
3906  SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits,
3907                                          SelectionDAG &DAG,
3908                                          unsigned Depth = 0) const;
3909
3910  /// Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all
3911  /// bits from only some vector elements.
3912  SDValue SimplifyMultipleUseDemandedVectorElts(SDValue Op,
3913                                                const APInt &DemandedElts,
3914                                                SelectionDAG &DAG,
3915                                                unsigned Depth = 0) const;
3916
3917  /// Look at Vector Op. At this point, we know that only the DemandedElts
3918  /// elements of the result of Op are ever used downstream.  If we can use
3919  /// this information to simplify Op, create a new simplified DAG node and
3920  /// return true, storing the original and new nodes in TLO.
3921  /// Otherwise, analyze the expression and return a mask of KnownUndef and
3922  /// KnownZero elements for the expression (used to simplify the caller).
3923  /// The KnownUndef/Zero elements may only be accurate for those bits
3924  /// in the DemandedMask.
3925  /// \p AssumeSingleUse When this parameter is true, this function will
3926  ///    attempt to simplify \p Op even if there are multiple uses.
3927  ///    Callers are responsible for correctly updating the DAG based on the
3928  ///    results of this function, because simply replacing TLO.Old
3929  ///    with TLO.New will be incorrect when this parameter is true and TLO.Old
3930  ///    has multiple uses.
3931  bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedEltMask,
3932                                  APInt &KnownUndef, APInt &KnownZero,
3933                                  TargetLoweringOpt &TLO, unsigned Depth = 0,
3934                                  bool AssumeSingleUse = false) const;
3935
3936  /// Helper wrapper around SimplifyDemandedVectorElts.
3937  /// Adds Op back to the worklist upon success.
3938  bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedElts,
3939                                  DAGCombinerInfo &DCI) const;
3940
3941  /// Return true if the target supports simplifying demanded vector elements by
3942  /// converting them to undefs.
3943  virtual bool
3944  shouldSimplifyDemandedVectorElts(SDValue Op,
3945                                   const TargetLoweringOpt &TLO) const {
3946    return true;
3947  }
3948
3949  /// Determine which of the bits specified in Mask are known to be either zero
3950  /// or one and return them in the KnownZero/KnownOne bitsets. The DemandedElts
3951  /// argument allows us to only collect the known bits that are shared by the
3952  /// requested vector elements.
3953  virtual void computeKnownBitsForTargetNode(const SDValue Op,
3954                                             KnownBits &Known,
3955                                             const APInt &DemandedElts,
3956                                             const SelectionDAG &DAG,
3957                                             unsigned Depth = 0) const;
3958
3959  /// Determine which of the bits specified in Mask are known to be either zero
3960  /// or one and return them in the KnownZero/KnownOne bitsets. The DemandedElts
3961  /// argument allows us to only collect the known bits that are shared by the
3962  /// requested vector elements. This is for GISel.
3963  virtual void computeKnownBitsForTargetInstr(GISelKnownBits &Analysis,
3964                                              Register R, KnownBits &Known,
3965                                              const APInt &DemandedElts,
3966                                              const MachineRegisterInfo &MRI,
3967                                              unsigned Depth = 0) const;
3968
3969  /// Determine the known alignment for the pointer value \p R. This is can
3970  /// typically be inferred from the number of low known 0 bits. However, for a
3971  /// pointer with a non-integral address space, the alignment value may be
3972  /// independent from the known low bits.
3973  virtual Align computeKnownAlignForTargetInstr(GISelKnownBits &Analysis,
3974                                                Register R,
3975                                                const MachineRegisterInfo &MRI,
3976                                                unsigned Depth = 0) const;
3977
3978  /// Determine which of the bits of FrameIndex \p FIOp are known to be 0.
3979  /// Default implementation computes low bits based on alignment
3980  /// information. This should preserve known bits passed into it.
3981  virtual void computeKnownBitsForFrameIndex(int FIOp,
3982                                             KnownBits &Known,
3983                                             const MachineFunction &MF) const;
3984
3985  /// This method can be implemented by targets that want to expose additional
3986  /// information about sign bits to the DAG Combiner. The DemandedElts
3987  /// argument allows us to only collect the minimum sign bits that are shared
3988  /// by the requested vector elements.
3989  virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
3990                                                   const APInt &DemandedElts,
3991                                                   const SelectionDAG &DAG,
3992                                                   unsigned Depth = 0) const;
3993
3994  /// This method can be implemented by targets that want to expose additional
3995  /// information about sign bits to GlobalISel combiners. The DemandedElts
3996  /// argument allows us to only collect the minimum sign bits that are shared
3997  /// by the requested vector elements.
3998  virtual unsigned computeNumSignBitsForTargetInstr(GISelKnownBits &Analysis,
3999                                                    Register R,
4000                                                    const APInt &DemandedElts,
4001                                                    const MachineRegisterInfo &MRI,
4002                                                    unsigned Depth = 0) const;
4003
4004  /// Attempt to simplify any target nodes based on the demanded vector
4005  /// elements, returning true on success. Otherwise, analyze the expression and
4006  /// return a mask of KnownUndef and KnownZero elements for the expression
4007  /// (used to simplify the caller). The KnownUndef/Zero elements may only be
4008  /// accurate for those bits in the DemandedMask.
4009  virtual bool SimplifyDemandedVectorEltsForTargetNode(
4010      SDValue Op, const APInt &DemandedElts, APInt &KnownUndef,
4011      APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth = 0) const;
4012
4013  /// Attempt to simplify any target nodes based on the demanded bits/elts,
4014  /// returning true on success. Otherwise, analyze the
4015  /// expression and return a mask of KnownOne and KnownZero bits for the
4016  /// expression (used to simplify the caller).  The KnownZero/One bits may only
4017  /// be accurate for those bits in the Demanded masks.
4018  virtual bool SimplifyDemandedBitsForTargetNode(SDValue Op,
4019                                                 const APInt &DemandedBits,
4020                                                 const APInt &DemandedElts,
4021                                                 KnownBits &Known,
4022                                                 TargetLoweringOpt &TLO,
4023                                                 unsigned Depth = 0) const;
4024
4025  /// More limited version of SimplifyDemandedBits that can be used to "look
4026  /// through" ops that don't contribute to the DemandedBits/DemandedElts -
4027  /// bitwise ops etc.
4028  virtual SDValue SimplifyMultipleUseDemandedBitsForTargetNode(
4029      SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
4030      SelectionDAG &DAG, unsigned Depth) const;
4031
4032  /// Return true if this function can prove that \p Op is never poison
4033  /// and, if \p PoisonOnly is false, does not have undef bits. The DemandedElts
4034  /// argument limits the check to the requested vector elements.
4035  virtual bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(
4036      SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
4037      bool PoisonOnly, unsigned Depth) const;
4038
4039  /// Return true if Op can create undef or poison from non-undef & non-poison
4040  /// operands. The DemandedElts argument limits the check to the requested
4041  /// vector elements.
4042  virtual bool
4043  canCreateUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts,
4044                                      const SelectionDAG &DAG, bool PoisonOnly,
4045                                      bool ConsiderFlags, unsigned Depth) const;
4046
4047  /// Tries to build a legal vector shuffle using the provided parameters
4048  /// or equivalent variations. The Mask argument maybe be modified as the
4049  /// function tries different variations.
4050  /// Returns an empty SDValue if the operation fails.
4051  SDValue buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0,
4052                                  SDValue N1, MutableArrayRef<int> Mask,
4053                                  SelectionDAG &DAG) const;
4054
4055  /// This method returns the constant pool value that will be loaded by LD.
4056  /// NOTE: You must check for implicit extensions of the constant by LD.
4057  virtual const Constant *getTargetConstantFromLoad(LoadSDNode *LD) const;
4058
4059  /// If \p SNaN is false, \returns true if \p Op is known to never be any
4060  /// NaN. If \p sNaN is true, returns if \p Op is known to never be a signaling
4061  /// NaN.
4062  virtual bool isKnownNeverNaNForTargetNode(SDValue Op,
4063                                            const SelectionDAG &DAG,
4064                                            bool SNaN = false,
4065                                            unsigned Depth = 0) const;
4066
4067  /// Return true if vector \p Op has the same value across all \p DemandedElts,
4068  /// indicating any elements which may be undef in the output \p UndefElts.
4069  virtual bool isSplatValueForTargetNode(SDValue Op, const APInt &DemandedElts,
4070                                         APInt &UndefElts,
4071                                         const SelectionDAG &DAG,
4072                                         unsigned Depth = 0) const;
4073
4074  /// Returns true if the given Opc is considered a canonical constant for the
4075  /// target, which should not be transformed back into a BUILD_VECTOR.
4076  virtual bool isTargetCanonicalConstantNode(SDValue Op) const {
4077    return Op.getOpcode() == ISD::SPLAT_VECTOR ||
4078           Op.getOpcode() == ISD::SPLAT_VECTOR_PARTS;
4079  }
4080
4081  struct DAGCombinerInfo {
4082    void *DC;  // The DAG Combiner object.
4083    CombineLevel Level;
4084    bool CalledByLegalizer;
4085
4086  public:
4087    SelectionDAG &DAG;
4088
4089    DAGCombinerInfo(SelectionDAG &dag, CombineLevel level,  bool cl, void *dc)
4090      : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {}
4091
4092    bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; }
4093    bool isBeforeLegalizeOps() const { return Level < AfterLegalizeVectorOps; }
4094    bool isAfterLegalizeDAG() const { return Level >= AfterLegalizeDAG; }
4095    CombineLevel getDAGCombineLevel() { return Level; }
4096    bool isCalledByLegalizer() const { return CalledByLegalizer; }
4097
4098    void AddToWorklist(SDNode *N);
4099    SDValue CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo = true);
4100    SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
4101    SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);
4102
4103    bool recursivelyDeleteUnusedNodes(SDNode *N);
4104
4105    void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
4106  };
4107
4108  /// Return if the N is a constant or constant vector equal to the true value
4109  /// from getBooleanContents().
4110  bool isConstTrueVal(SDValue N) const;
4111
4112  /// Return if the N is a constant or constant vector equal to the false value
4113  /// from getBooleanContents().
4114  bool isConstFalseVal(SDValue N) const;
4115
4116  /// Return if \p N is a True value when extended to \p VT.
4117  bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool SExt) const;
4118
4119  /// Try to simplify a setcc built with the specified operands and cc. If it is
4120  /// unable to simplify it, return a null SDValue.
4121  SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
4122                        bool foldBooleans, DAGCombinerInfo &DCI,
4123                        const SDLoc &dl) const;
4124
4125  // For targets which wrap address, unwrap for analysis.
4126  virtual SDValue unwrapAddress(SDValue N) const { return N; }
4127
4128  /// Returns true (and the GlobalValue and the offset) if the node is a
4129  /// GlobalAddress + offset.
4130  virtual bool
4131  isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;
4132
4133  /// This method will be invoked for all target nodes and for any
4134  /// target-independent nodes that the target has registered with invoke it
4135  /// for.
4136  ///
4137  /// The semantics are as follows:
4138  /// Return Value:
4139  ///   SDValue.Val == 0   - No change was made
4140  ///   SDValue.Val == N   - N was replaced, is dead, and is already handled.
4141  ///   otherwise          - N should be replaced by the returned Operand.
4142  ///
4143  /// In addition, methods provided by DAGCombinerInfo may be used to perform
4144  /// more complex transformations.
4145  ///
4146  virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
4147
4148  /// Return true if it is profitable to move this shift by a constant amount
4149  /// through its operand, adjusting any immediate operands as necessary to
4150  /// preserve semantics. This transformation may not be desirable if it
4151  /// disrupts a particularly auspicious target-specific tree (e.g. bitfield
4152  /// extraction in AArch64). By default, it returns true.
4153  ///
4154  /// @param N the shift node
4155  /// @param Level the current DAGCombine legalization level.
4156  virtual bool isDesirableToCommuteWithShift(const SDNode *N,
4157                                             CombineLevel Level) const {
4158    return true;
4159  }
4160
4161  /// GlobalISel - return true if it is profitable to move this shift by a
4162  /// constant amount through its operand, adjusting any immediate operands as
4163  /// necessary to preserve semantics. This transformation may not be desirable
4164  /// if it disrupts a particularly auspicious target-specific tree (e.g.
4165  /// bitfield extraction in AArch64). By default, it returns true.
4166  ///
4167  /// @param MI the shift instruction
4168  /// @param IsAfterLegal true if running after legalization.
4169  virtual bool isDesirableToCommuteWithShift(const MachineInstr &MI,
4170                                             bool IsAfterLegal) const {
4171    return true;
4172  }
4173
4174  /// GlobalISel - return true if it's profitable to perform the combine:
4175  /// shl ([sza]ext x), y => zext (shl x, y)
4176  virtual bool isDesirableToPullExtFromShl(const MachineInstr &MI) const {
4177    return true;
4178  }
4179
4180  // Return AndOrSETCCFoldKind::{AddAnd, ABS} if its desirable to try and
4181  // optimize LogicOp(SETCC0, SETCC1). An example (what is implemented as of
4182  // writing this) is:
4183  //    With C as a power of 2 and C != 0 and C != INT_MIN:
4184  //    AddAnd:
4185  //     (icmp eq A, C) | (icmp eq A, -C)
4186  //            -> (icmp eq and(add(A, C), ~(C + C)), 0)
4187  //     (icmp ne A, C) & (icmp ne A, -C)w
4188  //            -> (icmp ne and(add(A, C), ~(C + C)), 0)
4189  //    ABS:
4190  //     (icmp eq A, C) | (icmp eq A, -C)
4191  //            -> (icmp eq Abs(A), C)
4192  //     (icmp ne A, C) & (icmp ne A, -C)w
4193  //            -> (icmp ne Abs(A), C)
4194  //
4195  // @param LogicOp the logic op
4196  // @param SETCC0 the first of the SETCC nodes
4197  // @param SETCC0 the second of the SETCC nodes
4198  virtual AndOrSETCCFoldKind isDesirableToCombineLogicOpOfSETCC(
4199      const SDNode *LogicOp, const SDNode *SETCC0, const SDNode *SETCC1) const {
4200    return AndOrSETCCFoldKind::None;
4201  }
4202
4203  /// Return true if it is profitable to combine an XOR of a logical shift
4204  /// to create a logical shift of NOT. This transformation may not be desirable
4205  /// if it disrupts a particularly auspicious target-specific tree (e.g.
4206  /// BIC on ARM/AArch64). By default, it returns true.
4207  virtual bool isDesirableToCommuteXorWithShift(const SDNode *N) const {
4208    return true;
4209  }
4210
4211  /// Return true if the target has native support for the specified value type
4212  /// and it is 'desirable' to use the type for the given node type. e.g. On x86
4213  /// i16 is legal, but undesirable since i16 instruction encodings are longer
4214  /// and some i16 instructions are slow.
4215  virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
4216    // By default, assume all legal types are desirable.
4217    return isTypeLegal(VT);
4218  }
4219
4220  /// Return true if it is profitable for dag combiner to transform a floating
4221  /// point op of specified opcode to a equivalent op of an integer
4222  /// type. e.g. f32 load -> i32 load can be profitable on ARM.
4223  virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
4224                                                 EVT /*VT*/) const {
4225    return false;
4226  }
4227
4228  /// This method query the target whether it is beneficial for dag combiner to
4229  /// promote the specified node. If true, it should return the desired
4230  /// promotion type by reference.
4231  virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
4232    return false;
4233  }
4234
4235  /// Return true if the target supports swifterror attribute. It optimizes
4236  /// loads and stores to reading and writing a specific register.
4237  virtual bool supportSwiftError() const {
4238    return false;
4239  }
4240
4241  /// Return true if the target supports that a subset of CSRs for the given
4242  /// machine function is handled explicitly via copies.
4243  virtual bool supportSplitCSR(MachineFunction *MF) const {
4244    return false;
4245  }
4246
4247  /// Return true if the target supports kcfi operand bundles.
4248  virtual bool supportKCFIBundles() const { return false; }
4249
4250  /// Perform necessary initialization to handle a subset of CSRs explicitly
4251  /// via copies. This function is called at the beginning of instruction
4252  /// selection.
4253  virtual void initializeSplitCSR(MachineBasicBlock *Entry) const {
4254    llvm_unreachable("Not Implemented");
4255  }
4256
4257  /// Insert explicit copies in entry and exit blocks. We copy a subset of
4258  /// CSRs to virtual registers in the entry block, and copy them back to
4259  /// physical registers in the exit blocks. This function is called at the end
4260  /// of instruction selection.
4261  virtual void insertCopiesSplitCSR(
4262      MachineBasicBlock *Entry,
4263      const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
4264    llvm_unreachable("Not Implemented");
4265  }
4266
4267  /// Return the newly negated expression if the cost is not expensive and
4268  /// set the cost in \p Cost to indicate that if it is cheaper or neutral to
4269  /// do the negation.
4270  virtual SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG,
4271                                       bool LegalOps, bool OptForSize,
4272                                       NegatibleCost &Cost,
4273                                       unsigned Depth = 0) const;
4274
4275  SDValue getCheaperOrNeutralNegatedExpression(
4276      SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize,
4277      const NegatibleCost CostThreshold = NegatibleCost::Neutral,
4278      unsigned Depth = 0) const {
4279    NegatibleCost Cost = NegatibleCost::Expensive;
4280    SDValue Neg =
4281        getNegatedExpression(Op, DAG, LegalOps, OptForSize, Cost, Depth);
4282    if (!Neg)
4283      return SDValue();
4284
4285    if (Cost <= CostThreshold)
4286      return Neg;
4287
4288    // Remove the new created node to avoid the side effect to the DAG.
4289    if (Neg->use_empty())
4290      DAG.RemoveDeadNode(Neg.getNode());
4291    return SDValue();
4292  }
4293
4294  /// This is the helper function to return the newly negated expression only
4295  /// when the cost is cheaper.
4296  SDValue getCheaperNegatedExpression(SDValue Op, SelectionDAG &DAG,
4297                                      bool LegalOps, bool OptForSize,
4298                                      unsigned Depth = 0) const {
4299    return getCheaperOrNeutralNegatedExpression(Op, DAG, LegalOps, OptForSize,
4300                                                NegatibleCost::Cheaper, Depth);
4301  }
4302
4303  /// This is the helper function to return the newly negated expression if
4304  /// the cost is not expensive.
4305  SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps,
4306                               bool OptForSize, unsigned Depth = 0) const {
4307    NegatibleCost Cost = NegatibleCost::Expensive;
4308    return getNegatedExpression(Op, DAG, LegalOps, OptForSize, Cost, Depth);
4309  }
4310
4311  //===--------------------------------------------------------------------===//
4312  // Lowering methods - These methods must be implemented by targets so that
4313  // the SelectionDAGBuilder code knows how to lower these.
4314  //
4315
4316  /// Target-specific splitting of values into parts that fit a register
4317  /// storing a legal type
4318  virtual bool splitValueIntoRegisterParts(
4319      SelectionDAG & DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
4320      unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
4321    return false;
4322  }
4323
4324  /// Allows the target to handle physreg-carried dependency
4325  /// in target-specific way. Used from the ScheduleDAGSDNodes to decide whether
4326  /// to add the edge to the dependency graph.
4327  /// Def - input: Selection DAG node defininfg physical register
4328  /// User - input: Selection DAG node using physical register
4329  /// Op - input: Number of User operand
4330  /// PhysReg - inout: set to the physical register if the edge is
4331  /// necessary, unchanged otherwise
4332  /// Cost - inout: physical register copy cost.
4333  /// Returns 'true' is the edge is necessary, 'false' otherwise
4334  virtual bool checkForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op,
4335                                         const TargetRegisterInfo *TRI,
4336                                         const TargetInstrInfo *TII,
4337                                         unsigned &PhysReg, int &Cost) const {
4338    return false;
4339  }
4340
4341  /// Target-specific combining of register parts into its original value
4342  virtual SDValue
4343  joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL,
4344                             const SDValue *Parts, unsigned NumParts,
4345                             MVT PartVT, EVT ValueVT,
4346                             std::optional<CallingConv::ID> CC) const {
4347    return SDValue();
4348  }
4349
4350  /// This hook must be implemented to lower the incoming (formal) arguments,
4351  /// described by the Ins array, into the specified DAG. The implementation
4352  /// should fill in the InVals array with legal-type argument values, and
4353  /// return the resulting token chain value.
4354  virtual SDValue LowerFormalArguments(
4355      SDValue /*Chain*/, CallingConv::ID /*CallConv*/, bool /*isVarArg*/,
4356      const SmallVectorImpl<ISD::InputArg> & /*Ins*/, const SDLoc & /*dl*/,
4357      SelectionDAG & /*DAG*/, SmallVectorImpl<SDValue> & /*InVals*/) const {
4358    llvm_unreachable("Not Implemented");
4359  }
4360
4361  /// This structure contains all information that is necessary for lowering
4362  /// calls. It is passed to TLI::LowerCallTo when the SelectionDAG builder
4363  /// needs to lower a call, and targets will see this struct in their LowerCall
4364  /// implementation.
4365  struct CallLoweringInfo {
4366    SDValue Chain;
4367    Type *RetTy = nullptr;
4368    bool RetSExt           : 1;
4369    bool RetZExt           : 1;
4370    bool IsVarArg          : 1;
4371    bool IsInReg           : 1;
4372    bool DoesNotReturn     : 1;
4373    bool IsReturnValueUsed : 1;
4374    bool IsConvergent      : 1;
4375    bool IsPatchPoint      : 1;
4376    bool IsPreallocated : 1;
4377    bool NoMerge           : 1;
4378
4379    // IsTailCall should be modified by implementations of
4380    // TargetLowering::LowerCall that perform tail call conversions.
4381    bool IsTailCall = false;
4382
4383    // Is Call lowering done post SelectionDAG type legalization.
4384    bool IsPostTypeLegalization = false;
4385
4386    unsigned NumFixedArgs = -1;
4387    CallingConv::ID CallConv = CallingConv::C;
4388    SDValue Callee;
4389    ArgListTy Args;
4390    SelectionDAG &DAG;
4391    SDLoc DL;
4392    const CallBase *CB = nullptr;
4393    SmallVector<ISD::OutputArg, 32> Outs;
4394    SmallVector<SDValue, 32> OutVals;
4395    SmallVector<ISD::InputArg, 32> Ins;
4396    SmallVector<SDValue, 4> InVals;
4397    const ConstantInt *CFIType = nullptr;
4398
4399    CallLoweringInfo(SelectionDAG &DAG)
4400        : RetSExt(false), RetZExt(false), IsVarArg(false), IsInReg(false),
4401          DoesNotReturn(false), IsReturnValueUsed(true), IsConvergent(false),
4402          IsPatchPoint(false), IsPreallocated(false), NoMerge(false),
4403          DAG(DAG) {}
4404
4405    CallLoweringInfo &setDebugLoc(const SDLoc &dl) {
4406      DL = dl;
4407      return *this;
4408    }
4409
4410    CallLoweringInfo &setChain(SDValue InChain) {
4411      Chain = InChain;
4412      return *this;
4413    }
4414
4415    // setCallee with target/module-specific attributes
4416    CallLoweringInfo &setLibCallee(CallingConv::ID CC, Type *ResultType,
4417                                   SDValue Target, ArgListTy &&ArgsList) {
4418      RetTy = ResultType;
4419      Callee = Target;
4420      CallConv = CC;
4421      NumFixedArgs = ArgsList.size();
4422      Args = std::move(ArgsList);
4423
4424      DAG.getTargetLoweringInfo().markLibCallAttributes(
4425          &(DAG.getMachineFunction()), CC, Args);
4426      return *this;
4427    }
4428
4429    CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultType,
4430                                SDValue Target, ArgListTy &&ArgsList,
4431                                AttributeSet ResultAttrs = {}) {
4432      RetTy = ResultType;
4433      IsInReg = ResultAttrs.hasAttribute(Attribute::InReg);
4434      RetSExt = ResultAttrs.hasAttribute(Attribute::SExt);
4435      RetZExt = ResultAttrs.hasAttribute(Attribute::ZExt);
4436      NoMerge = ResultAttrs.hasAttribute(Attribute::NoMerge);
4437
4438      Callee = Target;
4439      CallConv = CC;
4440      NumFixedArgs = ArgsList.size();
4441      Args = std::move(ArgsList);
4442      return *this;
4443    }
4444
4445    CallLoweringInfo &setCallee(Type *ResultType, FunctionType *FTy,
4446                                SDValue Target, ArgListTy &&ArgsList,
4447                                const CallBase &Call) {
4448      RetTy = ResultType;
4449
4450      IsInReg = Call.hasRetAttr(Attribute::InReg);
4451      DoesNotReturn =
4452          Call.doesNotReturn() ||
4453          (!isa<InvokeInst>(Call) && isa<UnreachableInst>(Call.getNextNode()));
4454      IsVarArg = FTy->isVarArg();
4455      IsReturnValueUsed = !Call.use_empty();
4456      RetSExt = Call.hasRetAttr(Attribute::SExt);
4457      RetZExt = Call.hasRetAttr(Attribute::ZExt);
4458      NoMerge = Call.hasFnAttr(Attribute::NoMerge);
4459
4460      Callee = Target;
4461
4462      CallConv = Call.getCallingConv();
4463      NumFixedArgs = FTy->getNumParams();
4464      Args = std::move(ArgsList);
4465
4466      CB = &Call;
4467
4468      return *this;
4469    }
4470
4471    CallLoweringInfo &setInRegister(bool Value = true) {
4472      IsInReg = Value;
4473      return *this;
4474    }
4475
4476    CallLoweringInfo &setNoReturn(bool Value = true) {
4477      DoesNotReturn = Value;
4478      return *this;
4479    }
4480
4481    CallLoweringInfo &setVarArg(bool Value = true) {
4482      IsVarArg = Value;
4483      return *this;
4484    }
4485
4486    CallLoweringInfo &setTailCall(bool Value = true) {
4487      IsTailCall = Value;
4488      return *this;
4489    }
4490
4491    CallLoweringInfo &setDiscardResult(bool Value = true) {
4492      IsReturnValueUsed = !Value;
4493      return *this;
4494    }
4495
4496    CallLoweringInfo &setConvergent(bool Value = true) {
4497      IsConvergent = Value;
4498      return *this;
4499    }
4500
4501    CallLoweringInfo &setSExtResult(bool Value = true) {
4502      RetSExt = Value;
4503      return *this;
4504    }
4505
4506    CallLoweringInfo &setZExtResult(bool Value = true) {
4507      RetZExt = Value;
4508      return *this;
4509    }
4510
4511    CallLoweringInfo &setIsPatchPoint(bool Value = true) {
4512      IsPatchPoint = Value;
4513      return *this;
4514    }
4515
4516    CallLoweringInfo &setIsPreallocated(bool Value = true) {
4517      IsPreallocated = Value;
4518      return *this;
4519    }
4520
4521    CallLoweringInfo &setIsPostTypeLegalization(bool Value=true) {
4522      IsPostTypeLegalization = Value;
4523      return *this;
4524    }
4525
4526    CallLoweringInfo &setCFIType(const ConstantInt *Type) {
4527      CFIType = Type;
4528      return *this;
4529    }
4530
4531    ArgListTy &getArgs() {
4532      return Args;
4533    }
4534  };
4535
4536  /// This structure is used to pass arguments to makeLibCall function.
4537  struct MakeLibCallOptions {
4538    // By passing type list before soften to makeLibCall, the target hook
4539    // shouldExtendTypeInLibCall can get the original type before soften.
4540    ArrayRef<EVT> OpsVTBeforeSoften;
4541    EVT RetVTBeforeSoften;
4542    bool IsSExt : 1;
4543    bool DoesNotReturn : 1;
4544    bool IsReturnValueUsed : 1;
4545    bool IsPostTypeLegalization : 1;
4546    bool IsSoften : 1;
4547
4548    MakeLibCallOptions()
4549        : IsSExt(false), DoesNotReturn(false), IsReturnValueUsed(true),
4550          IsPostTypeLegalization(false), IsSoften(false) {}
4551
4552    MakeLibCallOptions &setSExt(bool Value = true) {
4553      IsSExt = Value;
4554      return *this;
4555    }
4556
4557    MakeLibCallOptions &setNoReturn(bool Value = true) {
4558      DoesNotReturn = Value;
4559      return *this;
4560    }
4561
4562    MakeLibCallOptions &setDiscardResult(bool Value = true) {
4563      IsReturnValueUsed = !Value;
4564      return *this;
4565    }
4566
4567    MakeLibCallOptions &setIsPostTypeLegalization(bool Value = true) {
4568      IsPostTypeLegalization = Value;
4569      return *this;
4570    }
4571
4572    MakeLibCallOptions &setTypeListBeforeSoften(ArrayRef<EVT> OpsVT, EVT RetVT,
4573                                                bool Value = true) {
4574      OpsVTBeforeSoften = OpsVT;
4575      RetVTBeforeSoften = RetVT;
4576      IsSoften = Value;
4577      return *this;
4578    }
4579  };
4580
4581  /// This function lowers an abstract call to a function into an actual call.
4582  /// This returns a pair of operands.  The first element is the return value
4583  /// for the function (if RetTy is not VoidTy).  The second element is the
4584  /// outgoing token chain. It calls LowerCall to do the actual lowering.
4585  std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;
4586
4587  /// This hook must be implemented to lower calls into the specified
4588  /// DAG. The outgoing arguments to the call are described by the Outs array,
4589  /// and the values to be returned by the call are described by the Ins
4590  /// array. The implementation should fill in the InVals array with legal-type
4591  /// return values from the call, and return the resulting token chain value.
4592  virtual SDValue
4593    LowerCall(CallLoweringInfo &/*CLI*/,
4594              SmallVectorImpl<SDValue> &/*InVals*/) const {
4595    llvm_unreachable("Not Implemented");
4596  }
4597
4598  /// Target-specific cleanup for formal ByVal parameters.
4599  virtual void HandleByVal(CCState *, unsigned &, Align) const {}
4600
4601  /// This hook should be implemented to check whether the return values
4602  /// described by the Outs array can fit into the return registers.  If false
4603  /// is returned, an sret-demotion is performed.
4604  virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
4605                              MachineFunction &/*MF*/, bool /*isVarArg*/,
4606               const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
4607               LLVMContext &/*Context*/) const
4608  {
4609    // Return true by default to get preexisting behavior.
4610    return true;
4611  }
4612
4613  /// This hook must be implemented to lower outgoing return values, described
4614  /// by the Outs array, into the specified DAG. The implementation should
4615  /// return the resulting token chain value.
4616  virtual SDValue LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
4617                              bool /*isVarArg*/,
4618                              const SmallVectorImpl<ISD::OutputArg> & /*Outs*/,
4619                              const SmallVectorImpl<SDValue> & /*OutVals*/,
4620                              const SDLoc & /*dl*/,
4621                              SelectionDAG & /*DAG*/) const {
4622    llvm_unreachable("Not Implemented");
4623  }
4624
4625  /// Return true if result of the specified node is used by a return node
4626  /// only. It also compute and return the input chain for the tail call.
4627  ///
4628  /// This is used to determine whether it is possible to codegen a libcall as
4629  /// tail call at legalization time.
4630  virtual bool isUsedByReturnOnly(SDNode *, SDValue &/*Chain*/) const {
4631    return false;
4632  }
4633
4634  /// Return true if the target may be able emit the call instruction as a tail
4635  /// call. This is used by optimization passes to determine if it's profitable
4636  /// to duplicate return instructions to enable tailcall optimization.
4637  virtual bool mayBeEmittedAsTailCall(const CallInst *) const {
4638    return false;
4639  }
4640
4641  /// Return the builtin name for the __builtin___clear_cache intrinsic
4642  /// Default is to invoke the clear cache library call
4643  virtual const char * getClearCacheBuiltinName() const {
4644    return "__clear_cache";
4645  }
4646
4647  /// Return the register ID of the name passed in. Used by named register
4648  /// global variables extension. There is no target-independent behaviour
4649  /// so the default action is to bail.
4650  virtual Register getRegisterByName(const char* RegName, LLT Ty,
4651                                     const MachineFunction &MF) const {
4652    report_fatal_error("Named registers not implemented for this target");
4653  }
4654
4655  /// Return the type that should be used to zero or sign extend a
4656  /// zeroext/signext integer return value.  FIXME: Some C calling conventions
4657  /// require the return type to be promoted, but this is not true all the time,
4658  /// e.g. i1/i8/i16 on x86/x86_64. It is also not necessary for non-C calling
4659  /// conventions. The frontend should handle this and include all of the
4660  /// necessary information.
4661  virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT,
4662                                       ISD::NodeType /*ExtendKind*/) const {
4663    EVT MinVT = getRegisterType(MVT::i32);
4664    return VT.bitsLT(MinVT) ? MinVT : VT;
4665  }
4666
4667  /// For some targets, an LLVM struct type must be broken down into multiple
4668  /// simple types, but the calling convention specifies that the entire struct
4669  /// must be passed in a block of consecutive registers.
4670  virtual bool
4671  functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv,
4672                                            bool isVarArg,
4673                                            const DataLayout &DL) const {
4674    return false;
4675  }
4676
4677  /// For most targets, an LLVM type must be broken down into multiple
4678  /// smaller types. Usually the halves are ordered according to the endianness
4679  /// but for some platform that would break. So this method will default to
4680  /// matching the endianness but can be overridden.
4681  virtual bool
4682  shouldSplitFunctionArgumentsAsLittleEndian(const DataLayout &DL) const {
4683    return DL.isLittleEndian();
4684  }
4685
4686  /// Returns a 0 terminated array of registers that can be safely used as
4687  /// scratch registers.
4688  virtual const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const {
4689    return nullptr;
4690  }
4691
4692  /// Returns a 0 terminated array of rounding control registers that can be
4693  /// attached into strict FP call.
4694  virtual ArrayRef<MCPhysReg> getRoundingControlRegisters() const {
4695    return ArrayRef<MCPhysReg>();
4696  }
4697
4698  /// This callback is used to prepare for a volatile or atomic load.
4699  /// It takes a chain node as input and returns the chain for the load itself.
4700  ///
4701  /// Having a callback like this is necessary for targets like SystemZ,
4702  /// which allows a CPU to reuse the result of a previous load indefinitely,
4703  /// even if a cache-coherent store is performed by another CPU.  The default
4704  /// implementation does nothing.
4705  virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL,
4706                                              SelectionDAG &DAG) const {
4707    return Chain;
4708  }
4709
4710  /// This callback is invoked by the type legalizer to legalize nodes with an
4711  /// illegal operand type but legal result types.  It replaces the
4712  /// LowerOperation callback in the type Legalizer.  The reason we can not do
4713  /// away with LowerOperation entirely is that LegalizeDAG isn't yet ready to
4714  /// use this callback.
4715  ///
4716  /// TODO: Consider merging with ReplaceNodeResults.
4717  ///
4718  /// The target places new result values for the node in Results (their number
4719  /// and types must exactly match those of the original return values of
4720  /// the node), or leaves Results empty, which indicates that the node is not
4721  /// to be custom lowered after all.
4722  /// The default implementation calls LowerOperation.
4723  virtual void LowerOperationWrapper(SDNode *N,
4724                                     SmallVectorImpl<SDValue> &Results,
4725                                     SelectionDAG &DAG) const;
4726
4727  /// This callback is invoked for operations that are unsupported by the
4728  /// target, which are registered to use 'custom' lowering, and whose defined
4729  /// values are all legal.  If the target has no operations that require custom
4730  /// lowering, it need not implement this.  The default implementation of this
4731  /// aborts.
4732  virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
4733
4734  /// This callback is invoked when a node result type is illegal for the
4735  /// target, and the operation was registered to use 'custom' lowering for that
4736  /// result type.  The target places new result values for the node in Results
4737  /// (their number and types must exactly match those of the original return
4738  /// values of the node), or leaves Results empty, which indicates that the
4739  /// node is not to be custom lowered after all.
4740  ///
4741  /// If the target has no operations that require custom lowering, it need not
4742  /// implement this.  The default implementation aborts.
4743  virtual void ReplaceNodeResults(SDNode * /*N*/,
4744                                  SmallVectorImpl<SDValue> &/*Results*/,
4745                                  SelectionDAG &/*DAG*/) const {
4746    llvm_unreachable("ReplaceNodeResults not implemented for this target!");
4747  }
4748
4749  /// This method returns the name of a target specific DAG node.
4750  virtual const char *getTargetNodeName(unsigned Opcode) const;
4751
4752  /// This method returns a target specific FastISel object, or null if the
4753  /// target does not support "fast" ISel.
4754  virtual FastISel *createFastISel(FunctionLoweringInfo &,
4755                                   const TargetLibraryInfo *) const {
4756    return nullptr;
4757  }
4758
4759  bool verifyReturnAddressArgumentIsConstant(SDValue Op,
4760                                             SelectionDAG &DAG) const;
4761
4762  //===--------------------------------------------------------------------===//
4763  // Inline Asm Support hooks
4764  //
4765
4766  /// This hook allows the target to expand an inline asm call to be explicit
4767  /// llvm code if it wants to.  This is useful for turning simple inline asms
4768  /// into LLVM intrinsics, which gives the compiler more information about the
4769  /// behavior of the code.
4770  virtual bool ExpandInlineAsm(CallInst *) const {
4771    return false;
4772  }
4773
4774  enum ConstraintType {
4775    C_Register,            // Constraint represents specific register(s).
4776    C_RegisterClass,       // Constraint represents any of register(s) in class.
4777    C_Memory,              // Memory constraint.
4778    C_Address,             // Address constraint.
4779    C_Immediate,           // Requires an immediate.
4780    C_Other,               // Something else.
4781    C_Unknown              // Unsupported constraint.
4782  };
4783
4784  enum ConstraintWeight {
4785    // Generic weights.
4786    CW_Invalid  = -1,     // No match.
4787    CW_Okay     = 0,      // Acceptable.
4788    CW_Good     = 1,      // Good weight.
4789    CW_Better   = 2,      // Better weight.
4790    CW_Best     = 3,      // Best weight.
4791
4792    // Well-known weights.
4793    CW_SpecificReg  = CW_Okay,    // Specific register operands.
4794    CW_Register     = CW_Good,    // Register operands.
4795    CW_Memory       = CW_Better,  // Memory operands.
4796    CW_Constant     = CW_Best,    // Constant operand.
4797    CW_Default      = CW_Okay     // Default or don't know type.
4798  };
4799
4800  /// This contains information for each constraint that we are lowering.
4801  struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
4802    /// This contains the actual string for the code, like "m".  TargetLowering
4803    /// picks the 'best' code from ConstraintInfo::Codes that most closely
4804    /// matches the operand.
4805    std::string ConstraintCode;
4806
4807    /// Information about the constraint code, e.g. Register, RegisterClass,
4808    /// Memory, Other, Unknown.
4809    TargetLowering::ConstraintType ConstraintType = TargetLowering::C_Unknown;
4810
4811    /// If this is the result output operand or a clobber, this is null,
4812    /// otherwise it is the incoming operand to the CallInst.  This gets
4813    /// modified as the asm is processed.
4814    Value *CallOperandVal = nullptr;
4815
4816    /// The ValueType for the operand value.
4817    MVT ConstraintVT = MVT::Other;
4818
4819    /// Copy constructor for copying from a ConstraintInfo.
4820    AsmOperandInfo(InlineAsm::ConstraintInfo Info)
4821        : InlineAsm::ConstraintInfo(std::move(Info)) {}
4822
4823    /// Return true of this is an input operand that is a matching constraint
4824    /// like "4".
4825    bool isMatchingInputConstraint() const;
4826
4827    /// If this is an input matching constraint, this method returns the output
4828    /// operand it matches.
4829    unsigned getMatchedOperand() const;
4830  };
4831
4832  using AsmOperandInfoVector = std::vector<AsmOperandInfo>;
4833
4834  /// Split up the constraint string from the inline assembly value into the
4835  /// specific constraints and their prefixes, and also tie in the associated
4836  /// operand values.  If this returns an empty vector, and if the constraint
4837  /// string itself isn't empty, there was an error parsing.
4838  virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL,
4839                                                const TargetRegisterInfo *TRI,
4840                                                const CallBase &Call) const;
4841
4842  /// Examine constraint type and operand type and determine a weight value.
4843  /// The operand object must already have been set up with the operand type.
4844  virtual ConstraintWeight getMultipleConstraintMatchWeight(
4845      AsmOperandInfo &info, int maIndex) const;
4846
4847  /// Examine constraint string and operand type and determine a weight value.
4848  /// The operand object must already have been set up with the operand type.
4849  virtual ConstraintWeight getSingleConstraintMatchWeight(
4850      AsmOperandInfo &info, const char *constraint) const;
4851
4852  /// Determines the constraint code and constraint type to use for the specific
4853  /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
4854  /// If the actual operand being passed in is available, it can be passed in as
4855  /// Op, otherwise an empty SDValue can be passed.
4856  virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
4857                                      SDValue Op,
4858                                      SelectionDAG *DAG = nullptr) const;
4859
4860  /// Given a constraint, return the type of constraint it is for this target.
4861  virtual ConstraintType getConstraintType(StringRef Constraint) const;
4862
4863  using ConstraintPair = std::pair<StringRef, TargetLowering::ConstraintType>;
4864  using ConstraintGroup = SmallVector<ConstraintPair>;
4865  /// Given an OpInfo with list of constraints codes as strings, return a
4866  /// sorted Vector of pairs of constraint codes and their types in priority of
4867  /// what we'd prefer to lower them as. This may contain immediates that
4868  /// cannot be lowered, but it is meant to be a machine agnostic order of
4869  /// preferences.
4870  ConstraintGroup getConstraintPreferences(AsmOperandInfo &OpInfo) const;
4871
4872  /// Given a physical register constraint (e.g.  {edx}), return the register
4873  /// number and the register class for the register.
4874  ///
4875  /// Given a register class constraint, like 'r', if this corresponds directly
4876  /// to an LLVM register class, return a register of 0 and the register class
4877  /// pointer.
4878  ///
4879  /// This should only be used for C_Register constraints.  On error, this
4880  /// returns a register number of 0 and a null register class pointer.
4881  virtual std::pair<unsigned, const TargetRegisterClass *>
4882  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
4883                               StringRef Constraint, MVT VT) const;
4884
4885  virtual InlineAsm::ConstraintCode
4886  getInlineAsmMemConstraint(StringRef ConstraintCode) const {
4887    if (ConstraintCode == "m")
4888      return InlineAsm::ConstraintCode::m;
4889    if (ConstraintCode == "o")
4890      return InlineAsm::ConstraintCode::o;
4891    if (ConstraintCode == "X")
4892      return InlineAsm::ConstraintCode::X;
4893    if (ConstraintCode == "p")
4894      return InlineAsm::ConstraintCode::p;
4895    return InlineAsm::ConstraintCode::Unknown;
4896  }
4897
4898  /// Try to replace an X constraint, which matches anything, with another that
4899  /// has more specific requirements based on the type of the corresponding
4900  /// operand.  This returns null if there is no replacement to make.
4901  virtual const char *LowerXConstraint(EVT ConstraintVT) const;
4902
4903  /// Lower the specified operand into the Ops vector.  If it is invalid, don't
4904  /// add anything to Ops.
4905  virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint,
4906                                            std::vector<SDValue> &Ops,
4907                                            SelectionDAG &DAG) const;
4908
4909  // Lower custom output constraints. If invalid, return SDValue().
4910  virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Glue,
4911                                              const SDLoc &DL,
4912                                              const AsmOperandInfo &OpInfo,
4913                                              SelectionDAG &DAG) const;
4914
4915  // Targets may override this function to collect operands from the CallInst
4916  // and for example, lower them into the SelectionDAG operands.
4917  virtual void CollectTargetIntrinsicOperands(const CallInst &I,
4918                                              SmallVectorImpl<SDValue> &Ops,
4919                                              SelectionDAG &DAG) const;
4920
4921  //===--------------------------------------------------------------------===//
4922  // Div utility functions
4923  //
4924
4925  SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
4926                    SmallVectorImpl<SDNode *> &Created) const;
4927  SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
4928                    SmallVectorImpl<SDNode *> &Created) const;
4929  // Build sdiv by power-of-2 with conditional move instructions
4930  SDValue buildSDIVPow2WithCMov(SDNode *N, const APInt &Divisor,
4931                                SelectionDAG &DAG,
4932                                SmallVectorImpl<SDNode *> &Created) const;
4933
4934  /// Targets may override this function to provide custom SDIV lowering for
4935  /// power-of-2 denominators.  If the target returns an empty SDValue, LLVM
4936  /// assumes SDIV is expensive and replaces it with a series of other integer
4937  /// operations.
4938  virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor,
4939                                SelectionDAG &DAG,
4940                                SmallVectorImpl<SDNode *> &Created) const;
4941
4942  /// Targets may override this function to provide custom SREM lowering for
4943  /// power-of-2 denominators.  If the target returns an empty SDValue, LLVM
4944  /// assumes SREM is expensive and replaces it with a series of other integer
4945  /// operations.
4946  virtual SDValue BuildSREMPow2(SDNode *N, const APInt &Divisor,
4947                                SelectionDAG &DAG,
4948                                SmallVectorImpl<SDNode *> &Created) const;
4949
4950  /// Indicate whether this target prefers to combine FDIVs with the same
4951  /// divisor. If the transform should never be done, return zero. If the
4952  /// transform should be done, return the minimum number of divisor uses
4953  /// that must exist.
4954  virtual unsigned combineRepeatedFPDivisors() const {
4955    return 0;
4956  }
4957
4958  /// Hooks for building estimates in place of slower divisions and square
4959  /// roots.
4960
4961  /// Return either a square root or its reciprocal estimate value for the input
4962  /// operand.
4963  /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
4964  /// 'Enabled' as set by a potential default override attribute.
4965  /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
4966  /// refinement iterations required to generate a sufficient (though not
4967  /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
4968  /// The boolean UseOneConstNR output is used to select a Newton-Raphson
4969  /// algorithm implementation that uses either one or two constants.
4970  /// The boolean Reciprocal is used to select whether the estimate is for the
4971  /// square root of the input operand or the reciprocal of its square root.
4972  /// A target may choose to implement its own refinement within this function.
4973  /// If that's true, then return '0' as the number of RefinementSteps to avoid
4974  /// any further refinement of the estimate.
4975  /// An empty SDValue return means no estimate sequence can be created.
4976  virtual SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
4977                                  int Enabled, int &RefinementSteps,
4978                                  bool &UseOneConstNR, bool Reciprocal) const {
4979    return SDValue();
4980  }
4981
4982  /// Try to convert the fminnum/fmaxnum to a compare/select sequence. This is
4983  /// required for correctness since InstCombine might have canonicalized a
4984  /// fcmp+select sequence to a FMINNUM/FMAXNUM intrinsic.  If we were to fall
4985  /// through to the default expansion/soften to libcall, we might introduce a
4986  /// link-time dependency on libm into a file that originally did not have one.
4987  SDValue createSelectForFMINNUM_FMAXNUM(SDNode *Node, SelectionDAG &DAG) const;
4988
4989  /// Return a reciprocal estimate value for the input operand.
4990  /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
4991  /// 'Enabled' as set by a potential default override attribute.
4992  /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
4993  /// refinement iterations required to generate a sufficient (though not
4994  /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
4995  /// A target may choose to implement its own refinement within this function.
4996  /// If that's true, then return '0' as the number of RefinementSteps to avoid
4997  /// any further refinement of the estimate.
4998  /// An empty SDValue return means no estimate sequence can be created.
4999  virtual SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
5000                                   int Enabled, int &RefinementSteps) const {
5001    return SDValue();
5002  }
5003
5004  /// Return a target-dependent comparison result if the input operand is
5005  /// suitable for use with a square root estimate calculation. For example, the
5006  /// comparison may check if the operand is NAN, INF, zero, normal, etc. The
5007  /// result should be used as the condition operand for a select or branch.
5008  virtual SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG,
5009                                   const DenormalMode &Mode) const;
5010
5011  /// Return a target-dependent result if the input operand is not suitable for
5012  /// use with a square root estimate calculation.
5013  virtual SDValue getSqrtResultForDenormInput(SDValue Operand,
5014                                              SelectionDAG &DAG) const {
5015    return DAG.getConstantFP(0.0, SDLoc(Operand), Operand.getValueType());
5016  }
5017
5018  //===--------------------------------------------------------------------===//
5019  // Legalization utility functions
5020  //
5021
5022  /// Expand a MUL or [US]MUL_LOHI of n-bit values into two or four nodes,
5023  /// respectively, each computing an n/2-bit part of the result.
5024  /// \param Result A vector that will be filled with the parts of the result
5025  ///        in little-endian order.
5026  /// \param LL Low bits of the LHS of the MUL.  You can use this parameter
5027  ///        if you want to control how low bits are extracted from the LHS.
5028  /// \param LH High bits of the LHS of the MUL.  See LL for meaning.
5029  /// \param RL Low bits of the RHS of the MUL.  See LL for meaning
5030  /// \param RH High bits of the RHS of the MUL.  See LL for meaning.
5031  /// \returns true if the node has been expanded, false if it has not
5032  bool expandMUL_LOHI(unsigned Opcode, EVT VT, const SDLoc &dl, SDValue LHS,
5033                      SDValue RHS, SmallVectorImpl<SDValue> &Result, EVT HiLoVT,
5034                      SelectionDAG &DAG, MulExpansionKind Kind,
5035                      SDValue LL = SDValue(), SDValue LH = SDValue(),
5036                      SDValue RL = SDValue(), SDValue RH = SDValue()) const;
5037
5038  /// Expand a MUL into two nodes.  One that computes the high bits of
5039  /// the result and one that computes the low bits.
5040  /// \param HiLoVT The value type to use for the Lo and Hi nodes.
5041  /// \param LL Low bits of the LHS of the MUL.  You can use this parameter
5042  ///        if you want to control how low bits are extracted from the LHS.
5043  /// \param LH High bits of the LHS of the MUL.  See LL for meaning.
5044  /// \param RL Low bits of the RHS of the MUL.  See LL for meaning
5045  /// \param RH High bits of the RHS of the MUL.  See LL for meaning.
5046  /// \returns true if the node has been expanded. false if it has not
5047  bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
5048                 SelectionDAG &DAG, MulExpansionKind Kind,
5049                 SDValue LL = SDValue(), SDValue LH = SDValue(),
5050                 SDValue RL = SDValue(), SDValue RH = SDValue()) const;
5051
5052  /// Attempt to expand an n-bit div/rem/divrem by constant using a n/2-bit
5053  /// urem by constant and other arithmetic ops. The n/2-bit urem by constant
5054  /// will be expanded by DAGCombiner. This is not possible for all constant
5055  /// divisors.
5056  /// \param N Node to expand
5057  /// \param Result A vector that will be filled with the lo and high parts of
5058  ///        the results. For *DIVREM, this will be the quotient parts followed
5059  ///        by the remainder parts.
5060  /// \param HiLoVT The value type to use for the Lo and Hi parts. Should be
5061  ///        half of VT.
5062  /// \param LL Low bits of the LHS of the operation. You can use this
5063  ///        parameter if you want to control how low bits are extracted from
5064  ///        the LHS.
5065  /// \param LH High bits of the LHS of the operation. See LL for meaning.
5066  /// \returns true if the node has been expanded, false if it has not.
5067  bool expandDIVREMByConstant(SDNode *N, SmallVectorImpl<SDValue> &Result,
5068                              EVT HiLoVT, SelectionDAG &DAG,
5069                              SDValue LL = SDValue(),
5070                              SDValue LH = SDValue()) const;
5071
5072  /// Expand funnel shift.
5073  /// \param N Node to expand
5074  /// \returns The expansion if successful, SDValue() otherwise
5075  SDValue expandFunnelShift(SDNode *N, SelectionDAG &DAG) const;
5076
5077  /// Expand rotations.
5078  /// \param N Node to expand
5079  /// \param AllowVectorOps expand vector rotate, this should only be performed
5080  ///        if the legalization is happening outside of LegalizeVectorOps
5081  /// \returns The expansion if successful, SDValue() otherwise
5082  SDValue expandROT(SDNode *N, bool AllowVectorOps, SelectionDAG &DAG) const;
5083
5084  /// Expand shift-by-parts.
5085  /// \param N Node to expand
5086  /// \param Lo lower-output-part after conversion
5087  /// \param Hi upper-output-part after conversion
5088  void expandShiftParts(SDNode *N, SDValue &Lo, SDValue &Hi,
5089                        SelectionDAG &DAG) const;
5090
5091  /// Expand float(f32) to SINT(i64) conversion
5092  /// \param N Node to expand
5093  /// \param Result output after conversion
5094  /// \returns True, if the expansion was successful, false otherwise
5095  bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
5096
5097  /// Expand float to UINT conversion
5098  /// \param N Node to expand
5099  /// \param Result output after conversion
5100  /// \param Chain output chain after conversion
5101  /// \returns True, if the expansion was successful, false otherwise
5102  bool expandFP_TO_UINT(SDNode *N, SDValue &Result, SDValue &Chain,
5103                        SelectionDAG &DAG) const;
5104
5105  /// Expand UINT(i64) to double(f64) conversion
5106  /// \param N Node to expand
5107  /// \param Result output after conversion
5108  /// \param Chain output chain after conversion
5109  /// \returns True, if the expansion was successful, false otherwise
5110  bool expandUINT_TO_FP(SDNode *N, SDValue &Result, SDValue &Chain,
5111                        SelectionDAG &DAG) const;
5112
5113  /// Expand fminnum/fmaxnum into fminnum_ieee/fmaxnum_ieee with quieted inputs.
5114  SDValue expandFMINNUM_FMAXNUM(SDNode *N, SelectionDAG &DAG) const;
5115
5116  /// Expand FP_TO_[US]INT_SAT into FP_TO_[US]INT and selects or min/max.
5117  /// \param N Node to expand
5118  /// \returns The expansion result
5119  SDValue expandFP_TO_INT_SAT(SDNode *N, SelectionDAG &DAG) const;
5120
5121  /// Expand check for floating point class.
5122  /// \param ResultVT The type of intrinsic call result.
5123  /// \param Op The tested value.
5124  /// \param Test The test to perform.
5125  /// \param Flags The optimization flags.
5126  /// \returns The expansion result or SDValue() if it fails.
5127  SDValue expandIS_FPCLASS(EVT ResultVT, SDValue Op, FPClassTest Test,
5128                           SDNodeFlags Flags, const SDLoc &DL,
5129                           SelectionDAG &DAG) const;
5130
5131  /// Expand CTPOP nodes. Expands vector/scalar CTPOP nodes,
5132  /// vector nodes can only succeed if all operations are legal/custom.
5133  /// \param N Node to expand
5134  /// \returns The expansion result or SDValue() if it fails.
5135  SDValue expandCTPOP(SDNode *N, SelectionDAG &DAG) const;
5136
5137  /// Expand VP_CTPOP nodes.
5138  /// \returns The expansion result or SDValue() if it fails.
5139  SDValue expandVPCTPOP(SDNode *N, SelectionDAG &DAG) const;
5140
5141  /// Expand CTLZ/CTLZ_ZERO_UNDEF nodes. Expands vector/scalar CTLZ nodes,
5142  /// vector nodes can only succeed if all operations are legal/custom.
5143  /// \param N Node to expand
5144  /// \returns The expansion result or SDValue() if it fails.
5145  SDValue expandCTLZ(SDNode *N, SelectionDAG &DAG) const;
5146
5147  /// Expand VP_CTLZ/VP_CTLZ_ZERO_UNDEF nodes.
5148  /// \param N Node to expand
5149  /// \returns The expansion result or SDValue() if it fails.
5150  SDValue expandVPCTLZ(SDNode *N, SelectionDAG &DAG) const;
5151
5152  /// Expand CTTZ via Table Lookup.
5153  /// \param N Node to expand
5154  /// \returns The expansion result or SDValue() if it fails.
5155  SDValue CTTZTableLookup(SDNode *N, SelectionDAG &DAG, const SDLoc &DL, EVT VT,
5156                          SDValue Op, unsigned NumBitsPerElt) const;
5157
5158  /// Expand CTTZ/CTTZ_ZERO_UNDEF nodes. Expands vector/scalar CTTZ nodes,
5159  /// vector nodes can only succeed if all operations are legal/custom.
5160  /// \param N Node to expand
5161  /// \returns The expansion result or SDValue() if it fails.
5162  SDValue expandCTTZ(SDNode *N, SelectionDAG &DAG) const;
5163
5164  /// Expand VP_CTTZ/VP_CTTZ_ZERO_UNDEF nodes.
5165  /// \param N Node to expand
5166  /// \returns The expansion result or SDValue() if it fails.
5167  SDValue expandVPCTTZ(SDNode *N, SelectionDAG &DAG) const;
5168
5169  /// Expand ABS nodes. Expands vector/scalar ABS nodes,
5170  /// vector nodes can only succeed if all operations are legal/custom.
5171  /// (ABS x) -> (XOR (ADD x, (SRA x, type_size)), (SRA x, type_size))
5172  /// \param N Node to expand
5173  /// \param IsNegative indicate negated abs
5174  /// \returns The expansion result or SDValue() if it fails.
5175  SDValue expandABS(SDNode *N, SelectionDAG &DAG,
5176                    bool IsNegative = false) const;
5177
5178  /// Expand ABDS/ABDU nodes. Expands vector/scalar ABDS/ABDU nodes.
5179  /// \param N Node to expand
5180  /// \returns The expansion result or SDValue() if it fails.
5181  SDValue expandABD(SDNode *N, SelectionDAG &DAG) const;
5182
5183  /// Expand BSWAP nodes. Expands scalar/vector BSWAP nodes with i16/i32/i64
5184  /// scalar types. Returns SDValue() if expand fails.
5185  /// \param N Node to expand
5186  /// \returns The expansion result or SDValue() if it fails.
5187  SDValue expandBSWAP(SDNode *N, SelectionDAG &DAG) const;
5188
5189  /// Expand VP_BSWAP nodes. Expands VP_BSWAP nodes with
5190  /// i16/i32/i64 scalar types. Returns SDValue() if expand fails. \param N Node
5191  /// to expand \returns The expansion result or SDValue() if it fails.
5192  SDValue expandVPBSWAP(SDNode *N, SelectionDAG &DAG) const;
5193
5194  /// Expand BITREVERSE nodes. Expands scalar/vector BITREVERSE nodes.
5195  /// Returns SDValue() if expand fails.
5196  /// \param N Node to expand
5197  /// \returns The expansion result or SDValue() if it fails.
5198  SDValue expandBITREVERSE(SDNode *N, SelectionDAG &DAG) const;
5199
5200  /// Expand VP_BITREVERSE nodes. Expands VP_BITREVERSE nodes with
5201  /// i8/i16/i32/i64 scalar types. \param N Node to expand \returns The
5202  /// expansion result or SDValue() if it fails.
5203  SDValue expandVPBITREVERSE(SDNode *N, SelectionDAG &DAG) const;
5204
5205  /// Turn load of vector type into a load of the individual elements.
5206  /// \param LD load to expand
5207  /// \returns BUILD_VECTOR and TokenFactor nodes.
5208  std::pair<SDValue, SDValue> scalarizeVectorLoad(LoadSDNode *LD,
5209                                                  SelectionDAG &DAG) const;
5210
5211  // Turn a store of a vector type into stores of the individual elements.
5212  /// \param ST Store with a vector value type
5213  /// \returns TokenFactor of the individual store chains.
5214  SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const;
5215
5216  /// Expands an unaligned load to 2 half-size loads for an integer, and
5217  /// possibly more for vectors.
5218  std::pair<SDValue, SDValue> expandUnalignedLoad(LoadSDNode *LD,
5219                                                  SelectionDAG &DAG) const;
5220
5221  /// Expands an unaligned store to 2 half-size stores for integer values, and
5222  /// possibly more for vectors.
5223  SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const;
5224
5225  /// Increments memory address \p Addr according to the type of the value
5226  /// \p DataVT that should be stored. If the data is stored in compressed
5227  /// form, the memory address should be incremented according to the number of
5228  /// the stored elements. This number is equal to the number of '1's bits
5229  /// in the \p Mask.
5230  /// \p DataVT is a vector type. \p Mask is a vector value.
5231  /// \p DataVT and \p Mask have the same number of vector elements.
5232  SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL,
5233                                 EVT DataVT, SelectionDAG &DAG,
5234                                 bool IsCompressedMemory) const;
5235
5236  /// Get a pointer to vector element \p Idx located in memory for a vector of
5237  /// type \p VecVT starting at a base address of \p VecPtr. If \p Idx is out of
5238  /// bounds the returned pointer is unspecified, but will be within the vector
5239  /// bounds.
5240  SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT,
5241                                  SDValue Index) const;
5242
5243  /// Get a pointer to a sub-vector of type \p SubVecVT at index \p Idx located
5244  /// in memory for a vector of type \p VecVT starting at a base address of
5245  /// \p VecPtr. If \p Idx plus the size of \p SubVecVT is out of bounds the
5246  /// returned pointer is unspecified, but the value returned will be such that
5247  /// the entire subvector would be within the vector bounds.
5248  SDValue getVectorSubVecPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT,
5249                                 EVT SubVecVT, SDValue Index) const;
5250
5251  /// Method for building the DAG expansion of ISD::[US][MIN|MAX]. This
5252  /// method accepts integers as its arguments.
5253  SDValue expandIntMINMAX(SDNode *Node, SelectionDAG &DAG) const;
5254
5255  /// Method for building the DAG expansion of ISD::[US][ADD|SUB]SAT. This
5256  /// method accepts integers as its arguments.
5257  SDValue expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const;
5258
5259  /// Method for building the DAG expansion of ISD::[US]SHLSAT. This
5260  /// method accepts integers as its arguments.
5261  SDValue expandShlSat(SDNode *Node, SelectionDAG &DAG) const;
5262
5263  /// Method for building the DAG expansion of ISD::[U|S]MULFIX[SAT]. This
5264  /// method accepts integers as its arguments.
5265  SDValue expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const;
5266
5267  /// Method for building the DAG expansion of ISD::[US]DIVFIX[SAT]. This
5268  /// method accepts integers as its arguments.
5269  /// Note: This method may fail if the division could not be performed
5270  /// within the type. Clients must retry with a wider type if this happens.
5271  SDValue expandFixedPointDiv(unsigned Opcode, const SDLoc &dl,
5272                              SDValue LHS, SDValue RHS,
5273                              unsigned Scale, SelectionDAG &DAG) const;
5274
5275  /// Method for building the DAG expansion of ISD::U(ADD|SUB)O. Expansion
5276  /// always suceeds and populates the Result and Overflow arguments.
5277  void expandUADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow,
5278                      SelectionDAG &DAG) const;
5279
5280  /// Method for building the DAG expansion of ISD::S(ADD|SUB)O. Expansion
5281  /// always suceeds and populates the Result and Overflow arguments.
5282  void expandSADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow,
5283                      SelectionDAG &DAG) const;
5284
5285  /// Method for building the DAG expansion of ISD::[US]MULO. Returns whether
5286  /// expansion was successful and populates the Result and Overflow arguments.
5287  bool expandMULO(SDNode *Node, SDValue &Result, SDValue &Overflow,
5288                  SelectionDAG &DAG) const;
5289
5290  /// Expand a VECREDUCE_* into an explicit calculation. If Count is specified,
5291  /// only the first Count elements of the vector are used.
5292  SDValue expandVecReduce(SDNode *Node, SelectionDAG &DAG) const;
5293
5294  /// Expand a VECREDUCE_SEQ_* into an explicit ordered calculation.
5295  SDValue expandVecReduceSeq(SDNode *Node, SelectionDAG &DAG) const;
5296
5297  /// Expand an SREM or UREM using SDIV/UDIV or SDIVREM/UDIVREM, if legal.
5298  /// Returns true if the expansion was successful.
5299  bool expandREM(SDNode *Node, SDValue &Result, SelectionDAG &DAG) const;
5300
5301  /// Method for building the DAG expansion of ISD::VECTOR_SPLICE. This
5302  /// method accepts vectors as its arguments.
5303  SDValue expandVectorSplice(SDNode *Node, SelectionDAG &DAG) const;
5304
5305  /// Legalize a SETCC or VP_SETCC with given LHS and RHS and condition code CC
5306  /// on the current target. A VP_SETCC will additionally be given a Mask
5307  /// and/or EVL not equal to SDValue().
5308  ///
5309  /// If the SETCC has been legalized using AND / OR, then the legalized node
5310  /// will be stored in LHS. RHS and CC will be set to SDValue(). NeedInvert
5311  /// will be set to false. This will also hold if the VP_SETCC has been
5312  /// legalized using VP_AND / VP_OR.
5313  ///
5314  /// If the SETCC / VP_SETCC has been legalized by using
5315  /// getSetCCSwappedOperands(), then the values of LHS and RHS will be
5316  /// swapped, CC will be set to the new condition, and NeedInvert will be set
5317  /// to false.
5318  ///
5319  /// If the SETCC / VP_SETCC has been legalized using the inverse condcode,
5320  /// then LHS and RHS will be unchanged, CC will set to the inverted condcode,
5321  /// and NeedInvert will be set to true. The caller must invert the result of
5322  /// the SETCC with SelectionDAG::getLogicalNOT() or take equivalent action to
5323  /// swap the effect of a true/false result.
5324  ///
5325  /// \returns true if the SETCC / VP_SETCC has been legalized, false if it
5326  /// hasn't.
5327  bool LegalizeSetCCCondCode(SelectionDAG &DAG, EVT VT, SDValue &LHS,
5328                             SDValue &RHS, SDValue &CC, SDValue Mask,
5329                             SDValue EVL, bool &NeedInvert, const SDLoc &dl,
5330                             SDValue &Chain, bool IsSignaling = false) const;
5331
5332  //===--------------------------------------------------------------------===//
5333  // Instruction Emitting Hooks
5334  //
5335
5336  /// This method should be implemented by targets that mark instructions with
5337  /// the 'usesCustomInserter' flag.  These instructions are special in various
5338  /// ways, which require special support to insert.  The specified MachineInstr
5339  /// is created but not inserted into any basic blocks, and this method is
5340  /// called to expand it into a sequence of instructions, potentially also
5341  /// creating new basic blocks and control flow.
5342  /// As long as the returned basic block is different (i.e., we created a new
5343  /// one), the custom inserter is free to modify the rest of \p MBB.
5344  virtual MachineBasicBlock *
5345  EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const;
5346
5347  /// This method should be implemented by targets that mark instructions with
5348  /// the 'hasPostISelHook' flag. These instructions must be adjusted after
5349  /// instruction selection by target hooks.  e.g. To fill in optional defs for
5350  /// ARM 's' setting instructions.
5351  virtual void AdjustInstrPostInstrSelection(MachineInstr &MI,
5352                                             SDNode *Node) const;
5353
5354  /// If this function returns true, SelectionDAGBuilder emits a
5355  /// LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector.
5356  virtual bool useLoadStackGuardNode() const {
5357    return false;
5358  }
5359
5360  virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
5361                                      const SDLoc &DL) const {
5362    llvm_unreachable("not implemented for this target");
5363  }
5364
5365  /// Lower TLS global address SDNode for target independent emulated TLS model.
5366  virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
5367                                          SelectionDAG &DAG) const;
5368
5369  /// Expands target specific indirect branch for the case of JumpTable
5370  /// expansion.
5371  virtual SDValue expandIndirectJTBranch(const SDLoc &dl, SDValue Value,
5372                                         SDValue Addr, int JTI,
5373                                         SelectionDAG &DAG) const;
5374
5375  // seteq(x, 0) -> truncate(srl(ctlz(zext(x)), log2(#bits)))
5376  // If we're comparing for equality to zero and isCtlzFast is true, expose the
5377  // fact that this can be implemented as a ctlz/srl pair, so that the dag
5378  // combiner can fold the new nodes.
5379  SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const;
5380
5381  // Return true if `X & Y eq/ne 0` is preferable to `X & Y ne/eq Y`
5382  virtual bool isXAndYEqZeroPreferableToXAndYEqY(ISD::CondCode, EVT) const {
5383    return true;
5384  }
5385
5386private:
5387  SDValue foldSetCCWithAnd(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
5388                           const SDLoc &DL, DAGCombinerInfo &DCI) const;
5389  SDValue foldSetCCWithBinOp(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
5390                             const SDLoc &DL, DAGCombinerInfo &DCI) const;
5391
5392  SDValue optimizeSetCCOfSignedTruncationCheck(EVT SCCVT, SDValue N0,
5393                                               SDValue N1, ISD::CondCode Cond,
5394                                               DAGCombinerInfo &DCI,
5395                                               const SDLoc &DL) const;
5396
5397  // (X & (C l>>/<< Y)) ==/!= 0  -->  ((X <</l>> Y) & C) ==/!= 0
5398  SDValue optimizeSetCCByHoistingAndByConstFromLogicalShift(
5399      EVT SCCVT, SDValue N0, SDValue N1C, ISD::CondCode Cond,
5400      DAGCombinerInfo &DCI, const SDLoc &DL) const;
5401
5402  SDValue prepareUREMEqFold(EVT SETCCVT, SDValue REMNode,
5403                            SDValue CompTargetNode, ISD::CondCode Cond,
5404                            DAGCombinerInfo &DCI, const SDLoc &DL,
5405                            SmallVectorImpl<SDNode *> &Created) const;
5406  SDValue buildUREMEqFold(EVT SETCCVT, SDValue REMNode, SDValue CompTargetNode,
5407                          ISD::CondCode Cond, DAGCombinerInfo &DCI,
5408                          const SDLoc &DL) const;
5409
5410  SDValue prepareSREMEqFold(EVT SETCCVT, SDValue REMNode,
5411                            SDValue CompTargetNode, ISD::CondCode Cond,
5412                            DAGCombinerInfo &DCI, const SDLoc &DL,
5413                            SmallVectorImpl<SDNode *> &Created) const;
5414  SDValue buildSREMEqFold(EVT SETCCVT, SDValue REMNode, SDValue CompTargetNode,
5415                          ISD::CondCode Cond, DAGCombinerInfo &DCI,
5416                          const SDLoc &DL) const;
5417};
5418
5419/// Given an LLVM IR type and return type attributes, compute the return value
5420/// EVTs and flags, and optionally also the offsets, if the return value is
5421/// being lowered to memory.
5422void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr,
5423                   SmallVectorImpl<ISD::OutputArg> &Outs,
5424                   const TargetLowering &TLI, const DataLayout &DL);
5425
5426} // end namespace llvm
5427
5428#endif // LLVM_CODEGEN_TARGETLOWERING_H
5429