//===- TargetTransformInfo.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This pass exposes codegen information to IR-level passes. Every
/// transformation that uses codegen information is broken into three parts:
/// 1. The IR-level analysis pass.
/// 2. The IR-level transformation interface which provides the needed
///    information.
/// 3. Codegen-level implementation which uses target-specific hooks.
///
/// This file defines #2, which is the interface that IR-level transformations
/// use for querying the codegen.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFO_H
#define LLVM_ANALYSIS_TARGETTRANSFORMINFO_H

#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/InstructionCost.h"
#include <functional>

namespace llvm {

namespace Intrinsic {
typedef unsigned ID;
}

class AssumptionCache;
class BlockFrequencyInfo;
class DominatorTree;
class BranchInst;
class CallBase;
class ExtractElementInst;
class Function;
class GlobalValue;
class InstCombiner;
class IntrinsicInst;
class LoadInst;
class LoopAccessInfo;
class Loop;
class LoopInfo;
class ProfileSummaryInfo;
class SCEV;
class ScalarEvolution;
class StoreInst;
class SwitchInst;
class TargetLibraryInfo;
class Type;
class User;
class Value;
class VPIntrinsic;
struct KnownBits;
template <typename T> class Optional;

/// Information about a load/store intrinsic defined by the target.
struct MemIntrinsicInfo {
  /// This is the pointer that the intrinsic is loading from or storing to.
  /// If this is non-null, then analysis/optimization passes can assume that
  /// this intrinsic is functionally equivalent to a load/store from this
  /// pointer.
  Value *PtrVal = nullptr;

  // Ordering for atomic operations.
  AtomicOrdering Ordering = AtomicOrdering::NotAtomic;

  // Same Id is set by the target for corresponding load/store intrinsics.
  unsigned short MatchingId = 0;

  bool ReadMem = false;
  bool WriteMem = false;
  bool IsVolatile = false;

  bool isUnordered() const {
    return (Ordering == AtomicOrdering::NotAtomic ||
            Ordering == AtomicOrdering::Unordered) &&
           !IsVolatile;
  }
};

/// Attributes of a target dependent hardware loop.
struct HardwareLoopInfo {
  HardwareLoopInfo() = delete;
  HardwareLoopInfo(Loop *L) : L(L) {}
  Loop *L = nullptr;
  BasicBlock *ExitBlock = nullptr;
  BranchInst *ExitBranch = nullptr;
  const SCEV *TripCount = nullptr;
  IntegerType *CountType = nullptr;
  Value *LoopDecrement = nullptr; // Decrement the loop counter by this
                                  // value in every iteration.
  bool IsNestingLegal = false;    // Can a hardware loop be a parent to
                                  // another hardware loop?
  bool CounterInReg = false;      // Should loop counter be updated in
                                  // the loop via a phi?
  bool PerformEntryTest = false;  // Generate the intrinsic which also performs
                                  // icmp ne zero on the loop counter value and
                                  // produces an i1 to guard the loop entry.
  bool isHardwareLoopCandidate(ScalarEvolution &SE, LoopInfo &LI,
                               DominatorTree &DT, bool ForceNestedLoop = false,
                               bool ForceHardwareLoopPHI = false);
  bool canAnalyze(LoopInfo &LI);
};

class IntrinsicCostAttributes {
  const IntrinsicInst *II = nullptr;
  Type *RetTy = nullptr;
  Intrinsic::ID IID;
  SmallVector<Type *, 4> ParamTys;
  SmallVector<const Value *, 4> Arguments;
  FastMathFlags FMF;
  // If ScalarizationCost is invalid (the default), the cost of scalarizing the
  // arguments and the return value will be computed based on types.
  InstructionCost ScalarizationCost = InstructionCost::getInvalid();

public:
  IntrinsicCostAttributes(
      Intrinsic::ID Id, const CallBase &CI,
      InstructionCost ScalarCost = InstructionCost::getInvalid());

  IntrinsicCostAttributes(
      Intrinsic::ID Id, Type *RTy, ArrayRef<Type *> Tys,
      FastMathFlags Flags = FastMathFlags(), const IntrinsicInst *I = nullptr,
      InstructionCost ScalarCost = InstructionCost::getInvalid());

  IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
                          ArrayRef<const Value *> Args);

  IntrinsicCostAttributes(
      Intrinsic::ID Id, Type *RTy, ArrayRef<const Value *> Args,
      ArrayRef<Type *> Tys, FastMathFlags Flags = FastMathFlags(),
      const IntrinsicInst *I = nullptr,
      InstructionCost ScalarCost = InstructionCost::getInvalid());

  Intrinsic::ID getID() const { return IID; }
  const IntrinsicInst *getInst() const { return II; }
  Type *getReturnType() const { return RetTy; }
  FastMathFlags getFlags() const { return FMF; }
  InstructionCost getScalarizationCost() const { return ScalarizationCost; }
  const SmallVectorImpl<const Value *> &getArgs() const { return Arguments; }
  const SmallVectorImpl<Type *> &getArgTypes() const { return ParamTys; }

  bool isTypeBasedOnly() const {
    return Arguments.empty();
  }

  bool skipScalarizationCost() const { return ScalarizationCost.isValid(); }
};
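// A minimal construction sketch (hypothetical names; assumes an LLVMContext
// &Ctx is available): a purely type-based query for a 4-wide vector fma.
//
//   auto *VecTy = FixedVectorType::get(Type::getFloatTy(Ctx), 4);
//   IntrinsicCostAttributes ICA(Intrinsic::fma, VecTy, {VecTy, VecTy, VecTy});
//
// Because no argument values are given, ICA.isTypeBasedOnly() is true and any
// scalarization cost will be derived from the types alone.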

class TargetTransformInfo;
typedef TargetTransformInfo TTI;

/// This pass provides access to the codegen interfaces that are needed
/// for IR-level transformations.
class TargetTransformInfo {
public:
  /// Construct a TTI object using a type implementing the \c Concept
  /// API below.
  ///
  /// This is used by targets to construct a TTI wrapping their target-specific
  /// implementation that encodes appropriate costs for their target.
  template <typename T> TargetTransformInfo(T Impl);

  /// Construct a baseline TTI object using a minimal implementation of
  /// the \c Concept API below.
  ///
  /// The TTI implementation will reflect the information in the DataLayout
  /// provided if non-null.
  explicit TargetTransformInfo(const DataLayout &DL);

  // Provide move semantics.
  TargetTransformInfo(TargetTransformInfo &&Arg);
  TargetTransformInfo &operator=(TargetTransformInfo &&RHS);

  // We need to define the destructor out-of-line to define our sub-classes
  // out-of-line.
  ~TargetTransformInfo();

  /// Handle the invalidation of this information.
  ///
  /// When used as a result of \c TargetIRAnalysis this method will be called
  /// when the function this was computed for changes. When it returns false,
  /// the information is preserved across those changes.
  bool invalidate(Function &, const PreservedAnalyses &,
                  FunctionAnalysisManager::Invalidator &) {
    // FIXME: We should probably in some way ensure that the subtarget
    // information for a function hasn't changed.
    return false;
  }

  /// \name Generic Target Information
  /// @{

  /// The kind of cost model.
  ///
  /// There are several different cost models that can be customized by the
  /// target. The normalization of each cost model may be target specific.
  enum TargetCostKind {
    TCK_RecipThroughput, ///< Reciprocal throughput.
    TCK_Latency,         ///< The latency of the instruction.
    TCK_CodeSize,        ///< Instruction code size.
    TCK_SizeAndLatency   ///< The weighted sum of size and latency.
  };

  /// Query the cost of a specified instruction.
  ///
  /// Clients should use this interface to query the cost of an existing
  /// instruction. The instruction must have a valid parent (basic block).
  ///
  /// Note, this method does not cache the cost calculation and it
  /// can be expensive in some cases.
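  ///
  /// A minimal usage sketch (assuming a TargetTransformInfo instance \c TTI
  /// and an instruction \c I with a parent block):
  /// \code
  ///   InstructionCost Cost =
  ///       TTI.getInstructionCost(&I, TargetTransformInfo::TCK_CodeSize);
  ///   if (!Cost.isValid())
  ///     ; // The target could not model this instruction for this cost kind.
  /// \endcode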
  InstructionCost getInstructionCost(const Instruction *I,
                                     enum TargetCostKind kind) const {
    InstructionCost Cost;
    switch (kind) {
    case TCK_RecipThroughput:
      Cost = getInstructionThroughput(I);
      break;
    case TCK_Latency:
      Cost = getInstructionLatency(I);
      break;
    case TCK_CodeSize:
    case TCK_SizeAndLatency:
      Cost = getUserCost(I, kind);
      break;
    }
    return Cost;
  }

  /// Underlying constants for 'cost' values in this interface.
  ///
  /// Many APIs in this interface return a cost. This enum defines the
  /// fundamental values that should be used to interpret (and produce) those
  /// costs. The costs are returned as an int rather than a member of this
  /// enumeration because it is expected that the cost of one IR instruction
  /// may have a multiplicative factor to it or otherwise won't fit directly
  /// into the enum. Moreover, it is common to sum or average costs which works
  /// better as simple integral values. Thus this enum only provides constants.
  /// Also note that the returned costs are signed integers to make it natural
  /// to add, subtract, and test with zero (a common boundary condition). It is
  /// not expected that 2^32 is a realistic cost to be modeling at any point.
  ///
  /// Note that these costs should usually reflect the intersection of code-size
  /// cost and execution cost. A free instruction is typically one that folds
  /// into another instruction. For example, reg-to-reg moves can often be
  /// skipped by renaming the registers in the CPU, but they still are encoded
  /// and thus wouldn't be considered 'free' here.
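  ///
  /// As an illustration (a sketch, assuming a TargetTransformInfo instance
  /// \c TTI and a basic block \c BB), costs are typically accumulated and then
  /// compared against small multiples of these constants:
  /// \code
  ///   InstructionCost Total = 0;
  ///   for (const Instruction &I : BB)
  ///     Total += TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
  ///   bool Cheap = Total.isValid() &&
  ///                *Total.getValue() <= 4 * TargetTransformInfo::TCC_Basic;
  /// \endcode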
  enum TargetCostConstants {
    TCC_Free = 0,     ///< Expected to fold away in lowering.
    TCC_Basic = 1,    ///< The cost of a typical 'add' instruction.
    TCC_Expensive = 4 ///< The cost of a 'div' instruction on x86.
  };

  /// Estimate the cost of a GEP operation when lowered.
  InstructionCost
  getGEPCost(Type *PointeeType, const Value *Ptr,
             ArrayRef<const Value *> Operands,
             TargetCostKind CostKind = TCK_SizeAndLatency) const;

  /// \returns A value by which our inlining threshold should be multiplied.
  /// This is primarily used to bump up the inlining threshold wholesale on
  /// targets where calls are unusually expensive.
  ///
  /// TODO: This is a rather blunt instrument.  Perhaps altering the costs of
  /// individual classes of instructions would be better.
  unsigned getInliningThresholdMultiplier() const;

  /// \returns A value to be added to the inlining threshold.
  unsigned adjustInliningThreshold(const CallBase *CB) const;

  /// \returns Vector bonus in percent.
  ///
  /// Vector bonuses: We want to more aggressively inline vector-dense kernels
  /// and apply this bonus based on the percentage of vector instructions. A
  /// bonus is applied if the vector instructions exceed 50% and half that
  /// amount is applied if it exceeds 10%. Note that these bonuses are somewhat
  /// arbitrary and evolved over time by accident as much as because they are
  /// principled bonuses.
  /// FIXME: It would be nice to base the bonus values on something more
  /// scientific. A target may have no bonus on vector instructions.
  int getInlinerVectorBonusPercent() const;

  /// \return the expected cost of a memcpy, which could e.g. depend on the
  /// source/destination type and alignment and the number of bytes copied.
  InstructionCost getMemcpyCost(const Instruction *I) const;

  /// \return The estimated number of case clusters when lowering \p SI.
  /// \p JTSize is set to the jump table size only when \p SI is suitable for a
  /// jump table.
  unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
                                            unsigned &JTSize,
                                            ProfileSummaryInfo *PSI,
                                            BlockFrequencyInfo *BFI) const;

  /// Estimate the cost of a given IR user when lowered.
  ///
  /// This can estimate the cost of either a ConstantExpr or Instruction when
  /// lowered.
  ///
  /// \p Operands is a list of operands which can be a result of transformations
  /// of the current operands. The number of operands on the list must equal
  /// the number of current operands the IR user has, and their order must
  /// match the order of the current operands.
  ///
  /// The returned cost is defined in terms of \c TargetCostConstants, see its
  /// comments for a detailed explanation of the cost values.
  InstructionCost getUserCost(const User *U, ArrayRef<const Value *> Operands,
                              TargetCostKind CostKind) const;

  /// This is a helper function which calls the two-argument getUserCost
  /// with \p Operands which are the current operands U has.
  InstructionCost getUserCost(const User *U, TargetCostKind CostKind) const {
    SmallVector<const Value *, 4> Operands(U->operand_values());
    return getUserCost(U, Operands, CostKind);
  }

  /// If a branch or a select condition is skewed in one direction by more than
  /// this factor, it is very likely to be predicted correctly.
  BranchProbability getPredictableBranchThreshold() const;

  /// Return true if branch divergence exists.
  ///
  /// Branch divergence has a significantly negative impact on GPU performance
  /// when threads in the same wavefront take different paths due to conditional
  /// branches.
  bool hasBranchDivergence() const;

  /// Return true if the target prefers to use GPU divergence analysis to
  /// replace the legacy version.
  bool useGPUDivergenceAnalysis() const;

  /// Returns whether V is a source of divergence.
  ///
  /// This function provides the target-dependent information for
  /// the target-independent LegacyDivergenceAnalysis. LegacyDivergenceAnalysis
  /// first builds the dependency graph, and then runs the reachability
  /// algorithm starting with the sources of divergence.
  bool isSourceOfDivergence(const Value *V) const;

  /// Returns true for the target-specific set of operations that produce a
  /// uniform result even when taking non-uniform arguments.
  bool isAlwaysUniform(const Value *V) const;

  /// Returns the address space ID for a target's 'flat' address space. Note
  /// this is not necessarily the same as addrspace(0), which LLVM sometimes
  /// refers to as the generic address space. The flat address space is a
  /// generic address space that can be used to access multiple segments of
  /// memory with different address spaces. Accessing a memory location through
  /// a pointer with this address space is expected to be legal but slower
  /// compared to the same memory location accessed through a pointer with a
  /// different address space.
  ///
  /// This is for targets with different pointer representations which can
  /// be converted with the addrspacecast instruction. If a pointer is converted
  /// to this address space, optimizations should attempt to replace the access
  /// with the source address space.
  ///
  /// \returns ~0u if the target does not have such a flat address space to
  /// optimize away.
  unsigned getFlatAddressSpace() const;

  /// Return any intrinsic address operand indexes which may be rewritten if
  /// they use a flat address space pointer.
  ///
  /// \returns true if the intrinsic was handled.
  bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
                                  Intrinsic::ID IID) const;

  bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const;

  unsigned getAssumedAddrSpace(const Value *V) const;

  /// Rewrite intrinsic call \p II such that \p OldV will be replaced with \p
  /// NewV, which has a different address space. This should happen for every
  /// operand index that collectFlatAddressOperands returned for the intrinsic.
  /// \returns nullptr if the intrinsic was not handled. Otherwise, returns the
  /// new value (which may be the original \p II with modified operands).
  Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
                                          Value *NewV) const;

  /// Test whether calls to a function lower to actual program function
  /// calls.
  ///
  /// The idea is to test whether the program is likely to require a 'call'
  /// instruction or equivalent in order to call the given function.
  ///
  /// FIXME: It's not clear that this is a good or useful query API. Clients
  /// should probably move to simpler cost metrics using the above.
  /// Alternatively, we could split the cost interface into distinct code-size
  /// and execution-speed costs. This would allow modelling the core of this
  /// query more accurately as a call is a single small instruction, but
  /// incurs significant execution cost.
  bool isLoweredToCall(const Function *F) const;

  struct LSRCost {
    /// TODO: Some of these could be merged. Also, a lexical ordering
    /// isn't always optimal.
    unsigned Insns;
    unsigned NumRegs;
    unsigned AddRecCost;
    unsigned NumIVMuls;
    unsigned NumBaseAdds;
    unsigned ImmCost;
    unsigned SetupCost;
    unsigned ScaleCost;
  };

  /// Parameters that control the generic loop unrolling transformation.
  struct UnrollingPreferences {
    /// The cost threshold for the unrolled loop. Should be relative to the
    /// getUserCost values returned by this API, and the expectation is that
    /// the unrolled loop's instructions when run through that interface should
    /// not exceed this cost. However, this is only an estimate. Also, specific
    /// loops may be unrolled even with a cost above this threshold if deemed
    /// profitable. Set this to UINT_MAX to disable the loop body cost
    /// restriction.
    unsigned Threshold;
    /// If complete unrolling will reduce the cost of the loop, we will boost
    /// the Threshold by a certain percent to allow more aggressive complete
    /// unrolling. This value provides the maximum boost percentage that we
    /// can apply to Threshold (The value should be no less than 100).
    /// BoostedThreshold = Threshold * min(RolledCost / UnrolledCost,
    ///                                    MaxPercentThresholdBoost / 100)
    /// E.g. if complete unrolling reduces the loop execution time by 50%
    /// then we boost the threshold by the factor of 2x. If unrolling is not
    /// expected to reduce the running time, then we do not increase the
    /// threshold.
    unsigned MaxPercentThresholdBoost;
    /// The cost threshold for the unrolled loop when optimizing for size (set
    /// to UINT_MAX to disable).
    unsigned OptSizeThreshold;
    /// The cost threshold for the unrolled loop, like Threshold, but used
    /// for partial/runtime unrolling (set to UINT_MAX to disable).
    unsigned PartialThreshold;
    /// The cost threshold for the unrolled loop when optimizing for size, like
    /// OptSizeThreshold, but used for partial/runtime unrolling (set to
    /// UINT_MAX to disable).
    unsigned PartialOptSizeThreshold;
    /// A forced unrolling factor (the number of concatenated bodies of the
    /// original loop in the unrolled loop body). When set to 0, the unrolling
    /// transformation will select an unrolling factor based on the current cost
    /// threshold and other factors.
    unsigned Count;
    /// Default unroll count for loops with run-time trip count.
    unsigned DefaultUnrollRuntimeCount;
    // Set the maximum unrolling factor. The unrolling factor may be selected
    // using the appropriate cost threshold, but may not exceed this number
    // (set to UINT_MAX to disable). This does not apply in cases where the
    // loop is being fully unrolled.
    unsigned MaxCount;
    /// Set the maximum unrolling factor for full unrolling. Like MaxCount, but
    /// applies even if full unrolling is selected. This allows a target to fall
    /// back to Partial unrolling if full unrolling is above FullUnrollMaxCount.
    unsigned FullUnrollMaxCount;
    // Represents number of instructions optimized when "back edge"
    // becomes "fall through" in unrolled loop.
    // For now we count a conditional branch on a backedge and a comparison
    // feeding it.
    unsigned BEInsns;
    /// Allow partial unrolling (unrolling of loops to expand the size of the
    /// loop body, not only to eliminate small constant-trip-count loops).
    bool Partial;
    /// Allow runtime unrolling (unrolling of loops to expand the size of the
    /// loop body even when the number of loop iterations is not known at
    /// compile time).
    bool Runtime;
    /// Allow generation of a loop remainder (extra iterations after unroll).
    bool AllowRemainder;
    /// Allow emitting expensive instructions (such as divisions) when computing
    /// the trip count of a loop for runtime unrolling.
    bool AllowExpensiveTripCount;
    /// Apply loop unroll on any kind of loop
    /// (mainly to loops that fail runtime unrolling).
    bool Force;
    /// Allow using trip count upper bound to unroll loops.
    bool UpperBound;
    /// Allow unrolling of all the iterations of the runtime loop remainder.
    bool UnrollRemainder;
    /// Allow unroll and jam. Used to enable unroll and jam for the target.
    bool UnrollAndJam;
    /// Threshold for unroll and jam, for inner loop size. The 'Threshold'
    /// value above is used during unroll and jam for the outer loop size.
    /// This value is used in the same manner to limit the size of the inner
    /// loop.
    unsigned UnrollAndJamInnerLoopThreshold;
    /// Don't allow loop unrolling to simulate more than this number of
    /// iterations when checking full unroll profitability
    unsigned MaxIterationsCountToAnalyze;
  };

  /// Get target-customized preferences for the generic loop unrolling
  /// transformation. The caller will initialize UP with the current
  /// target-independent defaults.
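  ///
  /// A target override typically just adjusts a few fields, e.g. (a sketch
  /// using a hypothetical \c MyTTIImpl, not any in-tree target's policy):
  /// \code
  ///   void MyTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
  ///                                           UnrollingPreferences &UP) {
  ///     UP.Partial = true;
  ///     UP.Runtime = true;
  ///     UP.MaxCount = 4;
  ///   }
  /// \endcode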
  void getUnrollingPreferences(Loop *L, ScalarEvolution &,
                               UnrollingPreferences &UP) const;

  /// Query the target whether it would be profitable to convert the given loop
  /// into a hardware loop.
  bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                AssumptionCache &AC, TargetLibraryInfo *LibInfo,
                                HardwareLoopInfo &HWLoopInfo) const;

  /// Query the target whether it would be preferred to create a predicated
  /// vector loop, which can avoid the need to emit a scalar epilogue loop.
  bool preferPredicateOverEpilogue(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
                                   AssumptionCache &AC, TargetLibraryInfo *TLI,
                                   DominatorTree *DT,
                                   const LoopAccessInfo *LAI) const;

  /// Query the target whether lowering of the llvm.get.active.lane.mask
  /// intrinsic is supported.
  bool emitGetActiveLaneMask() const;

  // Parameters that control the loop peeling transformation
  struct PeelingPreferences {
    /// A forced peeling factor (the number of bodies of the original loop
    /// that should be peeled off before the loop body). When set to 0, a
    /// peeling factor is chosen based on profile information and other factors.
    unsigned PeelCount;
    /// Allow peeling off loop iterations.
    bool AllowPeeling;
    /// Allow peeling off loop iterations for loop nests.
    bool AllowLoopNestsPeeling;
    /// Allow peeling based on profile. Used to enable peeling off all
    /// iterations based on the provided profile.
    /// If the value is true the peeling cost model can decide to peel only
    /// some iterations and in this case it will set this to false.
    bool PeelProfiledIterations;
  };

  /// Get target-customized preferences for the generic loop peeling
  /// transformation. The caller will initialize \p PP with the current
  /// target-independent defaults with information from \p L and \p SE.
  void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                             PeelingPreferences &PP) const;

  /// Targets can implement their own combinations for target-specific
  /// intrinsics. This function will be called from the InstCombine pass every
  /// time a target-specific intrinsic is encountered.
  ///
  /// \returns None to not do anything target specific, or a value that will be
  /// returned from the InstCombiner. It is also possible to stop further
  /// processing of the intrinsic by returning nullptr.
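  ///
  /// A target hook might look like (a sketch with a hypothetical target
  /// implementation class and a hypothetical intrinsic):
  /// \code
  ///   Optional<Instruction *>
  ///   MyTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) {
  ///     if (II.getIntrinsicID() == Intrinsic::my_target_intrinsic)
  ///       return IC.replaceInstUsesWith(II, II.getArgOperand(0));
  ///     return None;
  ///   }
  /// \endcode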
  Optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
                                               IntrinsicInst &II) const;
  /// Can be used to implement target-specific instruction combining.
  /// \see instCombineIntrinsic
  Optional<Value *>
  simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
                                   APInt DemandedMask, KnownBits &Known,
                                   bool &KnownBitsComputed) const;
  /// Can be used to implement target-specific instruction combining.
  /// \see instCombineIntrinsic
  Optional<Value *> simplifyDemandedVectorEltsIntrinsic(
      InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
      APInt &UndefElts2, APInt &UndefElts3,
      std::function<void(Instruction *, unsigned, APInt, APInt &)>
          SimplifyAndSetOp) const;
  /// @}

  /// \name Scalar Target Information
  /// @{

  /// Flags indicating the kind of support for population count.
  ///
  /// Compared to the SW implementation, HW support is supposed to
  /// significantly boost the performance when the population is dense, and it
  /// may or may not degrade performance if the population is sparse. A HW
  /// support is considered as "Fast" if it can outperform, or is on a par
  /// with, SW implementation when the population is sparse; otherwise, it is
  /// considered as "Slow".
  enum PopcntSupportKind { PSK_Software, PSK_SlowHardware, PSK_FastHardware };

  /// Return true if the specified immediate is a legal add immediate, that
  /// is, the target has add instructions which can add a register with the
  /// immediate without having to materialize the immediate into a register.
  bool isLegalAddImmediate(int64_t Imm) const;

  /// Return true if the specified immediate is a legal icmp immediate,
  /// that is, the target has icmp instructions which can compare a register
  /// against the immediate without having to materialize the immediate into a
  /// register.
  bool isLegalICmpImmediate(int64_t Imm) const;

  /// Return true if the addressing mode represented by AM is legal for
  /// this target, for a load/store of the specified type.
  /// The type may be VoidTy, in which case only return true if the addressing
  /// mode is legal for a load/store of any legal type.
  /// If the target returns true in LSRWithInstrQueries(), I may be valid.
  /// TODO: Handle pre/postinc as well.
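  ///
  /// For example, to ask whether "BaseReg + 4*IndexReg + 16" is a legal mode
  /// for an i32 access (a sketch, assuming \c TTI and an i32 type \c Int32Ty):
  /// \code
  ///   bool Legal = TTI.isLegalAddressingMode(Int32Ty, /*BaseGV=*/nullptr,
  ///                                          /*BaseOffset=*/16,
  ///                                          /*HasBaseReg=*/true,
  ///                                          /*Scale=*/4);
  /// \endcode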
  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale,
                             unsigned AddrSpace = 0,
                             Instruction *I = nullptr) const;

  /// Return true if the LSR cost of C1 is lower than the LSR cost of C2.
  bool isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                     TargetTransformInfo::LSRCost &C2) const;

  /// Return true if LSR major cost is number of registers. Targets which
  /// implement their own isLSRCostLess and unset number of registers as major
  /// cost should return false, otherwise return true.
  bool isNumRegsMajorCostOfLSR() const;

  /// \returns true if LSR should not optimize a chain that includes \p I.
  bool isProfitableLSRChainElement(Instruction *I) const;

  /// Return true if the target can fuse a compare and branch.
  /// Loop-strength-reduction (LSR) uses that knowledge to adjust its cost
  /// calculation for the instructions in a loop.
  bool canMacroFuseCmp() const;

  /// Return true if the target can save a compare for loop count, for example
  /// hardware loop saves a compare.
  bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE, LoopInfo *LI,
                  DominatorTree *DT, AssumptionCache *AC,
                  TargetLibraryInfo *LibInfo) const;

  enum AddressingModeKind {
    AMK_PreIndexed,
    AMK_PostIndexed,
    AMK_None
  };

  /// Return the preferred addressing mode LSR should make efforts to generate.
  AddressingModeKind getPreferredAddressingMode(const Loop *L,
                                                ScalarEvolution *SE) const;

  /// Return true if the target supports masked store.
  bool isLegalMaskedStore(Type *DataType, Align Alignment) const;
  /// Return true if the target supports masked load.
  bool isLegalMaskedLoad(Type *DataType, Align Alignment) const;

  /// Return true if the target supports nontemporal store.
  bool isLegalNTStore(Type *DataType, Align Alignment) const;
  /// Return true if the target supports nontemporal load.
  bool isLegalNTLoad(Type *DataType, Align Alignment) const;

  /// Return true if the target supports masked scatter.
  bool isLegalMaskedScatter(Type *DataType, Align Alignment) const;
  /// Return true if the target supports masked gather.
  bool isLegalMaskedGather(Type *DataType, Align Alignment) const;

  /// Return true if the target supports masked compress store.
  bool isLegalMaskedCompressStore(Type *DataType) const;
  /// Return true if the target supports masked expand load.
  bool isLegalMaskedExpandLoad(Type *DataType) const;

  /// Return true if the target has a unified operation to calculate division
  /// and remainder. If so, the additional implicit multiplication and
  /// subtraction required to calculate a remainder from division are free. This
  /// can enable more aggressive transformations for division and remainder than
  /// would typically be allowed using throughput or size cost models.
  bool hasDivRemOp(Type *DataType, bool IsSigned) const;

  /// Return true if the given instruction (assumed to be a memory access
  /// instruction) has a volatile variant. If that's the case then we can avoid
  /// addrspacecast to generic AS for volatile loads/stores. Default
  /// implementation returns false, which prevents address space inference for
  /// volatile loads/stores.
  bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) const;

  /// Return true if target doesn't mind addresses in vectors.
  bool prefersVectorizedAddressing() const;

  /// Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  /// TODO: Handle pre/postinc as well.
  InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                       int64_t BaseOffset, bool HasBaseReg,
                                       int64_t Scale,
                                       unsigned AddrSpace = 0) const;

  /// Return true if the loop strength reduce pass should make
  /// Instruction* based TTI queries to isLegalAddressingMode(). This is
  /// needed on SystemZ, where e.g. a memcpy can only have a 12 bit unsigned
  /// immediate offset and no index register.
  bool LSRWithInstrQueries() const;

  /// Return true if it's free to truncate a value of type Ty1 to type
  /// Ty2. e.g. On x86 it's free to truncate an i32 value in register EAX to
  /// i16 by referencing its sub-register AX.
  bool isTruncateFree(Type *Ty1, Type *Ty2) const;

  /// Return true if it is profitable to hoist the instruction in the
  /// then/else block to before the if.
  bool isProfitableToHoist(Instruction *I) const;

  bool useAA() const;

  /// Return true if this type is legal.
  bool isTypeLegal(Type *Ty) const;

  /// Returns the estimated number of registers required to represent \p Ty.
  InstructionCost getRegUsageForType(Type *Ty) const;

  /// Return true if switches should be turned into lookup tables for the
  /// target.
  bool shouldBuildLookupTables() const;

  /// Return true if switches should be turned into lookup tables
  /// containing this constant value for the target.
  bool shouldBuildLookupTablesForConstant(Constant *C) const;

  /// Return true if lookup tables should be turned into relative lookup tables.
  bool shouldBuildRelLookupTables() const;

  /// Return true if the input function, which is cold at all call sites,
  /// should use the coldcc calling convention.
  bool useColdCCForColdCall(Function &F) const;

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the demanded result elements need to be inserted and/or
  /// extracted from vectors.
  InstructionCost getScalarizationOverhead(VectorType *Ty,
                                           const APInt &DemandedElts,
                                           bool Insert, bool Extract) const;

  /// Estimate the overhead of scalarizing an instruction's unique
  /// non-constant operands. The (potentially vector) types to use for each
  /// argument are passed via Tys.
  InstructionCost getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                                   ArrayRef<Type *> Tys) const;

  /// If target has efficient vector element load/store instructions, it can
  /// return true here so that insertion/extraction costs are not added to
  /// the scalarization cost of a load/store.
  bool supportsEfficientVectorElementLoadStore() const;

  /// Don't restrict interleaved unrolling to small loops.
  bool enableAggressiveInterleaving(bool LoopHasReductions) const;

  /// Returns options for expansion of memcmp. IsZeroCmp is true if this is
  /// the expansion of memcmp(p1, p2, s) == 0.
  struct MemCmpExpansionOptions {
    // Return true if memcmp expansion is enabled.
    operator bool() const { return MaxNumLoads > 0; }

    // Maximum number of load operations.
    unsigned MaxNumLoads = 0;

    // The list of available load sizes (in bytes), sorted in decreasing order.
    SmallVector<unsigned, 8> LoadSizes;

    // For memcmp expansion when the memcmp result is only compared equal or
    // not-equal to 0, allow up to this number of load pairs per block. As an
    // example, this may allow 'memcmp(a, b, 3) == 0' in a single block:
    //   a0 = load2bytes &a[0]
    //   b0 = load2bytes &b[0]
    //   a2 = load1byte  &a[2]
    //   b2 = load1byte  &b[2]
    //   r  = cmp eq (a0 ^ b0 | a2 ^ b2), 0
    unsigned NumLoadsPerBlock = 1;

    // Set to true to allow overlapping loads. For example, 7-byte compares can
    // be done with two 4-byte compares instead of 4+2+1-byte compares. This
    // requires all loads in LoadSizes to be doable in an unaligned way.
    bool AllowOverlappingLoads = false;
  };
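  // As an illustration, a target that can perform unaligned 4- and 8-byte
  // loads might return something like the following from enableMemCmpExpansion
  // (a sketch, not any in-tree target's exact values):
  //   MemCmpExpansionOptions Options;
  //   Options.MaxNumLoads = 4;
  //   Options.LoadSizes.append({8, 4, 2, 1});
  //   Options.AllowOverlappingLoads = true;
  //   return Options;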
  MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
                                               bool IsZeroCmp) const;

  /// Enable matching of interleaved access groups.
  bool enableInterleavedAccessVectorization() const;

  /// Enable matching of interleaved access groups that contain predicated
  /// accesses or gaps and are therefore vectorized using masked
  /// vector loads/stores.
  bool enableMaskedInterleavedAccessVectorization() const;

  /// Indicate that it is potentially unsafe to automatically vectorize
  /// floating-point operations because the semantics of vector and scalar
  /// floating-point operations may differ. For example, ARM NEON v7 SIMD math
  /// does not support IEEE-754 denormal numbers, while depending on the
  /// platform, scalar floating-point math does.
  /// This applies to floating-point math operations and calls, not memory
  /// operations, shuffles, or casts.
  bool isFPVectorizationPotentiallyUnsafe() const;

  /// Determine if the target supports unaligned memory accesses.
  bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
                                      unsigned AddressSpace = 0,
                                      Align Alignment = Align(1),
                                      bool *Fast = nullptr) const;

  /// Return hardware support for population count.
  PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const;

  /// Return true if the hardware has a fast square-root instruction.
  bool haveFastSqrt(Type *Ty) const;

  /// Return true if it is faster to check if a floating-point value is NaN
  /// (or not-NaN) versus a comparison against a constant FP zero value.
  /// Targets should override this if materializing a 0.0 for comparison is
  /// generally as cheap as checking for ordered/unordered.
  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const;

  /// Return the expected cost of supporting the floating point operation
  /// of the specified type.
  InstructionCost getFPOpCost(Type *Ty) const;

  /// Return the expected cost of materializing for the given integer
  /// immediate of the specified type.
  InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
                                TargetCostKind CostKind) const;

  /// Return the expected cost of materialization for the given integer
  /// immediate of the specified type for a given instruction. The cost can be
  /// zero if the immediate can be folded into the specified instruction.
  InstructionCost getIntImmCostInst(unsigned Opc, unsigned Idx,
                                    const APInt &Imm, Type *Ty,
                                    TargetCostKind CostKind,
                                    Instruction *Inst = nullptr) const;
  InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                      const APInt &Imm, Type *Ty,
                                      TargetCostKind CostKind) const;

  /// Return the expected cost for the given integer when optimising
  /// for size. This is different than the other integer immediate cost
  /// functions in that it is subtarget agnostic. This is useful when you e.g.
  /// target one ISA such as AArch32 but smaller encodings could be possible
  /// with another such as Thumb. This return value is used as a penalty when
  /// the total cost for a constant is calculated (the bigger the cost, the
  /// more beneficial constant hoisting is).
  int getIntImmCodeSizeCost(unsigned Opc, unsigned Idx, const APInt &Imm,
                            Type *Ty) const;
  /// @}

  /// \name Vector Target Information
  /// @{

  /// The various kinds of shuffle patterns for vector queries.
  enum ShuffleKind {
    SK_Broadcast,        ///< Broadcast element 0 to all other elements.
    SK_Reverse,          ///< Reverse the order of the vector.
    SK_Select,           ///< Selects elements from the corresponding lane of
                         ///< either source operand. This is equivalent to a
                         ///< vector select with a constant condition operand.
    SK_Transpose,        ///< Transpose two vectors.
    SK_InsertSubvector,  ///< InsertSubvector. Index indicates start offset.
    SK_ExtractSubvector, ///< ExtractSubvector Index indicates start offset.
    SK_PermuteTwoSrc,    ///< Merge elements from two source vectors into one
                         ///< with any shuffle mask.
    SK_PermuteSingleSrc  ///< Shuffle elements of single source vector with any
                         ///< shuffle mask.
  };

  /// Kind of the reduction data.
  enum ReductionKind {
    RK_None,           ///< Not a reduction.
    RK_Arithmetic,     ///< Binary reduction data.
    RK_MinMax,         ///< Min/max reduction data.
    RK_UnsignedMinMax, ///< Unsigned min/max reduction data.
  };

  /// Contains opcode + LHS/RHS parts of the reduction operations.
  struct ReductionData {
    ReductionData() = delete;
    ReductionData(ReductionKind Kind, unsigned Opcode, Value *LHS, Value *RHS)
        : Opcode(Opcode), LHS(LHS), RHS(RHS), Kind(Kind) {
      assert(Kind != RK_None && "expected binary or min/max reduction only.");
    }
    unsigned Opcode = 0;
    Value *LHS = nullptr;
    Value *RHS = nullptr;
    ReductionKind Kind = RK_None;
    bool hasSameData(ReductionData &RD) const {
      return Kind == RD.Kind && Opcode == RD.Opcode;
    }
  };

  static ReductionKind matchPairwiseReduction(
    const ExtractElementInst *ReduxRoot, unsigned &Opcode, VectorType *&Ty);

  static ReductionKind matchVectorSplittingReduction(
    const ExtractElementInst *ReduxRoot, unsigned &Opcode, VectorType *&Ty);

  static ReductionKind matchVectorReduction(const ExtractElementInst *ReduxRoot,
                                            unsigned &Opcode, VectorType *&Ty,
                                            bool &IsPairwise);

  /// Additional information about an operand's possible values.
  enum OperandValueKind {
    OK_AnyValue,               // Operand can have any value.
    OK_UniformValue,           // Operand is uniform (splat of a value).
    OK_UniformConstantValue,   // Operand is uniform constant.
    OK_NonUniformConstantValue // Operand is a non uniform constant value.
  };

  /// Additional properties of an operand's values.
  enum OperandValueProperties { OP_None = 0, OP_PowerOf2 = 1 };

  /// \return the number of registers in the target-provided register class.
  unsigned getNumberOfRegisters(unsigned ClassID) const;

  /// \return the target-provided register class ID for the provided type,
  /// accounting for type promotion and other type-legalization techniques that
  /// the target might apply. However, it specifically does not account for the
  /// scalarization or splitting of vector types. Should a vector type require
  /// scalarization or splitting into multiple underlying vector registers, that
  /// type should be mapped to a register class containing no registers.
  /// Specifically, this is designed to provide a simple, high-level view of the
  /// register allocation later performed by the backend. These register classes
  /// don't necessarily map onto the register classes used by the backend.
  /// FIXME: It's not currently possible to determine how many registers
  /// are used by the provided type.
  unsigned getRegisterClassForType(bool Vector, Type *Ty = nullptr) const;

  /// \return the target-provided register class name
  const char *getRegisterClassName(unsigned ClassID) const;

  enum RegisterKind { RGK_Scalar, RGK_FixedWidthVector, RGK_ScalableVector };

  /// \return The width of the largest scalar or vector register type.
  TypeSize getRegisterBitWidth(RegisterKind K) const;

  /// \return The width of the smallest vector register type.
  unsigned getMinVectorRegisterBitWidth() const;

  /// \return The maximum value of vscale if the target specifies an
  ///  architectural maximum vector length, and None otherwise.
  Optional<unsigned> getMaxVScale() const;

  /// \return True if the vectorization factor should be chosen to
  /// make the vector of the smallest element type match the size of a
  /// vector register. For wider element types, this could result in
  /// creating vectors that span multiple vector registers.
  /// If false, the vectorization factor will be chosen based on the
  /// size of the widest element type.
  bool shouldMaximizeVectorBandwidth() const;

  /// \return The minimum vectorization factor for types of given element
  /// bit width, or 0 if there is no minimum VF. The returned value only
  /// applies when shouldMaximizeVectorBandwidth returns true.
  /// If IsScalable is true, the returned ElementCount must be a scalable VF.
  ElementCount getMinimumVF(unsigned ElemWidth, bool IsScalable) const;

  /// \return The maximum vectorization factor for types of given element
  /// bit width and opcode, or 0 if there is no maximum VF.
  /// Currently only used by the SLP vectorizer.
  unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const;

  /// \return True if it should be considered for address type promotion.
  /// \p AllowPromotionWithoutCommonHeader Set true if promoting \p I is
  /// profitable without finding other extensions fed by the same input.
  bool shouldConsiderAddressTypePromotion(
      const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const;

  /// \return The size of a cache line in bytes.
  unsigned getCacheLineSize() const;

  /// The possible cache levels
  enum class CacheLevel {
    L1D, // The L1 data cache
    L2D, // The L2 data cache

    // We currently do not model L3 caches, as their sizes differ widely between
    // microarchitectures. Also, we currently do not have a use for L3 cache
    // size modeling yet.
  };

  /// \return The size of the cache level in bytes, if available.
  Optional<unsigned> getCacheSize(CacheLevel Level) const;

  /// \return The associativity of the cache level, if available.
  Optional<unsigned> getCacheAssociativity(CacheLevel Level) const;

  /// \return How much before a load we should place the prefetch
  /// instruction.  This is currently measured in number of
  /// instructions.
  unsigned getPrefetchDistance() const;

  /// Some HW prefetchers can handle accesses up to a certain constant stride.
  /// Sometimes prefetching is beneficial even below the HW prefetcher limit,
  /// and the arguments provided are meant to serve as a basis for deciding this
  /// for a particular loop.
  ///
  /// \param NumMemAccesses        Number of memory accesses in the loop.
  /// \param NumStridedMemAccesses Number of the memory accesses that
  ///                              ScalarEvolution could find a known stride
  ///                              for.
  /// \param NumPrefetches         Number of software prefetches that will be
  ///                              emitted as determined by the addresses
  ///                              involved and the cache line size.
  /// \param HasCall               True if the loop contains a call.
  ///
  /// \return This is the minimum stride in bytes where it makes sense to start
  ///         adding SW prefetches. The default is 1, i.e. prefetch with any
  ///         stride.
  unsigned getMinPrefetchStride(unsigned NumMemAccesses,
                                unsigned NumStridedMemAccesses,
                                unsigned NumPrefetches, bool HasCall) const;

  /// \return The maximum number of iterations to prefetch ahead.  If
  /// the required number of iterations is more than this number, no
  /// prefetching is performed.
  unsigned getMaxPrefetchIterationsAhead() const;

  /// \return True if prefetching should also be done for writes.
  bool enableWritePrefetching() const;

  /// \return The maximum interleave factor that any transform should try to
  /// perform for this target. This number depends on the level of parallelism
  /// and the number of execution units in the CPU.
  unsigned getMaxInterleaveFactor(unsigned VF) const;

  /// Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
  static OperandValueKind getOperandInfo(const Value *V,
                                         OperandValueProperties &OpProps);

  /// This is an approximation of reciprocal throughput of a math/logic op.
  /// A higher cost indicates less expected throughput.
  /// From Agner Fog's guides, reciprocal throughput is "the average number of
  /// clock cycles per instruction when the instructions are not part of a
  /// limiting dependency chain."
  /// Therefore, costs should be scaled to account for multiple execution units
  /// on the target that can process this type of instruction. For example, if
  /// there are 5 scalar integer units and 2 vector integer units that can
  /// calculate an 'add' in a single cycle, this model should indicate that the
  /// cost of the vector add instruction is 2.5 times the cost of the scalar
  /// add instruction.
  /// \p Args is an optional argument which holds the instruction operands
  /// values so the TTI can analyze those values searching for special
  /// cases or optimizations based on those values.
  /// \p CxtI is the optional original context instruction, if one exists, to
  /// provide even more information.
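  ///
  /// For example, the throughput cost of a vector shift by a uniform constant
  /// power of two could be queried as (a sketch, assuming \c TTI and a vector
  /// type \c VecTy):
  /// \code
  ///   InstructionCost Cost = TTI.getArithmeticInstrCost(
  ///       Instruction::Shl, VecTy, TTI::TCK_RecipThroughput,
  ///       TTI::OK_AnyValue, TTI::OK_UniformConstantValue,
  ///       TTI::OP_None, TTI::OP_PowerOf2);
  /// \endcode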
  InstructionCost getArithmeticInstrCost(
      unsigned Opcode, Type *Ty,
      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
      OperandValueKind Opd1Info = OK_AnyValue,
      OperandValueKind Opd2Info = OK_AnyValue,
      OperandValueProperties Opd1PropInfo = OP_None,
      OperandValueProperties Opd2PropInfo = OP_None,
      ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
      const Instruction *CxtI = nullptr) const;

  /// \return The cost of a shuffle instruction of kind Kind and of type Tp.
  /// The exact mask may be passed as Mask, or else the array will be empty.
  /// The index and subtype parameters are used by the subvector insertion and
  /// extraction shuffle kinds to show the insert/extract point and the type of
  /// the subvector being inserted/extracted.
  /// NOTE: For subvector extractions Tp represents the source type.
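  ///
  /// For instance, extracting the upper half of a <8 x i32> vector could be
  /// modelled as (a sketch, assuming \c TTI, the source type \c SrcTy and the
  /// <4 x i32> subvector type \c HalfTy):
  /// \code
  ///   InstructionCost Cost = TTI.getShuffleCost(TTI::SK_ExtractSubvector,
  ///                                             SrcTy, None, /*Index=*/4,
  ///                                             HalfTy);
  /// \endcode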
  InstructionCost getShuffleCost(ShuffleKind Kind, VectorType *Tp,
                                 ArrayRef<int> Mask = None, int Index = 0,
                                 VectorType *SubTp = nullptr) const;

  /// Represents a hint about the context in which a cast is used.
  ///
  /// For zext/sext, the context of the cast is the operand, which must be a
  /// load of some kind. For trunc, the context of the cast is the single
  /// user of the instruction, which must be a store of some kind.
  ///
  /// This enum allows the vectorizer to give getCastInstrCost an idea of the
  /// type of cast it's dealing with, as not every cast is equal. For instance,
  /// the zext of a load may be free, but the zext of an interleaving load can
  /// be (very) expensive!
  ///
  /// See \c getCastContextHint to compute a CastContextHint from a cast
  /// Instruction*. Callers can use it if they don't need to override the
  /// context and just want it to be calculated from the instruction.
  ///
  /// FIXME: This handles the types of load/store that the vectorizer can
  /// produce, which are the cases where the context instruction is most
  /// likely to be incorrect. There are other situations where that can happen
  /// too, which might be handled here but in the long run a more general
  /// solution of costing multiple instructions at the same time may be better.
  enum class CastContextHint : uint8_t {
    None,          ///< The cast is not used with a load/store of any kind.
    Normal,        ///< The cast is used with a normal load/store.
    Masked,        ///< The cast is used with a masked load/store.
    GatherScatter, ///< The cast is used with a gather/scatter.
    Interleave,    ///< The cast is used with an interleaved load/store.
    Reversed,      ///< The cast is used with a reversed load/store.
  };

  /// Calculates a CastContextHint from \p I.
  /// This should be used by callers of getCastInstrCost if they wish to
  /// determine the context from some instruction.
  /// \returns the CastContextHint for ZExt/SExt/Trunc, None if \p I is nullptr,
  /// or if it's another type of cast.
  static CastContextHint getCastContextHint(const Instruction *I);

  /// \return The expected cost of cast instructions, such as bitcast, trunc,
  /// zext, etc. If there is an existing instruction that holds Opcode, it
  /// may be passed in the 'I' parameter.
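  ///
  /// A typical vectorizer-style query might be (a sketch, assuming \c TTI and
  /// a cast instruction \c CastI with source type \c SrcTy and destination
  /// type \c DstTy):
  /// \code
  ///   TTI::CastContextHint CCH = TTI::getCastContextHint(CastI);
  ///   InstructionCost Cost =
  ///       TTI.getCastInstrCost(CastI->getOpcode(), DstTy, SrcTy, CCH,
  ///                            TTI::TCK_RecipThroughput, CastI);
  /// \endcode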
1103  InstructionCost
1104  getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
1105                   TTI::CastContextHint CCH,
1106                   TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency,
1107                   const Instruction *I = nullptr) const;
1108
1109  /// \return The expected cost of a sign- or zero-extended vector extract. Use
1110  /// -1 to indicate that there is no information about the index value.
1111  InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst,
1112                                           VectorType *VecTy,
1113                                           unsigned Index = -1) const;
1114
1115  /// \return The expected cost of control-flow related instructions such as
1116  /// Phi, Ret, Br, Switch.
1117  InstructionCost
1118  getCFInstrCost(unsigned Opcode,
1119                 TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency,
1120                 const Instruction *I = nullptr) const;
1121
1122  /// \returns The expected cost of compare and select instructions. If there
1123  /// is an existing instruction that holds Opcode, it may be passed in the
1124  /// 'I' parameter. The \p VecPred parameter can be used to indicate the select
1125  /// is using a compare with the specified predicate as condition. When vector
1126  /// types are passed, \p VecPred must be used for all lanes.
1127  InstructionCost
1128  getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy = nullptr,
1129                     CmpInst::Predicate VecPred = CmpInst::BAD_ICMP_PREDICATE,
1130                     TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
1131                     const Instruction *I = nullptr) const;
1132
1133  /// \return The expected cost of vector Insert and Extract.
1134  /// Use -1 to indicate that there is no information on the index value.
1135  InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
1136                                     unsigned Index = -1) const;
1137
1138  /// \return The cost of Load and Store instructions.
1139  InstructionCost
1140  getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
1141                  unsigned AddressSpace,
1142                  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
1143                  const Instruction *I = nullptr) const;
1144
1145  /// \return The cost of masked Load and Store instructions.
1146  InstructionCost getMaskedMemoryOpCost(
1147      unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
1148      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;
1149
  /// \return The cost of a Gather or Scatter operation.
  /// \p Opcode - the kind of memory access, Load or Store
1152  /// \p DataTy - a vector type of the data to be loaded or stored
1153  /// \p Ptr - pointer [or vector of pointers] - address[es] in memory
1154  /// \p VariableMask - true when the memory access is predicated with a mask
1155  ///                   that is not a compile-time constant
  /// \p Alignment - alignment of a single element
1157  /// \p I - the optional original context instruction, if one exists, e.g. the
1158  ///        load/store to transform or the call to the gather/scatter intrinsic
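  ///
  /// A sketch of costing a gather of 4 x i32 through a vector of pointers;
  /// \c Ctx, \c PtrVec and \c TTI are assumptions of the example:
  /// \code
  ///   Type *DataTy = FixedVectorType::get(Type::getInt32Ty(Ctx), 4);
  ///   InstructionCost Cost = TTI.getGatherScatterOpCost(
  ///       Instruction::Load, DataTy, PtrVec, /*VariableMask=*/true, Align(4));
  /// \endcode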
1159  InstructionCost getGatherScatterOpCost(
1160      unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
1161      Align Alignment, TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
1162      const Instruction *I = nullptr) const;
1163
1164  /// \return The cost of the interleaved memory operation.
1165  /// \p Opcode is the memory operation code
1166  /// \p VecTy is the vector type of the interleaved access.
1167  /// \p Factor is the interleave factor
1168  /// \p Indices is the indices for interleaved load members (as interleaved
1169  ///    load allows gaps)
1170  /// \p Alignment is the alignment of the memory operation
1171  /// \p AddressSpace is address space of the pointer.
1172  /// \p UseMaskForCond indicates if the memory access is predicated.
1173  /// \p UseMaskForGaps indicates if gaps should be masked.
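  ///
  /// For instance, a factor-2 interleaved load where both v8i32 members are
  /// used could be costed roughly as below (a sketch; \c WideVecTy is assumed
  /// to be the 16 x i32 type covering the whole group, and \c AS and \c TTI
  /// are assumptions of the example):
  /// \code
  ///   SmallVector<unsigned, 2> Indices = {0, 1};
  ///   InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
  ///       Instruction::Load, WideVecTy, /*Factor=*/2, Indices, Align(4), AS);
  /// \endcode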
1174  InstructionCost getInterleavedMemoryOpCost(
1175      unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1176      Align Alignment, unsigned AddressSpace,
1177      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
1178      bool UseMaskForCond = false, bool UseMaskForGaps = false) const;
1179
1180  /// Calculate the cost of performing a vector reduction.
1181  ///
1182  /// This is the cost of reducing the vector value of type \p Ty to a scalar
1183  /// value using the operation denoted by \p Opcode. The form of the reduction
1184  /// can either be a pairwise reduction or a reduction that splits the vector
1185  /// at every reduction level.
1186  ///
1187  /// Pairwise:
1188  ///  (v0, v1, v2, v3)
1189  ///  ((v0+v1), (v2+v3), undef, undef)
1190  /// Split:
1191  ///  (v0, v1, v2, v3)
1192  ///  ((v0+v2), (v1+v3), undef, undef)
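  ///
  /// For example, the split-form cost of an integer add reduction over v4i32
  /// could be queried as below (a sketch; \c Ctx and \c TTI are assumptions of
  /// the example):
  /// \code
  ///   auto *VecTy = FixedVectorType::get(Type::getInt32Ty(Ctx), 4);
  ///   InstructionCost Cost = TTI.getArithmeticReductionCost(
  ///       Instruction::Add, VecTy, /*IsPairwiseForm=*/false);
  /// \endcode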
1193  InstructionCost getArithmeticReductionCost(
1194      unsigned Opcode, VectorType *Ty, bool IsPairwiseForm,
1195      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;
1196
1197  InstructionCost getMinMaxReductionCost(
1198      VectorType *Ty, VectorType *CondTy, bool IsPairwiseForm, bool IsUnsigned,
1199      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;
1200
1201  /// Calculate the cost of an extended reduction pattern, similar to
1202  /// getArithmeticReductionCost of an Add reduction with an extension and
  /// optional multiply. This is the cost of:
  /// ResTy vecreduce.add(ext(Ty A)), or if IsMLA flag is set then:
  /// ResTy vecreduce.add(mul(ext(Ty A), ext(Ty B))). The reduction happens
1206  /// on a VectorType with ResTy elements and Ty lanes.
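  ///
  /// For example, the cost of reducing mul(sext(v8i16), sext(v8i16)) into an
  /// i32 accumulator could be queried as below (a sketch; \c Ctx and \c TTI
  /// are assumptions of the example):
  /// \code
  ///   Type *ResTy = Type::getInt32Ty(Ctx);
  ///   auto *SrcTy = FixedVectorType::get(Type::getInt16Ty(Ctx), 8);
  ///   InstructionCost Cost = TTI.getExtendedAddReductionCost(
  ///       /*IsMLA=*/true, /*IsUnsigned=*/false, ResTy, SrcTy);
  /// \endcode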
1207  InstructionCost getExtendedAddReductionCost(
1208      bool IsMLA, bool IsUnsigned, Type *ResTy, VectorType *Ty,
1209      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;
1210
  /// \returns The cost of Intrinsic instructions. Analyses the real arguments.
  /// Three cases are handled: 1. scalar instruction, 2. vector instruction,
  /// 3. scalar instruction which is to be vectorized.
1214  InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
1215                                        TTI::TargetCostKind CostKind) const;
1216
1217  /// \returns The cost of Call instructions.
1218  InstructionCost getCallInstrCost(
1219      Function *F, Type *RetTy, ArrayRef<Type *> Tys,
1220      TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency) const;
1221
1222  /// \returns The number of pieces into which the provided type must be
1223  /// split during legalization. Zero is returned when the answer is unknown.
1224  unsigned getNumberOfParts(Type *Tp) const;
1225
1226  /// \returns The cost of the address computation. For most targets this can be
1227  /// merged into the instruction indexing mode. Some targets might want to
1228  /// distinguish between address computation for memory operations on vector
1229  /// types and scalar types. Such targets should override this function.
  /// The 'SE' parameter holds a pointer to the scalar evolution object, which
  /// is used to get the step value of 'Ptr' in the case of a constant stride.
  /// The 'Ptr' parameter holds the SCEV of the access pointer.
1233  InstructionCost getAddressComputationCost(Type *Ty,
1234                                            ScalarEvolution *SE = nullptr,
1235                                            const SCEV *Ptr = nullptr) const;
1236
1237  /// \returns The cost, if any, of keeping values of the given types alive
1238  /// over a callsite.
1239  ///
1240  /// Some types may require the use of register classes that do not have
1241  /// any callee-saved registers, so would require a spill and fill.
1242  InstructionCost getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const;
1243
  /// \returns True if the intrinsic is a supported memory intrinsic.  Info
  /// will contain additional information - whether the intrinsic may read from
  /// or write to memory, its volatility, and the pointer.  Info is undefined
  /// if false is returned.
1248  bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const;
1249
1250  /// \returns The maximum element size, in bytes, for an element
1251  /// unordered-atomic memory intrinsic.
1252  unsigned getAtomicMemIntrinsicMaxElementSize() const;
1253
1254  /// \returns A value which is the result of the given memory intrinsic.  New
1255  /// instructions may be created to extract the result from the given intrinsic
1256  /// memory operation.  Returns nullptr if the target cannot create a result
1257  /// from the given intrinsic.
1258  Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
1259                                           Type *ExpectedType) const;
1260
1261  /// \returns The type to use in a loop expansion of a memcpy call.
1262  Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
1263                                  unsigned SrcAddrSpace, unsigned DestAddrSpace,
1264                                  unsigned SrcAlign, unsigned DestAlign) const;
1265
1266  /// \param[out] OpsOut The operand types to copy RemainingBytes of memory.
1267  /// \param RemainingBytes The number of bytes to copy.
1268  ///
1269  /// Calculates the operand types to use when copying \p RemainingBytes of
1270  /// memory, where source and destination alignments are \p SrcAlign and
1271  /// \p DestAlign respectively.
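  ///
  /// A sketch of querying the residual operand types for a 7-byte tail copy;
  /// \c Ctx, \c SrcAS, \c DstAS and \c TTI are assumptions of the example:
  /// \code
  ///   SmallVector<Type *, 4> ResidualTys;
  ///   TTI.getMemcpyLoopResidualLoweringType(ResidualTys, Ctx,
  ///                                         /*RemainingBytes=*/7, SrcAS,
  ///                                         DstAS, /*SrcAlign=*/4,
  ///                                         /*DestAlign=*/4);
  ///   // ResidualTys now holds the target's preferred operand types, e.g.
  ///   // i32, i16, i8, for copying the trailing bytes.
  /// \endcode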
1272  void getMemcpyLoopResidualLoweringType(
1273      SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
1274      unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
1275      unsigned SrcAlign, unsigned DestAlign) const;
1276
1277  /// \returns True if the two functions have compatible attributes for inlining
1278  /// purposes.
1279  bool areInlineCompatible(const Function *Caller,
1280                           const Function *Callee) const;
1281
1282  /// \returns True if the caller and callee agree on how \p Args will be passed
1283  /// to the callee.
1284  /// \param[out] Args The list of compatible arguments.  The implementation may
1285  /// filter out any incompatible args from this list.
1286  bool areFunctionArgsABICompatible(const Function *Caller,
1287                                    const Function *Callee,
1288                                    SmallPtrSetImpl<Argument *> &Args) const;
1289
1290  /// The type of load/store indexing.
1291  enum MemIndexedMode {
1292    MIM_Unindexed, ///< No indexing.
1293    MIM_PreInc,    ///< Pre-incrementing.
1294    MIM_PreDec,    ///< Pre-decrementing.
1295    MIM_PostInc,   ///< Post-incrementing.
1296    MIM_PostDec    ///< Post-decrementing.
1297  };
1298
1299  /// \returns True if the specified indexed load for the given type is legal.
1300  bool isIndexedLoadLegal(enum MemIndexedMode Mode, Type *Ty) const;
1301
1302  /// \returns True if the specified indexed store for the given type is legal.
1303  bool isIndexedStoreLegal(enum MemIndexedMode Mode, Type *Ty) const;
1304
1305  /// \returns The bitwidth of the largest vector type that should be used to
1306  /// load/store in the given address space.
1307  unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const;
1308
1309  /// \returns True if the load instruction is legal to vectorize.
1310  bool isLegalToVectorizeLoad(LoadInst *LI) const;
1311
1312  /// \returns True if the store instruction is legal to vectorize.
1313  bool isLegalToVectorizeStore(StoreInst *SI) const;
1314
1315  /// \returns True if it is legal to vectorize the given load chain.
1316  bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes, Align Alignment,
1317                                   unsigned AddrSpace) const;
1318
1319  /// \returns True if it is legal to vectorize the given store chain.
1320  bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, Align Alignment,
1321                                    unsigned AddrSpace) const;
1322
1323  /// \returns True if it is legal to vectorize the given reduction kind.
1324  bool isLegalToVectorizeReduction(RecurrenceDescriptor RdxDesc,
1325                                   ElementCount VF) const;
1326
  /// \returns The new vector factor value if the target doesn't support loads
  /// of \p ChainSizeInBytes bytes or has a better vector factor.
1329  unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
1330                               unsigned ChainSizeInBytes,
1331                               VectorType *VecTy) const;
1332
  /// \returns The new vector factor value if the target doesn't support stores
  /// of \p ChainSizeInBytes bytes or has a better vector factor.
1335  unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
1336                                unsigned ChainSizeInBytes,
1337                                VectorType *VecTy) const;
1338
1339  /// Flags describing the kind of vector reduction.
1340  struct ReductionFlags {
1341    ReductionFlags() : IsMaxOp(false), IsSigned(false), NoNaN(false) {}
    bool IsMaxOp;  ///< If the op is a min/max kind, true if it's a max operation.
1343    bool IsSigned; ///< Whether the operation is a signed int reduction.
1344    bool NoNaN;    ///< If op is an fp min/max, whether NaNs may be present.
1345  };
1346
  /// \returns True if the target prefers in-loop reductions.
1348  bool preferInLoopReduction(unsigned Opcode, Type *Ty,
1349                             ReductionFlags Flags) const;
1350
  /// \returns True if the target prefers the reduction select to be kept in
  /// the loop when tail folding, i.e.
1353  /// loop:
1354  ///   p = phi (0, s)
1355  ///   a = add (p, x)
1356  ///   s = select (mask, a, p)
1357  /// vecreduce.add(s)
1358  ///
1359  /// As opposed to the normal scheme of p = phi (0, a) which allows the select
1360  /// to be pulled out of the loop. If the select(.., add, ..) can be predicated
1361  /// by the target, this can lead to cleaner code generation.
1362  bool preferPredicatedReductionSelect(unsigned Opcode, Type *Ty,
1363                                       ReductionFlags Flags) const;
1364
1365  /// \returns True if the target wants to expand the given reduction intrinsic
1366  /// into a shuffle sequence.
1367  bool shouldExpandReduction(const IntrinsicInst *II) const;
1368
1369  /// \returns the size cost of rematerializing a GlobalValue address relative
1370  /// to a stack reload.
1371  unsigned getGISelRematGlobalCost() const;
1372
1373  /// \returns True if the target supports scalable vectors.
1374  bool supportsScalableVectors() const;
1375
1376  /// \name Vector Predication Information
1377  /// @{
  /// Whether the target supports the %evl parameter of VP intrinsics
  /// efficiently in hardware (see LLVM Language Reference - "Vector Predication
  /// Intrinsics"). Use of %evl is discouraged when that is not the case.
1381  bool hasActiveVectorLength() const;
1382
1383  struct VPLegalization {
1384    enum VPTransform {
1385      // keep the predicating parameter
1386      Legal = 0,
1387      // where legal, discard the predicate parameter
1388      Discard = 1,
1389      // transform into something else that is also predicating
1390      Convert = 2
1391    };
1392
1393    // How to transform the EVL parameter.
1394    // Legal:   keep the EVL parameter as it is.
1395    // Discard: Ignore the EVL parameter where it is safe to do so.
1396    // Convert: Fold the EVL into the mask parameter.
1397    VPTransform EVLParamStrategy;
1398
1399    // How to transform the operator.
1400    // Legal:   The target supports this operator.
1401    // Convert: Convert this to a non-VP operation.
1402    // The 'Discard' strategy is invalid.
1403    VPTransform OpStrategy;
1404
1405    bool shouldDoNothing() const {
1406      return (EVLParamStrategy == Legal) && (OpStrategy == Legal);
1407    }
1408    VPLegalization(VPTransform EVLParamStrategy, VPTransform OpStrategy)
1409        : EVLParamStrategy(EVLParamStrategy), OpStrategy(OpStrategy) {}
1410  };
1411
1412  /// \returns How the target needs this vector-predicated operation to be
1413  /// transformed.
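  ///
  /// A typical use is deciding whether a vector-predicated call needs to be
  /// expanded (a sketch; \c VPI is assumed to be a VPIntrinsic in scope):
  /// \code
  ///   TargetTransformInfo::VPLegalization VPLegal =
  ///       TTI.getVPLegalizationStrategy(VPI);
  ///   if (!VPLegal.shouldDoNothing()) {
  ///     // The target wants the EVL parameter and/or the operation itself
  ///     // legalized, e.g. by folding the EVL into the mask or by expanding
  ///     // to an unpredicated operation.
  ///   }
  /// \endcode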
1414  VPLegalization getVPLegalizationStrategy(const VPIntrinsic &PI) const;
1415  /// @}
1416
1417  /// @}
1418
1419private:
  /// Estimate the latency of the specified instruction.
1421  /// Returns 1 as the default value.
1422  InstructionCost getInstructionLatency(const Instruction *I) const;
1423
1424  /// Returns the expected throughput cost of the instruction.
1425  /// Returns -1 if the cost is unknown.
1426  InstructionCost getInstructionThroughput(const Instruction *I) const;
1427
1428  /// The abstract base class used to type erase specific TTI
1429  /// implementations.
1430  class Concept;
1431
1432  /// The template model for the base class which wraps a concrete
1433  /// implementation in a type erased interface.
1434  template <typename T> class Model;
1435
1436  std::unique_ptr<Concept> TTIImpl;
1437};
1438
1439class TargetTransformInfo::Concept {
1440public:
1441  virtual ~Concept() = 0;
1442  virtual const DataLayout &getDataLayout() const = 0;
1443  virtual InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr,
1444                                     ArrayRef<const Value *> Operands,
1445                                     TTI::TargetCostKind CostKind) = 0;
1446  virtual unsigned getInliningThresholdMultiplier() = 0;
1447  virtual unsigned adjustInliningThreshold(const CallBase *CB) = 0;
1448  virtual int getInlinerVectorBonusPercent() = 0;
1449  virtual InstructionCost getMemcpyCost(const Instruction *I) = 0;
1450  virtual unsigned
1451  getEstimatedNumberOfCaseClusters(const SwitchInst &SI, unsigned &JTSize,
1452                                   ProfileSummaryInfo *PSI,
1453                                   BlockFrequencyInfo *BFI) = 0;
1454  virtual InstructionCost getUserCost(const User *U,
1455                                      ArrayRef<const Value *> Operands,
1456                                      TargetCostKind CostKind) = 0;
1457  virtual BranchProbability getPredictableBranchThreshold() = 0;
1458  virtual bool hasBranchDivergence() = 0;
1459  virtual bool useGPUDivergenceAnalysis() = 0;
1460  virtual bool isSourceOfDivergence(const Value *V) = 0;
1461  virtual bool isAlwaysUniform(const Value *V) = 0;
1462  virtual unsigned getFlatAddressSpace() = 0;
1463  virtual bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
1464                                          Intrinsic::ID IID) const = 0;
1465  virtual bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const = 0;
1466  virtual unsigned getAssumedAddrSpace(const Value *V) const = 0;
1467  virtual Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II,
1468                                                  Value *OldV,
1469                                                  Value *NewV) const = 0;
1470  virtual bool isLoweredToCall(const Function *F) = 0;
1471  virtual void getUnrollingPreferences(Loop *L, ScalarEvolution &,
1472                                       UnrollingPreferences &UP) = 0;
1473  virtual void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
1474                                     PeelingPreferences &PP) = 0;
1475  virtual bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
1476                                        AssumptionCache &AC,
1477                                        TargetLibraryInfo *LibInfo,
1478                                        HardwareLoopInfo &HWLoopInfo) = 0;
1479  virtual bool
1480  preferPredicateOverEpilogue(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
1481                              AssumptionCache &AC, TargetLibraryInfo *TLI,
1482                              DominatorTree *DT, const LoopAccessInfo *LAI) = 0;
1483  virtual bool emitGetActiveLaneMask() = 0;
1484  virtual Optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
1485                                                       IntrinsicInst &II) = 0;
1486  virtual Optional<Value *>
1487  simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
1488                                   APInt DemandedMask, KnownBits &Known,
1489                                   bool &KnownBitsComputed) = 0;
1490  virtual Optional<Value *> simplifyDemandedVectorEltsIntrinsic(
1491      InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
1492      APInt &UndefElts2, APInt &UndefElts3,
1493      std::function<void(Instruction *, unsigned, APInt, APInt &)>
1494          SimplifyAndSetOp) = 0;
1495  virtual bool isLegalAddImmediate(int64_t Imm) = 0;
1496  virtual bool isLegalICmpImmediate(int64_t Imm) = 0;
1497  virtual bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
1498                                     int64_t BaseOffset, bool HasBaseReg,
1499                                     int64_t Scale, unsigned AddrSpace,
1500                                     Instruction *I) = 0;
1501  virtual bool isLSRCostLess(TargetTransformInfo::LSRCost &C1,
1502                             TargetTransformInfo::LSRCost &C2) = 0;
1503  virtual bool isNumRegsMajorCostOfLSR() = 0;
1504  virtual bool isProfitableLSRChainElement(Instruction *I) = 0;
1505  virtual bool canMacroFuseCmp() = 0;
1506  virtual bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE,
1507                          LoopInfo *LI, DominatorTree *DT, AssumptionCache *AC,
1508                          TargetLibraryInfo *LibInfo) = 0;
1509  virtual AddressingModeKind
1510    getPreferredAddressingMode(const Loop *L, ScalarEvolution *SE) const = 0;
1511  virtual bool isLegalMaskedStore(Type *DataType, Align Alignment) = 0;
1512  virtual bool isLegalMaskedLoad(Type *DataType, Align Alignment) = 0;
1513  virtual bool isLegalNTStore(Type *DataType, Align Alignment) = 0;
1514  virtual bool isLegalNTLoad(Type *DataType, Align Alignment) = 0;
1515  virtual bool isLegalMaskedScatter(Type *DataType, Align Alignment) = 0;
1516  virtual bool isLegalMaskedGather(Type *DataType, Align Alignment) = 0;
1517  virtual bool isLegalMaskedCompressStore(Type *DataType) = 0;
1518  virtual bool isLegalMaskedExpandLoad(Type *DataType) = 0;
1519  virtual bool hasDivRemOp(Type *DataType, bool IsSigned) = 0;
1520  virtual bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) = 0;
1521  virtual bool prefersVectorizedAddressing() = 0;
1522  virtual InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
1523                                               int64_t BaseOffset,
1524                                               bool HasBaseReg, int64_t Scale,
1525                                               unsigned AddrSpace) = 0;
1526  virtual bool LSRWithInstrQueries() = 0;
1527  virtual bool isTruncateFree(Type *Ty1, Type *Ty2) = 0;
1528  virtual bool isProfitableToHoist(Instruction *I) = 0;
1529  virtual bool useAA() = 0;
1530  virtual bool isTypeLegal(Type *Ty) = 0;
1531  virtual InstructionCost getRegUsageForType(Type *Ty) = 0;
1532  virtual bool shouldBuildLookupTables() = 0;
1533  virtual bool shouldBuildLookupTablesForConstant(Constant *C) = 0;
1534  virtual bool shouldBuildRelLookupTables() = 0;
1535  virtual bool useColdCCForColdCall(Function &F) = 0;
1536  virtual InstructionCost getScalarizationOverhead(VectorType *Ty,
1537                                                   const APInt &DemandedElts,
1538                                                   bool Insert,
1539                                                   bool Extract) = 0;
1540  virtual InstructionCost
1541  getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
1542                                   ArrayRef<Type *> Tys) = 0;
1543  virtual bool supportsEfficientVectorElementLoadStore() = 0;
1544  virtual bool enableAggressiveInterleaving(bool LoopHasReductions) = 0;
1545  virtual MemCmpExpansionOptions
1546  enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const = 0;
1547  virtual bool enableInterleavedAccessVectorization() = 0;
1548  virtual bool enableMaskedInterleavedAccessVectorization() = 0;
1549  virtual bool isFPVectorizationPotentiallyUnsafe() = 0;
1550  virtual bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
1551                                              unsigned BitWidth,
1552                                              unsigned AddressSpace,
1553                                              Align Alignment,
1554                                              bool *Fast) = 0;
1555  virtual PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) = 0;
1556  virtual bool haveFastSqrt(Type *Ty) = 0;
1557  virtual bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) = 0;
1558  virtual InstructionCost getFPOpCost(Type *Ty) = 0;
1559  virtual int getIntImmCodeSizeCost(unsigned Opc, unsigned Idx,
1560                                    const APInt &Imm, Type *Ty) = 0;
1561  virtual InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
1562                                        TargetCostKind CostKind) = 0;
1563  virtual InstructionCost getIntImmCostInst(unsigned Opc, unsigned Idx,
1564                                            const APInt &Imm, Type *Ty,
1565                                            TargetCostKind CostKind,
1566                                            Instruction *Inst = nullptr) = 0;
1567  virtual InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
1568                                              const APInt &Imm, Type *Ty,
1569                                              TargetCostKind CostKind) = 0;
1570  virtual unsigned getNumberOfRegisters(unsigned ClassID) const = 0;
1571  virtual unsigned getRegisterClassForType(bool Vector,
1572                                           Type *Ty = nullptr) const = 0;
1573  virtual const char *getRegisterClassName(unsigned ClassID) const = 0;
1574  virtual TypeSize getRegisterBitWidth(RegisterKind K) const = 0;
1575  virtual unsigned getMinVectorRegisterBitWidth() = 0;
1576  virtual Optional<unsigned> getMaxVScale() const = 0;
1577  virtual bool shouldMaximizeVectorBandwidth() const = 0;
1578  virtual ElementCount getMinimumVF(unsigned ElemWidth,
1579                                    bool IsScalable) const = 0;
1580  virtual unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const = 0;
1581  virtual bool shouldConsiderAddressTypePromotion(
1582      const Instruction &I, bool &AllowPromotionWithoutCommonHeader) = 0;
1583  virtual unsigned getCacheLineSize() const = 0;
1584  virtual Optional<unsigned> getCacheSize(CacheLevel Level) const = 0;
1585  virtual Optional<unsigned> getCacheAssociativity(CacheLevel Level) const = 0;
1586
  /// \return How far ahead of a load we should place the prefetch
1588  /// instruction.  This is currently measured in number of
1589  /// instructions.
1590  virtual unsigned getPrefetchDistance() const = 0;
1591
1592  /// \return Some HW prefetchers can handle accesses up to a certain
1593  /// constant stride.  This is the minimum stride in bytes where it
1594  /// makes sense to start adding SW prefetches.  The default is 1,
1595  /// i.e. prefetch with any stride.  Sometimes prefetching is beneficial
1596  /// even below the HW prefetcher limit, and the arguments provided are
1597  /// meant to serve as a basis for deciding this for a particular loop.
1598  virtual unsigned getMinPrefetchStride(unsigned NumMemAccesses,
1599                                        unsigned NumStridedMemAccesses,
1600                                        unsigned NumPrefetches,
1601                                        bool HasCall) const = 0;
1602
1603  /// \return The maximum number of iterations to prefetch ahead.  If
1604  /// the required number of iterations is more than this number, no
1605  /// prefetching is performed.
1606  virtual unsigned getMaxPrefetchIterationsAhead() const = 0;
1607
1608  /// \return True if prefetching should also be done for writes.
1609  virtual bool enableWritePrefetching() const = 0;
1610
1611  virtual unsigned getMaxInterleaveFactor(unsigned VF) = 0;
1612  virtual InstructionCost getArithmeticInstrCost(
1613      unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
1614      OperandValueKind Opd1Info, OperandValueKind Opd2Info,
1615      OperandValueProperties Opd1PropInfo, OperandValueProperties Opd2PropInfo,
1616      ArrayRef<const Value *> Args, const Instruction *CxtI = nullptr) = 0;
1617  virtual InstructionCost getShuffleCost(ShuffleKind Kind, VectorType *Tp,
1618                                         ArrayRef<int> Mask, int Index,
1619                                         VectorType *SubTp) = 0;
1620  virtual InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst,
1621                                           Type *Src, CastContextHint CCH,
1622                                           TTI::TargetCostKind CostKind,
1623                                           const Instruction *I) = 0;
1624  virtual InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst,
1625                                                   VectorType *VecTy,
1626                                                   unsigned Index) = 0;
1627  virtual InstructionCost getCFInstrCost(unsigned Opcode,
1628                                         TTI::TargetCostKind CostKind,
1629                                         const Instruction *I = nullptr) = 0;
1630  virtual InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
1631                                             Type *CondTy,
1632                                             CmpInst::Predicate VecPred,
1633                                             TTI::TargetCostKind CostKind,
1634                                             const Instruction *I) = 0;
1635  virtual InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
1636                                             unsigned Index) = 0;
1637  virtual InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src,
1638                                          Align Alignment,
1639                                          unsigned AddressSpace,
1640                                          TTI::TargetCostKind CostKind,
1641                                          const Instruction *I) = 0;
1642  virtual InstructionCost
1643  getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
1644                        unsigned AddressSpace,
1645                        TTI::TargetCostKind CostKind) = 0;
1646  virtual InstructionCost
1647  getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr,
1648                         bool VariableMask, Align Alignment,
1649                         TTI::TargetCostKind CostKind,
1650                         const Instruction *I = nullptr) = 0;
1651
1652  virtual InstructionCost getInterleavedMemoryOpCost(
1653      unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1654      Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
1655      bool UseMaskForCond = false, bool UseMaskForGaps = false) = 0;
1656  virtual InstructionCost
1657  getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
1658                             bool IsPairwiseForm,
1659                             TTI::TargetCostKind CostKind) = 0;
1660  virtual InstructionCost
1661  getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy,
1662                         bool IsPairwiseForm, bool IsUnsigned,
1663                         TTI::TargetCostKind CostKind) = 0;
1664  virtual InstructionCost getExtendedAddReductionCost(
1665      bool IsMLA, bool IsUnsigned, Type *ResTy, VectorType *Ty,
1666      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) = 0;
1667  virtual InstructionCost
1668  getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
1669                        TTI::TargetCostKind CostKind) = 0;
1670  virtual InstructionCost getCallInstrCost(Function *F, Type *RetTy,
1671                                           ArrayRef<Type *> Tys,
1672                                           TTI::TargetCostKind CostKind) = 0;
1673  virtual unsigned getNumberOfParts(Type *Tp) = 0;
1674  virtual InstructionCost
1675  getAddressComputationCost(Type *Ty, ScalarEvolution *SE, const SCEV *Ptr) = 0;
1676  virtual InstructionCost
1677  getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) = 0;
1678  virtual bool getTgtMemIntrinsic(IntrinsicInst *Inst,
1679                                  MemIntrinsicInfo &Info) = 0;
1680  virtual unsigned getAtomicMemIntrinsicMaxElementSize() const = 0;
1681  virtual Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
1682                                                   Type *ExpectedType) = 0;
1683  virtual Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
1684                                          unsigned SrcAddrSpace,
1685                                          unsigned DestAddrSpace,
1686                                          unsigned SrcAlign,
1687                                          unsigned DestAlign) const = 0;
1688  virtual void getMemcpyLoopResidualLoweringType(
1689      SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
1690      unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
1691      unsigned SrcAlign, unsigned DestAlign) const = 0;
1692  virtual bool areInlineCompatible(const Function *Caller,
1693                                   const Function *Callee) const = 0;
1694  virtual bool
1695  areFunctionArgsABICompatible(const Function *Caller, const Function *Callee,
1696                               SmallPtrSetImpl<Argument *> &Args) const = 0;
1697  virtual bool isIndexedLoadLegal(MemIndexedMode Mode, Type *Ty) const = 0;
1698  virtual bool isIndexedStoreLegal(MemIndexedMode Mode, Type *Ty) const = 0;
1699  virtual unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const = 0;
1700  virtual bool isLegalToVectorizeLoad(LoadInst *LI) const = 0;
1701  virtual bool isLegalToVectorizeStore(StoreInst *SI) const = 0;
1702  virtual bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
1703                                           Align Alignment,
1704                                           unsigned AddrSpace) const = 0;
1705  virtual bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
1706                                            Align Alignment,
1707                                            unsigned AddrSpace) const = 0;
1708  virtual bool isLegalToVectorizeReduction(RecurrenceDescriptor RdxDesc,
1709                                           ElementCount VF) const = 0;
1710  virtual unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
1711                                       unsigned ChainSizeInBytes,
1712                                       VectorType *VecTy) const = 0;
1713  virtual unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
1714                                        unsigned ChainSizeInBytes,
1715                                        VectorType *VecTy) const = 0;
1716  virtual bool preferInLoopReduction(unsigned Opcode, Type *Ty,
1717                                     ReductionFlags) const = 0;
1718  virtual bool preferPredicatedReductionSelect(unsigned Opcode, Type *Ty,
1719                                               ReductionFlags) const = 0;
1720  virtual bool shouldExpandReduction(const IntrinsicInst *II) const = 0;
1721  virtual unsigned getGISelRematGlobalCost() const = 0;
1722  virtual bool supportsScalableVectors() const = 0;
1723  virtual bool hasActiveVectorLength() const = 0;
1724  virtual InstructionCost getInstructionLatency(const Instruction *I) = 0;
1725  virtual VPLegalization
1726  getVPLegalizationStrategy(const VPIntrinsic &PI) const = 0;
1727};
1728
1729template <typename T>
1730class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
1731  T Impl;
1732
1733public:
1734  Model(T Impl) : Impl(std::move(Impl)) {}
1735  ~Model() override {}
1736
1737  const DataLayout &getDataLayout() const override {
1738    return Impl.getDataLayout();
1739  }
1740
1741  InstructionCost
1742  getGEPCost(Type *PointeeType, const Value *Ptr,
1743             ArrayRef<const Value *> Operands,
1744             enum TargetTransformInfo::TargetCostKind CostKind) override {
1745    return Impl.getGEPCost(PointeeType, Ptr, Operands);
1746  }
1747  unsigned getInliningThresholdMultiplier() override {
1748    return Impl.getInliningThresholdMultiplier();
1749  }
1750  unsigned adjustInliningThreshold(const CallBase *CB) override {
1751    return Impl.adjustInliningThreshold(CB);
1752  }
1753  int getInlinerVectorBonusPercent() override {
1754    return Impl.getInlinerVectorBonusPercent();
1755  }
1756  InstructionCost getMemcpyCost(const Instruction *I) override {
1757    return Impl.getMemcpyCost(I);
1758  }
1759  InstructionCost getUserCost(const User *U, ArrayRef<const Value *> Operands,
1760                              TargetCostKind CostKind) override {
1761    return Impl.getUserCost(U, Operands, CostKind);
1762  }
1763  BranchProbability getPredictableBranchThreshold() override {
1764    return Impl.getPredictableBranchThreshold();
1765  }
1766  bool hasBranchDivergence() override { return Impl.hasBranchDivergence(); }
1767  bool useGPUDivergenceAnalysis() override {
1768    return Impl.useGPUDivergenceAnalysis();
1769  }
1770  bool isSourceOfDivergence(const Value *V) override {
1771    return Impl.isSourceOfDivergence(V);
1772  }
1773
1774  bool isAlwaysUniform(const Value *V) override {
1775    return Impl.isAlwaysUniform(V);
1776  }
1777
1778  unsigned getFlatAddressSpace() override { return Impl.getFlatAddressSpace(); }
1779
1780  bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
1781                                  Intrinsic::ID IID) const override {
1782    return Impl.collectFlatAddressOperands(OpIndexes, IID);
1783  }
1784
1785  bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const override {
1786    return Impl.isNoopAddrSpaceCast(FromAS, ToAS);
1787  }
1788
1789  unsigned getAssumedAddrSpace(const Value *V) const override {
1790    return Impl.getAssumedAddrSpace(V);
1791  }
1792
1793  Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
1794                                          Value *NewV) const override {
1795    return Impl.rewriteIntrinsicWithAddressSpace(II, OldV, NewV);
1796  }
1797
1798  bool isLoweredToCall(const Function *F) override {
1799    return Impl.isLoweredToCall(F);
1800  }
1801  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
1802                               UnrollingPreferences &UP) override {
1803    return Impl.getUnrollingPreferences(L, SE, UP);
1804  }
1805  void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
1806                             PeelingPreferences &PP) override {
1807    return Impl.getPeelingPreferences(L, SE, PP);
1808  }
1809  bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
1810                                AssumptionCache &AC, TargetLibraryInfo *LibInfo,
1811                                HardwareLoopInfo &HWLoopInfo) override {
1812    return Impl.isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
1813  }
1814  bool preferPredicateOverEpilogue(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
1815                                   AssumptionCache &AC, TargetLibraryInfo *TLI,
1816                                   DominatorTree *DT,
1817                                   const LoopAccessInfo *LAI) override {
1818    return Impl.preferPredicateOverEpilogue(L, LI, SE, AC, TLI, DT, LAI);
1819  }
1820  bool emitGetActiveLaneMask() override {
1821    return Impl.emitGetActiveLaneMask();
1822  }
1823  Optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
1824                                               IntrinsicInst &II) override {
1825    return Impl.instCombineIntrinsic(IC, II);
1826  }
1827  Optional<Value *>
1828  simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
1829                                   APInt DemandedMask, KnownBits &Known,
1830                                   bool &KnownBitsComputed) override {
1831    return Impl.simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
1832                                                 KnownBitsComputed);
1833  }
1834  Optional<Value *> simplifyDemandedVectorEltsIntrinsic(
1835      InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
1836      APInt &UndefElts2, APInt &UndefElts3,
1837      std::function<void(Instruction *, unsigned, APInt, APInt &)>
1838          SimplifyAndSetOp) override {
1839    return Impl.simplifyDemandedVectorEltsIntrinsic(
1840        IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
1841        SimplifyAndSetOp);
1842  }
1843  bool isLegalAddImmediate(int64_t Imm) override {
1844    return Impl.isLegalAddImmediate(Imm);
1845  }
1846  bool isLegalICmpImmediate(int64_t Imm) override {
1847    return Impl.isLegalICmpImmediate(Imm);
1848  }
1849  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
1850                             bool HasBaseReg, int64_t Scale, unsigned AddrSpace,
1851                             Instruction *I) override {
1852    return Impl.isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg, Scale,
1853                                      AddrSpace, I);
1854  }
1855  bool isLSRCostLess(TargetTransformInfo::LSRCost &C1,
1856                     TargetTransformInfo::LSRCost &C2) override {
1857    return Impl.isLSRCostLess(C1, C2);
1858  }
1859  bool isNumRegsMajorCostOfLSR() override {
1860    return Impl.isNumRegsMajorCostOfLSR();
1861  }
1862  bool isProfitableLSRChainElement(Instruction *I) override {
1863    return Impl.isProfitableLSRChainElement(I);
1864  }
1865  bool canMacroFuseCmp() override { return Impl.canMacroFuseCmp(); }
1866  bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE, LoopInfo *LI,
1867                  DominatorTree *DT, AssumptionCache *AC,
1868                  TargetLibraryInfo *LibInfo) override {
1869    return Impl.canSaveCmp(L, BI, SE, LI, DT, AC, LibInfo);
1870  }
1871  AddressingModeKind
1872    getPreferredAddressingMode(const Loop *L,
1873                               ScalarEvolution *SE) const override {
1874    return Impl.getPreferredAddressingMode(L, SE);
1875  }
1876  bool isLegalMaskedStore(Type *DataType, Align Alignment) override {
1877    return Impl.isLegalMaskedStore(DataType, Alignment);
1878  }
1879  bool isLegalMaskedLoad(Type *DataType, Align Alignment) override {
1880    return Impl.isLegalMaskedLoad(DataType, Alignment);
1881  }
1882  bool isLegalNTStore(Type *DataType, Align Alignment) override {
1883    return Impl.isLegalNTStore(DataType, Alignment);
1884  }
1885  bool isLegalNTLoad(Type *DataType, Align Alignment) override {
1886    return Impl.isLegalNTLoad(DataType, Alignment);
1887  }
1888  bool isLegalMaskedScatter(Type *DataType, Align Alignment) override {
1889    return Impl.isLegalMaskedScatter(DataType, Alignment);
1890  }
1891  bool isLegalMaskedGather(Type *DataType, Align Alignment) override {
1892    return Impl.isLegalMaskedGather(DataType, Alignment);
1893  }
1894  bool isLegalMaskedCompressStore(Type *DataType) override {
1895    return Impl.isLegalMaskedCompressStore(DataType);
1896  }
1897  bool isLegalMaskedExpandLoad(Type *DataType) override {
1898    return Impl.isLegalMaskedExpandLoad(DataType);
1899  }
1900  bool hasDivRemOp(Type *DataType, bool IsSigned) override {
1901    return Impl.hasDivRemOp(DataType, IsSigned);
1902  }
1903  bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) override {
1904    return Impl.hasVolatileVariant(I, AddrSpace);
1905  }
1906  bool prefersVectorizedAddressing() override {
1907    return Impl.prefersVectorizedAddressing();
1908  }
1909  InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
1910                                       int64_t BaseOffset, bool HasBaseReg,
1911                                       int64_t Scale,
1912                                       unsigned AddrSpace) override {
1913    return Impl.getScalingFactorCost(Ty, BaseGV, BaseOffset, HasBaseReg, Scale,
1914                                     AddrSpace);
1915  }
1916  bool LSRWithInstrQueries() override { return Impl.LSRWithInstrQueries(); }
1917  bool isTruncateFree(Type *Ty1, Type *Ty2) override {
1918    return Impl.isTruncateFree(Ty1, Ty2);
1919  }
1920  bool isProfitableToHoist(Instruction *I) override {
1921    return Impl.isProfitableToHoist(I);
1922  }
1923  bool useAA() override { return Impl.useAA(); }
1924  bool isTypeLegal(Type *Ty) override { return Impl.isTypeLegal(Ty); }
1925  InstructionCost getRegUsageForType(Type *Ty) override {
1926    return Impl.getRegUsageForType(Ty);
1927  }
1928  bool shouldBuildLookupTables() override {
1929    return Impl.shouldBuildLookupTables();
1930  }
1931  bool shouldBuildLookupTablesForConstant(Constant *C) override {
1932    return Impl.shouldBuildLookupTablesForConstant(C);
1933  }
1934  bool shouldBuildRelLookupTables() override {
1935    return Impl.shouldBuildRelLookupTables();
1936  }
1937  bool useColdCCForColdCall(Function &F) override {
1938    return Impl.useColdCCForColdCall(F);
1939  }
1940
1941  InstructionCost getScalarizationOverhead(VectorType *Ty,
1942                                           const APInt &DemandedElts,
1943                                           bool Insert, bool Extract) override {
1944    return Impl.getScalarizationOverhead(Ty, DemandedElts, Insert, Extract);
1945  }
1946  InstructionCost
1947  getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
1948                                   ArrayRef<Type *> Tys) override {
1949    return Impl.getOperandsScalarizationOverhead(Args, Tys);
1950  }
1951
1952  bool supportsEfficientVectorElementLoadStore() override {
1953    return Impl.supportsEfficientVectorElementLoadStore();
1954  }
1955
1956  bool enableAggressiveInterleaving(bool LoopHasReductions) override {
1957    return Impl.enableAggressiveInterleaving(LoopHasReductions);
1958  }
1959  MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
1960                                               bool IsZeroCmp) const override {
1961    return Impl.enableMemCmpExpansion(OptSize, IsZeroCmp);
1962  }
1963  bool enableInterleavedAccessVectorization() override {
1964    return Impl.enableInterleavedAccessVectorization();
1965  }
1966  bool enableMaskedInterleavedAccessVectorization() override {
1967    return Impl.enableMaskedInterleavedAccessVectorization();
1968  }
1969  bool isFPVectorizationPotentiallyUnsafe() override {
1970    return Impl.isFPVectorizationPotentiallyUnsafe();
1971  }
1972  bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
1973                                      unsigned AddressSpace, Align Alignment,
1974                                      bool *Fast) override {
1975    return Impl.allowsMisalignedMemoryAccesses(Context, BitWidth, AddressSpace,
1976                                               Alignment, Fast);
1977  }
1978  PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) override {
1979    return Impl.getPopcntSupport(IntTyWidthInBit);
1980  }
1981  bool haveFastSqrt(Type *Ty) override { return Impl.haveFastSqrt(Ty); }
1982
1983  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) override {
1984    return Impl.isFCmpOrdCheaperThanFCmpZero(Ty);
1985  }
1986
1987  InstructionCost getFPOpCost(Type *Ty) override {
1988    return Impl.getFPOpCost(Ty);
1989  }
1990
1991  int getIntImmCodeSizeCost(unsigned Opc, unsigned Idx, const APInt &Imm,
1992                            Type *Ty) override {
1993    return Impl.getIntImmCodeSizeCost(Opc, Idx, Imm, Ty);
1994  }
1995  InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
1996                                TargetCostKind CostKind) override {
1997    return Impl.getIntImmCost(Imm, Ty, CostKind);
1998  }
1999  InstructionCost getIntImmCostInst(unsigned Opc, unsigned Idx,
2000                                    const APInt &Imm, Type *Ty,
2001                                    TargetCostKind CostKind,
2002                                    Instruction *Inst = nullptr) override {
2003    return Impl.getIntImmCostInst(Opc, Idx, Imm, Ty, CostKind, Inst);
2004  }
2005  InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
2006                                      const APInt &Imm, Type *Ty,
2007                                      TargetCostKind CostKind) override {
2008    return Impl.getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind);
2009  }
2010  unsigned getNumberOfRegisters(unsigned ClassID) const override {
2011    return Impl.getNumberOfRegisters(ClassID);
2012  }
2013  unsigned getRegisterClassForType(bool Vector,
2014                                   Type *Ty = nullptr) const override {
2015    return Impl.getRegisterClassForType(Vector, Ty);
2016  }
2017  const char *getRegisterClassName(unsigned ClassID) const override {
2018    return Impl.getRegisterClassName(ClassID);
2019  }
2020  TypeSize getRegisterBitWidth(RegisterKind K) const override {
2021    return Impl.getRegisterBitWidth(K);
2022  }
2023  unsigned getMinVectorRegisterBitWidth() override {
2024    return Impl.getMinVectorRegisterBitWidth();
2025  }
2026  Optional<unsigned> getMaxVScale() const override {
2027    return Impl.getMaxVScale();
2028  }
2029  bool shouldMaximizeVectorBandwidth() const override {
2030    return Impl.shouldMaximizeVectorBandwidth();
2031  }
2032  ElementCount getMinimumVF(unsigned ElemWidth,
2033                            bool IsScalable) const override {
2034    return Impl.getMinimumVF(ElemWidth, IsScalable);
2035  }
2036  unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const override {
2037    return Impl.getMaximumVF(ElemWidth, Opcode);
2038  }
2039  bool shouldConsiderAddressTypePromotion(
2040      const Instruction &I, bool &AllowPromotionWithoutCommonHeader) override {
2041    return Impl.shouldConsiderAddressTypePromotion(
2042        I, AllowPromotionWithoutCommonHeader);
2043  }
2044  unsigned getCacheLineSize() const override { return Impl.getCacheLineSize(); }
2045  Optional<unsigned> getCacheSize(CacheLevel Level) const override {
2046    return Impl.getCacheSize(Level);
2047  }
2048  Optional<unsigned> getCacheAssociativity(CacheLevel Level) const override {
2049    return Impl.getCacheAssociativity(Level);
2050  }
2051
2052  /// Return the preferred prefetch distance in terms of instructions.
2053  ///
2054  unsigned getPrefetchDistance() const override {
2055    return Impl.getPrefetchDistance();
2056  }
2057
2058  /// Return the minimum stride necessary to trigger software
2059  /// prefetching.
2060  ///
2061  unsigned getMinPrefetchStride(unsigned NumMemAccesses,
2062                                unsigned NumStridedMemAccesses,
2063                                unsigned NumPrefetches,
2064                                bool HasCall) const override {
2065    return Impl.getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
2066                                     NumPrefetches, HasCall);
2067  }
2068
2069  /// Return the maximum prefetch distance in terms of loop
2070  /// iterations.
2071  ///
2072  unsigned getMaxPrefetchIterationsAhead() const override {
2073    return Impl.getMaxPrefetchIterationsAhead();
2074  }
2075
2076  /// \return True if prefetching should also be done for writes.
2077  bool enableWritePrefetching() const override {
2078    return Impl.enableWritePrefetching();
2079  }
2080
2081  unsigned getMaxInterleaveFactor(unsigned VF) override {
2082    return Impl.getMaxInterleaveFactor(VF);
2083  }
2084  unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
2085                                            unsigned &JTSize,
2086                                            ProfileSummaryInfo *PSI,
2087                                            BlockFrequencyInfo *BFI) override {
2088    return Impl.getEstimatedNumberOfCaseClusters(SI, JTSize, PSI, BFI);
2089  }
2090  InstructionCost getArithmeticInstrCost(
2091      unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
2092      OperandValueKind Opd1Info, OperandValueKind Opd2Info,
2093      OperandValueProperties Opd1PropInfo, OperandValueProperties Opd2PropInfo,
2094      ArrayRef<const Value *> Args,
2095      const Instruction *CxtI = nullptr) override {
2096    return Impl.getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info, Opd2Info,
2097                                       Opd1PropInfo, Opd2PropInfo, Args, CxtI);
2098  }
2099  InstructionCost getShuffleCost(ShuffleKind Kind, VectorType *Tp,
2100                                 ArrayRef<int> Mask, int Index,
2101                                 VectorType *SubTp) override {
2102    return Impl.getShuffleCost(Kind, Tp, Mask, Index, SubTp);
2103  }
2104  InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
2105                                   CastContextHint CCH,
2106                                   TTI::TargetCostKind CostKind,
2107                                   const Instruction *I) override {
2108    return Impl.getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
2109  }
2110  InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst,
2111                                           VectorType *VecTy,
2112                                           unsigned Index) override {
2113    return Impl.getExtractWithExtendCost(Opcode, Dst, VecTy, Index);
2114  }
2115  InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
2116                                 const Instruction *I = nullptr) override {
2117    return Impl.getCFInstrCost(Opcode, CostKind, I);
2118  }
2119  InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
2120                                     CmpInst::Predicate VecPred,
2121                                     TTI::TargetCostKind CostKind,
2122                                     const Instruction *I) override {
2123    return Impl.getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
2124  }
2125  InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
2126                                     unsigned Index) override {
2127    return Impl.getVectorInstrCost(Opcode, Val, Index);
2128  }
2129  InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
2130                                  unsigned AddressSpace,
2131                                  TTI::TargetCostKind CostKind,
2132                                  const Instruction *I) override {
2133    return Impl.getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
2134                                CostKind, I);
2135  }
2136  InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
2137                                        Align Alignment, unsigned AddressSpace,
2138                                        TTI::TargetCostKind CostKind) override {
2139    return Impl.getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
2140                                      CostKind);
2141  }
  InstructionCost
  getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr,
                         bool VariableMask, Align Alignment,
                         TTI::TargetCostKind CostKind,
                         const Instruction *I = nullptr) override {
    return Impl.getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                       Alignment, CostKind, I);
  }
  InstructionCost getInterleavedMemoryOpCost(
      unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
      Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
      bool UseMaskForCond, bool UseMaskForGaps) override {
    return Impl.getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace, CostKind,
                                           UseMaskForCond, UseMaskForGaps);
  }
  InstructionCost
  getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
                             bool IsPairwiseForm,
                             TTI::TargetCostKind CostKind) override {
    return Impl.getArithmeticReductionCost(Opcode, Ty, IsPairwiseForm,
                                           CostKind);
  }
  InstructionCost
  getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy,
                         bool IsPairwiseForm, bool IsUnsigned,
                         TTI::TargetCostKind CostKind) override {
    return Impl.getMinMaxReductionCost(Ty, CondTy, IsPairwiseForm, IsUnsigned,
                                       CostKind);
  }
  InstructionCost getExtendedAddReductionCost(
      bool IsMLA, bool IsUnsigned, Type *ResTy, VectorType *Ty,
      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) override {
    return Impl.getExtendedAddReductionCost(IsMLA, IsUnsigned, ResTy, Ty,
                                            CostKind);
  }
  InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                        TTI::TargetCostKind CostKind) override {
    return Impl.getIntrinsicInstrCost(ICA, CostKind);
  }
  InstructionCost getCallInstrCost(Function *F, Type *RetTy,
                                   ArrayRef<Type *> Tys,
                                   TTI::TargetCostKind CostKind) override {
    return Impl.getCallInstrCost(F, RetTy, Tys, CostKind);
  }
  unsigned getNumberOfParts(Type *Tp) override {
    return Impl.getNumberOfParts(Tp);
  }
  InstructionCost getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
                                            const SCEV *Ptr) override {
    return Impl.getAddressComputationCost(Ty, SE, Ptr);
  }
  InstructionCost getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) override {
    return Impl.getCostOfKeepingLiveOverCall(Tys);
  }
  bool getTgtMemIntrinsic(IntrinsicInst *Inst,
                          MemIntrinsicInfo &Info) override {
    return Impl.getTgtMemIntrinsic(Inst, Info);
  }
  unsigned getAtomicMemIntrinsicMaxElementSize() const override {
    return Impl.getAtomicMemIntrinsicMaxElementSize();
  }
  Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                           Type *ExpectedType) override {
    return Impl.getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
  }
  Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
                                  unsigned SrcAddrSpace, unsigned DestAddrSpace,
                                  unsigned SrcAlign,
                                  unsigned DestAlign) const override {
    return Impl.getMemcpyLoopLoweringType(Context, Length, SrcAddrSpace,
                                          DestAddrSpace, SrcAlign, DestAlign);
  }
  void getMemcpyLoopResidualLoweringType(
      SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
      unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
      unsigned SrcAlign, unsigned DestAlign) const override {
    Impl.getMemcpyLoopResidualLoweringType(OpsOut, Context, RemainingBytes,
                                           SrcAddrSpace, DestAddrSpace,
                                           SrcAlign, DestAlign);
  }
  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const override {
    return Impl.areInlineCompatible(Caller, Callee);
  }
  bool areFunctionArgsABICompatible(
      const Function *Caller, const Function *Callee,
      SmallPtrSetImpl<Argument *> &Args) const override {
    return Impl.areFunctionArgsABICompatible(Caller, Callee, Args);
  }
  bool isIndexedLoadLegal(MemIndexedMode Mode, Type *Ty) const override {
    return Impl.isIndexedLoadLegal(Mode, Ty, getDataLayout());
  }
  bool isIndexedStoreLegal(MemIndexedMode Mode, Type *Ty) const override {
    return Impl.isIndexedStoreLegal(Mode, Ty, getDataLayout());
  }
  unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const override {
    return Impl.getLoadStoreVecRegBitWidth(AddrSpace);
  }
  bool isLegalToVectorizeLoad(LoadInst *LI) const override {
    return Impl.isLegalToVectorizeLoad(LI);
  }
  bool isLegalToVectorizeStore(StoreInst *SI) const override {
    return Impl.isLegalToVectorizeStore(SI);
  }
  bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes, Align Alignment,
                                   unsigned AddrSpace) const override {
    return Impl.isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment,
                                            AddrSpace);
  }
  bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, Align Alignment,
                                    unsigned AddrSpace) const override {
    return Impl.isLegalToVectorizeStoreChain(ChainSizeInBytes, Alignment,
                                             AddrSpace);
  }
  bool isLegalToVectorizeReduction(RecurrenceDescriptor RdxDesc,
                                   ElementCount VF) const override {
    return Impl.isLegalToVectorizeReduction(RdxDesc, VF);
  }
  unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                               unsigned ChainSizeInBytes,
                               VectorType *VecTy) const override {
    return Impl.getLoadVectorFactor(VF, LoadSize, ChainSizeInBytes, VecTy);
  }
  unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                unsigned ChainSizeInBytes,
                                VectorType *VecTy) const override {
    return Impl.getStoreVectorFactor(VF, StoreSize, ChainSizeInBytes, VecTy);
  }
  bool preferInLoopReduction(unsigned Opcode, Type *Ty,
                             ReductionFlags Flags) const override {
    return Impl.preferInLoopReduction(Opcode, Ty, Flags);
  }
  bool preferPredicatedReductionSelect(unsigned Opcode, Type *Ty,
                                       ReductionFlags Flags) const override {
    return Impl.preferPredicatedReductionSelect(Opcode, Ty, Flags);
  }
  bool shouldExpandReduction(const IntrinsicInst *II) const override {
    return Impl.shouldExpandReduction(II);
  }

  unsigned getGISelRematGlobalCost() const override {
    return Impl.getGISelRematGlobalCost();
  }

  bool supportsScalableVectors() const override {
    return Impl.supportsScalableVectors();
  }

  bool hasActiveVectorLength() const override {
    return Impl.hasActiveVectorLength();
  }

  InstructionCost getInstructionLatency(const Instruction *I) override {
    return Impl.getInstructionLatency(I);
  }

  VPLegalization
  getVPLegalizationStrategy(const VPIntrinsic &PI) const override {
    return Impl.getVPLegalizationStrategy(PI);
  }
};

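// This out-of-line constructor implements the type erasure: the target's TTI
// implementation object is copied into a Model<T> wrapper, so every query on
// TargetTransformInfo dispatches through the type-erased Concept interface
// that Model overrides above.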
template <typename T>
TargetTransformInfo::TargetTransformInfo(T Impl)
    : TTIImpl(new Model<T>(Impl)) {}

/// Analysis pass providing the \c TargetTransformInfo.
///
/// The core idea of the TargetIRAnalysis is to expose an interface through
/// which LLVM targets can analyze and provide information about the middle
/// end's target-independent IR. This supports use cases such as target-aware
/// cost modeling of IR constructs.
///
/// This is a function analysis because much of the cost modeling for targets
/// is done in a subtarget-specific way and LLVM supports compiling different
/// functions targeting different subtargets in order to support runtime
/// dispatch according to the observed subtarget.
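///
/// A minimal usage sketch under the new pass manager (ExamplePass is a
/// hypothetical pass; only the TargetIRAnalysis query itself is defined by
/// this header):
/// \code
///   PreservedAnalyses ExamplePass::run(Function &F,
///                                      FunctionAnalysisManager &FAM) {
///     TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
///     bool Scalable = TTI.supportsScalableVectors();
///     (void)Scalable;
///     return PreservedAnalyses::all();
///   }
/// \endcode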
class TargetIRAnalysis : public AnalysisInfoMixin<TargetIRAnalysis> {
public:
  typedef TargetTransformInfo Result;

  /// Default construct a target IR analysis.
  ///
  /// This will use the module's datalayout to construct a baseline
  /// conservative TTI result.
  TargetIRAnalysis();

  /// Construct an IR analysis pass around a target-provided callback.
  ///
  /// The callback will be called with a particular function for which the TTI
  /// is needed and must return a TTI object for that function.
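  ///
  /// A minimal sketch, assuming only a baseline TTI built from the module's
  /// data layout (in practice targets supply a richer callback, typically via
  /// their TargetMachine):
  /// \code
  ///   TargetIRAnalysis TIRA([](const Function &F) {
  ///     return TargetTransformInfo(F.getParent()->getDataLayout());
  ///   });
  /// \endcode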
  TargetIRAnalysis(std::function<Result(const Function &)> TTICallback);

  // Value semantics. We spell out the constructors for MSVC.
  TargetIRAnalysis(const TargetIRAnalysis &Arg)
      : TTICallback(Arg.TTICallback) {}
  TargetIRAnalysis(TargetIRAnalysis &&Arg)
      : TTICallback(std::move(Arg.TTICallback)) {}
  TargetIRAnalysis &operator=(const TargetIRAnalysis &RHS) {
    TTICallback = RHS.TTICallback;
    return *this;
  }
  TargetIRAnalysis &operator=(TargetIRAnalysis &&RHS) {
    TTICallback = std::move(RHS.TTICallback);
    return *this;
  }

  Result run(const Function &F, FunctionAnalysisManager &);

private:
  friend AnalysisInfoMixin<TargetIRAnalysis>;
  static AnalysisKey Key;

  /// The callback used to produce a result.
  ///
  /// We use a completely opaque callback so that targets can provide whatever
  /// mechanism they desire for constructing the TTI for a given function.
  ///
  /// FIXME: Should we really use std::function? It's relatively inefficient.
  /// It might be possible to arrange for even stateful callbacks to outlive
  /// the analysis and thus use a function_ref which would be lighter weight.
  /// This may also be less error prone as the callback is likely to reference
  /// the external TargetMachine, and that reference needs to never dangle.
  std::function<Result(const Function &)> TTICallback;

  /// Helper function used as the callback in the default constructor.
  static Result getDefaultTTI(const Function &F);
};

/// Wrapper pass for TargetTransformInfo.
///
/// This pass can be constructed from a TTI object, which it stores internally
/// and makes available to the passes that query it.
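///
/// A typical legacy-pass-manager usage sketch (ExamplePass is hypothetical;
/// only the wrapper-pass query itself is defined by this header):
/// \code
///   void ExamplePass::getAnalysisUsage(AnalysisUsage &AU) const {
///     AU.addRequired<TargetTransformInfoWrapperPass>();
///   }
///   bool ExamplePass::runOnFunction(Function &F) {
///     TargetTransformInfo &TTI =
///         getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
///     return false;
///   }
/// \endcode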
class TargetTransformInfoWrapperPass : public ImmutablePass {
  TargetIRAnalysis TIRA;
  Optional<TargetTransformInfo> TTI;

  virtual void anchor();

public:
  static char ID;

  /// We must provide a default constructor for the pass but it should
  /// never be used.
  ///
  /// Use the constructor below or call one of the creation routines.
  TargetTransformInfoWrapperPass();

  explicit TargetTransformInfoWrapperPass(TargetIRAnalysis TIRA);

  TargetTransformInfo &getTTI(const Function &F);
};

/// Create an analysis pass wrapper around a TTI object.
///
/// This analysis pass just holds the TTI instance and makes it available to
/// clients.
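///
/// For example, a sketch of wiring this into a legacy pass pipeline (TM is
/// assumed to be a valid TargetMachine pointer):
/// \code
///   legacy::PassManager PM;
///   PM.add(createTargetTransformInfoWrapperPass(TM->getTargetIRAnalysis()));
/// \endcode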
ImmutablePass *createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA);

} // namespace llvm

#endif