//===- llvm/Analysis/TargetTransformInfo.cpp ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/TargetTransformInfoImpl.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include <optional>
#include <utility>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "tti"

static cl::opt<bool> EnableReduxCost("costmodel-reduxcost", cl::init(false),
                                     cl::Hidden,
                                     cl::desc("Recognize reduction patterns."));

static cl::opt<unsigned> CacheLineSize(
    "cache-line-size", cl::init(0), cl::Hidden,
    cl::desc("Use this to override the target cache line size when "
             "specified by the user."));

static cl::opt<unsigned> MinPageSize(
    "min-page-size", cl::init(0), cl::Hidden,
    cl::desc("Use this to override the target's minimum page size."));

static cl::opt<unsigned> PredictableBranchThreshold(
    "predictable-branch-threshold", cl::init(99), cl::Hidden,
    cl::desc(
        "Use this to override the target's predictable branch threshold (%)."));

namespace {
/// No-op implementation of the TTI interface using the utility base
/// classes.
///
/// This is used when no target specific information is available.
struct NoTTIImpl : TargetTransformInfoImplCRTPBase<NoTTIImpl> {
  explicit NoTTIImpl(const DataLayout &DL)
      : TargetTransformInfoImplCRTPBase<NoTTIImpl>(DL) {}
};
} // namespace

bool HardwareLoopInfo::canAnalyze(LoopInfo &LI) {
  // If the loop has irreducible control flow, it cannot be converted to a
  // hardware loop.
  LoopBlocksRPO RPOT(L);
  RPOT.perform(&LI);
  if (containsIrreducibleCFG<const BasicBlock *>(RPOT, LI))
    return false;
  return true;
}

IntrinsicCostAttributes::IntrinsicCostAttributes(
    Intrinsic::ID Id, const CallBase &CI, InstructionCost ScalarizationCost,
    bool TypeBasedOnly)
    : II(dyn_cast<IntrinsicInst>(&CI)), RetTy(CI.getType()), IID(Id),
      ScalarizationCost(ScalarizationCost) {

  if (const auto *FPMO = dyn_cast<FPMathOperator>(&CI))
    FMF = FPMO->getFastMathFlags();

  if (!TypeBasedOnly)
    Arguments.insert(Arguments.begin(), CI.arg_begin(), CI.arg_end());
  FunctionType *FTy = CI.getCalledFunction()->getFunctionType();
  ParamTys.insert(ParamTys.begin(), FTy->param_begin(), FTy->param_end());
}

IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
                                                 ArrayRef<Type *> Tys,
                                                 FastMathFlags Flags,
                                                 const IntrinsicInst *I,
                                                 InstructionCost ScalarCost)
    : II(I), RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost) {
  ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());
}

IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *Ty,
                                                 ArrayRef<const Value *> Args)
    : RetTy(Ty), IID(Id) {

  Arguments.insert(Arguments.begin(), Args.begin(), Args.end());
  ParamTys.reserve(Arguments.size());
  for (unsigned Idx = 0, Size = Arguments.size(); Idx != Size; ++Idx)
    ParamTys.push_back(Arguments[Idx]->getType());
}

IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
                                                 ArrayRef<const Value *> Args,
                                                 ArrayRef<Type *> Tys,
                                                 FastMathFlags Flags,
                                                 const IntrinsicInst *I,
                                                 InstructionCost ScalarCost)
    : II(I), RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost) {
  ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());
  Arguments.insert(Arguments.begin(), Args.begin(), Args.end());
}

HardwareLoopInfo::HardwareLoopInfo(Loop *L) : L(L) {
  // Match default options:
  // - hardware-loop-counter-bitwidth = 32
  // - hardware-loop-decrement = 1
  CountType = Type::getInt32Ty(L->getHeader()->getContext());
  LoopDecrement = ConstantInt::get(CountType, 1);
}
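
// Summary of the candidate checks below (descriptive comment only): we look
// for an exiting block whose exit count SCEV can compute, is non-zero and
// loop invariant, fits in CountType, is executed on every iteration (it must
// dominate all in-loop predecessors of the header), and terminates in a
// conditional branch that can become the hardware-loop exit branch.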
bool HardwareLoopInfo::isHardwareLoopCandidate(ScalarEvolution &SE,
                                               LoopInfo &LI, DominatorTree &DT,
                                               bool ForceNestedLoop,
                                               bool ForceHardwareLoopPHI) {
  SmallVector<BasicBlock *, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  for (BasicBlock *BB : ExitingBlocks) {
    // If we pass the updated counter back through a phi, we need to know
    // which latch the updated value will be coming from.
    if (!L->isLoopLatch(BB)) {
      if (ForceHardwareLoopPHI || CounterInReg)
        continue;
    }

    const SCEV *EC = SE.getExitCount(L, BB);
    if (isa<SCEVCouldNotCompute>(EC))
      continue;
    if (const SCEVConstant *ConstEC = dyn_cast<SCEVConstant>(EC)) {
      if (ConstEC->getValue()->isZero())
        continue;
    } else if (!SE.isLoopInvariant(EC, L))
      continue;

    if (SE.getTypeSizeInBits(EC->getType()) > CountType->getBitWidth())
      continue;

    // If this exiting block is contained in a nested loop, it is not eligible
    // for insertion of the branch-and-decrement since the inner loop would
    // end up messing up the value in the CTR.
    if (!IsNestingLegal && LI.getLoopFor(BB) != L && !ForceNestedLoop)
      continue;

    // We now have a loop-invariant count of loop iterations (which is not the
    // constant zero) for which we know that this loop will not exit via this
    // exiting block.

    // We need to make sure that this block will run on every loop iteration.
    // For this to be true, we must dominate all blocks with backedges. Such
    // blocks are in-loop predecessors to the header block.
    bool NotAlways = false;
    for (BasicBlock *Pred : predecessors(L->getHeader())) {
      if (!L->contains(Pred))
        continue;

      if (!DT.dominates(BB, Pred)) {
        NotAlways = true;
        break;
      }
    }

    if (NotAlways)
      continue;

    // Make sure this block ends with a conditional branch.
    Instruction *TI = BB->getTerminator();
    if (!TI)
      continue;

    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (!BI->isConditional())
        continue;

      ExitBranch = BI;
    } else
      continue;

    // Note that this block may not be the loop latch block, even if the loop
    // has a latch block.
    ExitBlock = BB;
    ExitCount = EC;
    break;
  }

  if (!ExitBlock)
    return false;
  return true;
}

TargetTransformInfo::TargetTransformInfo(const DataLayout &DL)
    : TTIImpl(new Model<NoTTIImpl>(NoTTIImpl(DL))) {}

TargetTransformInfo::~TargetTransformInfo() = default;

TargetTransformInfo::TargetTransformInfo(TargetTransformInfo &&Arg)
    : TTIImpl(std::move(Arg.TTIImpl)) {}

TargetTransformInfo &TargetTransformInfo::operator=(TargetTransformInfo &&RHS) {
  TTIImpl = std::move(RHS.TTIImpl);
  return *this;
}

unsigned TargetTransformInfo::getInliningThresholdMultiplier() const {
  return TTIImpl->getInliningThresholdMultiplier();
}

unsigned
TargetTransformInfo::getInliningCostBenefitAnalysisSavingsMultiplier() const {
  return TTIImpl->getInliningCostBenefitAnalysisSavingsMultiplier();
}

unsigned
TargetTransformInfo::getInliningCostBenefitAnalysisProfitableMultiplier()
    const {
  return TTIImpl->getInliningCostBenefitAnalysisProfitableMultiplier();
}

unsigned
TargetTransformInfo::adjustInliningThreshold(const CallBase *CB) const {
  return TTIImpl->adjustInliningThreshold(CB);
}

unsigned TargetTransformInfo::getCallerAllocaCost(const CallBase *CB,
                                                  const AllocaInst *AI) const {
  return TTIImpl->getCallerAllocaCost(CB, AI);
}

int TargetTransformInfo::getInlinerVectorBonusPercent() const {
  return TTIImpl->getInlinerVectorBonusPercent();
}

InstructionCost TargetTransformInfo::getGEPCost(
    Type *PointeeType, const Value *Ptr, ArrayRef<const Value *> Operands,
    Type *AccessType, TTI::TargetCostKind CostKind) const {
  return TTIImpl->getGEPCost(PointeeType, Ptr, Operands, AccessType, CostKind);
}

InstructionCost TargetTransformInfo::getPointersChainCost(
    ArrayRef<const Value *> Ptrs, const Value *Base,
    const TTI::PointersChainInfo &Info, Type *AccessTy,
    TTI::TargetCostKind CostKind) const {
  assert((Base || !Info.isSameBase()) &&
         "If pointers have same base address it has to be provided.");
  return TTIImpl->getPointersChainCost(Ptrs, Base, Info, AccessTy, CostKind);
}

unsigned TargetTransformInfo::getEstimatedNumberOfCaseClusters(
    const SwitchInst &SI, unsigned &JTSize, ProfileSummaryInfo *PSI,
    BlockFrequencyInfo *BFI) const {
  return TTIImpl->getEstimatedNumberOfCaseClusters(SI, JTSize, PSI, BFI);
}

InstructionCost
TargetTransformInfo::getInstructionCost(const User *U,
                                        ArrayRef<const Value *> Operands,
                                        enum TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getInstructionCost(U, Operands, CostKind);
  assert((CostKind == TTI::TCK_RecipThroughput || Cost >= 0) &&
         "TTI should not produce negative costs!");
  return Cost;
}

BranchProbability TargetTransformInfo::getPredictableBranchThreshold() const {
  return PredictableBranchThreshold.getNumOccurrences() > 0
             ? BranchProbability(PredictableBranchThreshold, 100)
             : TTIImpl->getPredictableBranchThreshold();
}

bool TargetTransformInfo::hasBranchDivergence(const Function *F) const {
  return TTIImpl->hasBranchDivergence(F);
}

bool TargetTransformInfo::isSourceOfDivergence(const Value *V) const {
  return TTIImpl->isSourceOfDivergence(V);
}

bool llvm::TargetTransformInfo::isAlwaysUniform(const Value *V) const {
  return TTIImpl->isAlwaysUniform(V);
}

bool llvm::TargetTransformInfo::isValidAddrSpaceCast(unsigned FromAS,
                                                     unsigned ToAS) const {
  return TTIImpl->isValidAddrSpaceCast(FromAS, ToAS);
}

bool llvm::TargetTransformInfo::addrspacesMayAlias(unsigned FromAS,
                                                   unsigned ToAS) const {
  return TTIImpl->addrspacesMayAlias(FromAS, ToAS);
}

unsigned TargetTransformInfo::getFlatAddressSpace() const {
  return TTIImpl->getFlatAddressSpace();
}

bool TargetTransformInfo::collectFlatAddressOperands(
    SmallVectorImpl<int> &OpIndexes, Intrinsic::ID IID) const {
  return TTIImpl->collectFlatAddressOperands(OpIndexes, IID);
}

bool TargetTransformInfo::isNoopAddrSpaceCast(unsigned FromAS,
                                              unsigned ToAS) const {
  return TTIImpl->isNoopAddrSpaceCast(FromAS, ToAS);
}

bool TargetTransformInfo::canHaveNonUndefGlobalInitializerInAddressSpace(
    unsigned AS) const {
  return TTIImpl->canHaveNonUndefGlobalInitializerInAddressSpace(AS);
}

unsigned TargetTransformInfo::getAssumedAddrSpace(const Value *V) const {
  return TTIImpl->getAssumedAddrSpace(V);
}

bool TargetTransformInfo::isSingleThreaded() const {
  return TTIImpl->isSingleThreaded();
}

std::pair<const Value *, unsigned>
TargetTransformInfo::getPredicatedAddrSpace(const Value *V) const {
  return TTIImpl->getPredicatedAddrSpace(V);
}

Value *TargetTransformInfo::rewriteIntrinsicWithAddressSpace(
    IntrinsicInst *II, Value *OldV, Value *NewV) const {
  return TTIImpl->rewriteIntrinsicWithAddressSpace(II, OldV, NewV);
}

bool TargetTransformInfo::isLoweredToCall(const Function *F) const {
  return TTIImpl->isLoweredToCall(F);
}

bool TargetTransformInfo::isHardwareLoopProfitable(
    Loop *L, ScalarEvolution &SE, AssumptionCache &AC,
    TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const {
  return TTIImpl->isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
}

bool TargetTransformInfo::preferPredicateOverEpilogue(
    TailFoldingInfo *TFI) const {
  return TTIImpl->preferPredicateOverEpilogue(TFI);
}

TailFoldingStyle TargetTransformInfo::getPreferredTailFoldingStyle(
    bool IVUpdateMayOverflow) const {
  return TTIImpl->getPreferredTailFoldingStyle(IVUpdateMayOverflow);
}

std::optional<Instruction *>
TargetTransformInfo::instCombineIntrinsic(InstCombiner &IC,
                                          IntrinsicInst &II) const {
  return TTIImpl->instCombineIntrinsic(IC, II);
}

std::optional<Value *> TargetTransformInfo::simplifyDemandedUseBitsIntrinsic(
    InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
    bool &KnownBitsComputed) const {
  return TTIImpl->simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
                                                   KnownBitsComputed);
}

std::optional<Value *> TargetTransformInfo::simplifyDemandedVectorEltsIntrinsic(
    InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
    APInt &UndefElts2, APInt &UndefElts3,
    std::function<void(Instruction *, unsigned, APInt, APInt &)>
        SimplifyAndSetOp) const {
  return TTIImpl->simplifyDemandedVectorEltsIntrinsic(
      IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
      SimplifyAndSetOp);
}

void TargetTransformInfo::getUnrollingPreferences(
    Loop *L, ScalarEvolution &SE, UnrollingPreferences &UP,
    OptimizationRemarkEmitter *ORE) const {
  return TTIImpl->getUnrollingPreferences(L, SE, UP, ORE);
}

void TargetTransformInfo::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                                PeelingPreferences &PP) const {
  return TTIImpl->getPeelingPreferences(L, SE, PP);
}

bool TargetTransformInfo::isLegalAddImmediate(int64_t Imm) const {
  return TTIImpl->isLegalAddImmediate(Imm);
}

bool TargetTransformInfo::isLegalICmpImmediate(int64_t Imm) const {
  return TTIImpl->isLegalICmpImmediate(Imm);
}

bool TargetTransformInfo::isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                                int64_t BaseOffset,
                                                bool HasBaseReg, int64_t Scale,
                                                unsigned AddrSpace,
                                                Instruction *I) const {
  return TTIImpl->isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
                                        Scale, AddrSpace, I);
}

bool TargetTransformInfo::isLSRCostLess(const LSRCost &C1,
                                        const LSRCost &C2) const {
  return TTIImpl->isLSRCostLess(C1, C2);
}

bool TargetTransformInfo::isNumRegsMajorCostOfLSR() const {
  return TTIImpl->isNumRegsMajorCostOfLSR();
}

bool TargetTransformInfo::shouldFoldTerminatingConditionAfterLSR() const {
  return TTIImpl->shouldFoldTerminatingConditionAfterLSR();
}

bool TargetTransformInfo::isProfitableLSRChainElement(Instruction *I) const {
  return TTIImpl->isProfitableLSRChainElement(I);
}

bool TargetTransformInfo::canMacroFuseCmp() const {
  return TTIImpl->canMacroFuseCmp();
}

bool TargetTransformInfo::canSaveCmp(Loop *L, BranchInst **BI,
                                     ScalarEvolution *SE, LoopInfo *LI,
                                     DominatorTree *DT, AssumptionCache *AC,
                                     TargetLibraryInfo *LibInfo) const {
  return TTIImpl->canSaveCmp(L, BI, SE, LI, DT, AC, LibInfo);
}

TTI::AddressingModeKind
TargetTransformInfo::getPreferredAddressingMode(const Loop *L,
                                                ScalarEvolution *SE) const {
  return TTIImpl->getPreferredAddressingMode(L, SE);
}

bool TargetTransformInfo::isLegalMaskedStore(Type *DataType,
                                             Align Alignment) const {
  return TTIImpl->isLegalMaskedStore(DataType, Alignment);
}

bool TargetTransformInfo::isLegalMaskedLoad(Type *DataType,
                                            Align Alignment) const {
  return TTIImpl->isLegalMaskedLoad(DataType, Alignment);
}

bool TargetTransformInfo::isLegalNTStore(Type *DataType,
                                         Align Alignment) const {
  return TTIImpl->isLegalNTStore(DataType, Alignment);
}

bool TargetTransformInfo::isLegalNTLoad(Type *DataType, Align Alignment) const {
  return TTIImpl->isLegalNTLoad(DataType, Alignment);
}

bool TargetTransformInfo::isLegalBroadcastLoad(Type *ElementTy,
                                               ElementCount NumElements) const {
  return TTIImpl->isLegalBroadcastLoad(ElementTy, NumElements);
}

bool TargetTransformInfo::isLegalMaskedGather(Type *DataType,
                                              Align Alignment) const {
  return TTIImpl->isLegalMaskedGather(DataType, Alignment);
}

bool TargetTransformInfo::isLegalAltInstr(
    VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
    const SmallBitVector &OpcodeMask) const {
  return TTIImpl->isLegalAltInstr(VecTy, Opcode0, Opcode1, OpcodeMask);
}

bool TargetTransformInfo::isLegalMaskedScatter(Type *DataType,
                                               Align Alignment) const {
  return TTIImpl->isLegalMaskedScatter(DataType, Alignment);
}

bool TargetTransformInfo::forceScalarizeMaskedGather(VectorType *DataType,
                                                     Align Alignment) const {
  return TTIImpl->forceScalarizeMaskedGather(DataType, Alignment);
}

bool TargetTransformInfo::forceScalarizeMaskedScatter(VectorType *DataType,
                                                      Align Alignment) const {
  return TTIImpl->forceScalarizeMaskedScatter(DataType, Alignment);
}

bool TargetTransformInfo::isLegalMaskedCompressStore(Type *DataType) const {
  return TTIImpl->isLegalMaskedCompressStore(DataType);
}

bool TargetTransformInfo::isLegalMaskedExpandLoad(Type *DataType) const {
  return TTIImpl->isLegalMaskedExpandLoad(DataType);
}

bool TargetTransformInfo::enableOrderedReductions() const {
  return TTIImpl->enableOrderedReductions();
}

bool TargetTransformInfo::hasDivRemOp(Type *DataType, bool IsSigned) const {
  return TTIImpl->hasDivRemOp(DataType, IsSigned);
}

bool TargetTransformInfo::hasVolatileVariant(Instruction *I,
                                             unsigned AddrSpace) const {
  return TTIImpl->hasVolatileVariant(I, AddrSpace);
}

bool TargetTransformInfo::prefersVectorizedAddressing() const {
  return TTIImpl->prefersVectorizedAddressing();
}

InstructionCost TargetTransformInfo::getScalingFactorCost(
    Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg,
    int64_t Scale, unsigned AddrSpace) const {
  InstructionCost Cost = TTIImpl->getScalingFactorCost(
      Ty, BaseGV, BaseOffset, HasBaseReg, Scale, AddrSpace);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

bool TargetTransformInfo::LSRWithInstrQueries() const {
  return TTIImpl->LSRWithInstrQueries();
}

bool TargetTransformInfo::isTruncateFree(Type *Ty1, Type *Ty2) const {
  return TTIImpl->isTruncateFree(Ty1, Ty2);
}

bool TargetTransformInfo::isProfitableToHoist(Instruction *I) const {
  return TTIImpl->isProfitableToHoist(I);
}

bool TargetTransformInfo::useAA() const { return TTIImpl->useAA(); }

bool TargetTransformInfo::isTypeLegal(Type *Ty) const {
  return TTIImpl->isTypeLegal(Ty);
}

unsigned TargetTransformInfo::getRegUsageForType(Type *Ty) const {
  return TTIImpl->getRegUsageForType(Ty);
}

bool TargetTransformInfo::shouldBuildLookupTables() const {
  return TTIImpl->shouldBuildLookupTables();
}

bool TargetTransformInfo::shouldBuildLookupTablesForConstant(
    Constant *C) const {
  return TTIImpl->shouldBuildLookupTablesForConstant(C);
}

bool TargetTransformInfo::shouldBuildRelLookupTables() const {
  return TTIImpl->shouldBuildRelLookupTables();
}

bool TargetTransformInfo::useColdCCForColdCall(Function &F) const {
  return TTIImpl->useColdCCForColdCall(F);
}

InstructionCost TargetTransformInfo::getScalarizationOverhead(
    VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract,
    TTI::TargetCostKind CostKind) const {
  return TTIImpl->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
                                           CostKind);
}

InstructionCost TargetTransformInfo::getOperandsScalarizationOverhead(
    ArrayRef<const Value *> Args, ArrayRef<Type *> Tys,
    TTI::TargetCostKind CostKind) const {
  return TTIImpl->getOperandsScalarizationOverhead(Args, Tys, CostKind);
}

bool TargetTransformInfo::supportsEfficientVectorElementLoadStore() const {
  return TTIImpl->supportsEfficientVectorElementLoadStore();
}

bool TargetTransformInfo::supportsTailCalls() const {
  return TTIImpl->supportsTailCalls();
}

bool TargetTransformInfo::supportsTailCallFor(const CallBase *CB) const {
  return TTIImpl->supportsTailCallFor(CB);
}

bool TargetTransformInfo::enableAggressiveInterleaving(
    bool LoopHasReductions) const {
  return TTIImpl->enableAggressiveInterleaving(LoopHasReductions);
}

TargetTransformInfo::MemCmpExpansionOptions
TargetTransformInfo::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  return TTIImpl->enableMemCmpExpansion(OptSize, IsZeroCmp);
}

bool TargetTransformInfo::enableSelectOptimize() const {
  return TTIImpl->enableSelectOptimize();
}

bool TargetTransformInfo::shouldTreatInstructionLikeSelect(
    const Instruction *I) const {
  return TTIImpl->shouldTreatInstructionLikeSelect(I);
}

bool TargetTransformInfo::enableInterleavedAccessVectorization() const {
  return TTIImpl->enableInterleavedAccessVectorization();
}

bool TargetTransformInfo::enableMaskedInterleavedAccessVectorization() const {
  return TTIImpl->enableMaskedInterleavedAccessVectorization();
}

bool TargetTransformInfo::isFPVectorizationPotentiallyUnsafe() const {
  return TTIImpl->isFPVectorizationPotentiallyUnsafe();
}

bool
TargetTransformInfo::allowsMisalignedMemoryAccesses(LLVMContext &Context,
                                                    unsigned BitWidth,
                                                    unsigned AddressSpace,
                                                    Align Alignment,
                                                    unsigned *Fast) const {
  return TTIImpl->allowsMisalignedMemoryAccesses(Context, BitWidth,
                                                 AddressSpace, Alignment, Fast);
}

TargetTransformInfo::PopcntSupportKind
TargetTransformInfo::getPopcntSupport(unsigned IntTyWidthInBit) const {
  return TTIImpl->getPopcntSupport(IntTyWidthInBit);
}

bool TargetTransformInfo::haveFastSqrt(Type *Ty) const {
  return TTIImpl->haveFastSqrt(Ty);
}

bool TargetTransformInfo::isExpensiveToSpeculativelyExecute(
    const Instruction *I) const {
  return TTIImpl->isExpensiveToSpeculativelyExecute(I);
}

bool TargetTransformInfo::isFCmpOrdCheaperThanFCmpZero(Type *Ty) const {
  return TTIImpl->isFCmpOrdCheaperThanFCmpZero(Ty);
}

InstructionCost TargetTransformInfo::getFPOpCost(Type *Ty) const {
  InstructionCost Cost = TTIImpl->getFPOpCost(Ty);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getIntImmCodeSizeCost(unsigned Opcode,
                                                           unsigned Idx,
                                                           const APInt &Imm,
                                                           Type *Ty) const {
  InstructionCost Cost = TTIImpl->getIntImmCodeSizeCost(Opcode, Idx, Imm, Ty);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost
TargetTransformInfo::getIntImmCost(const APInt &Imm, Type *Ty,
                                   TTI::TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getIntImmCost(Imm, Ty, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getIntImmCostInst(
    unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty,
    TTI::TargetCostKind CostKind, Instruction *Inst) const {
  InstructionCost Cost =
      TTIImpl->getIntImmCostInst(Opcode, Idx, Imm, Ty, CostKind, Inst);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost
TargetTransformInfo::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                         const APInt &Imm, Type *Ty,
                                         TTI::TargetCostKind CostKind) const {
  InstructionCost Cost =
      TTIImpl->getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

bool TargetTransformInfo::preferToKeepConstantsAttached(
    const Instruction &Inst, const Function &Fn) const {
  return TTIImpl->preferToKeepConstantsAttached(Inst, Fn);
}

unsigned TargetTransformInfo::getNumberOfRegisters(unsigned ClassID) const {
  return TTIImpl->getNumberOfRegisters(ClassID);
}

unsigned TargetTransformInfo::getRegisterClassForType(bool Vector,
                                                      Type *Ty) const {
  return TTIImpl->getRegisterClassForType(Vector, Ty);
}

const char *TargetTransformInfo::getRegisterClassName(unsigned ClassID) const {
  return TTIImpl->getRegisterClassName(ClassID);
}

TypeSize TargetTransformInfo::getRegisterBitWidth(
    TargetTransformInfo::RegisterKind K) const {
  return TTIImpl->getRegisterBitWidth(K);
}

unsigned TargetTransformInfo::getMinVectorRegisterBitWidth() const {
  return TTIImpl->getMinVectorRegisterBitWidth();
}

std::optional<unsigned> TargetTransformInfo::getMaxVScale() const {
  return TTIImpl->getMaxVScale();
}

std::optional<unsigned> TargetTransformInfo::getVScaleForTuning() const {
  return TTIImpl->getVScaleForTuning();
}

bool TargetTransformInfo::isVScaleKnownToBeAPowerOfTwo() const {
  return TTIImpl->isVScaleKnownToBeAPowerOfTwo();
}

bool TargetTransformInfo::shouldMaximizeVectorBandwidth(
    TargetTransformInfo::RegisterKind K) const {
  return TTIImpl->shouldMaximizeVectorBandwidth(K);
}

ElementCount TargetTransformInfo::getMinimumVF(unsigned ElemWidth,
                                               bool IsScalable) const {
  return TTIImpl->getMinimumVF(ElemWidth, IsScalable);
}

unsigned TargetTransformInfo::getMaximumVF(unsigned ElemWidth,
                                           unsigned Opcode) const {
  return TTIImpl->getMaximumVF(ElemWidth, Opcode);
}

unsigned TargetTransformInfo::getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
                                                Type *ScalarValTy) const {
  return TTIImpl->getStoreMinimumVF(VF, ScalarMemTy, ScalarValTy);
}

bool TargetTransformInfo::shouldConsiderAddressTypePromotion(
    const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const {
  return TTIImpl->shouldConsiderAddressTypePromotion(
      I, AllowPromotionWithoutCommonHeader);
}

unsigned TargetTransformInfo::getCacheLineSize() const {
  return CacheLineSize.getNumOccurrences() > 0 ? CacheLineSize
                                               : TTIImpl->getCacheLineSize();
}

std::optional<unsigned>
TargetTransformInfo::getCacheSize(CacheLevel Level) const {
  return TTIImpl->getCacheSize(Level);
}

std::optional<unsigned>
TargetTransformInfo::getCacheAssociativity(CacheLevel Level) const {
  return TTIImpl->getCacheAssociativity(Level);
}

std::optional<unsigned> TargetTransformInfo::getMinPageSize() const {
  return MinPageSize.getNumOccurrences() > 0 ? MinPageSize
                                             : TTIImpl->getMinPageSize();
}

unsigned TargetTransformInfo::getPrefetchDistance() const {
  return TTIImpl->getPrefetchDistance();
}

unsigned TargetTransformInfo::getMinPrefetchStride(
    unsigned NumMemAccesses, unsigned NumStridedMemAccesses,
    unsigned NumPrefetches, bool HasCall) const {
  return TTIImpl->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
                                       NumPrefetches, HasCall);
}

unsigned TargetTransformInfo::getMaxPrefetchIterationsAhead() const {
  return TTIImpl->getMaxPrefetchIterationsAhead();
}

bool TargetTransformInfo::enableWritePrefetching() const {
  return TTIImpl->enableWritePrefetching();
}

bool TargetTransformInfo::shouldPrefetchAddressSpace(unsigned AS) const {
  return TTIImpl->shouldPrefetchAddressSpace(AS);
}

unsigned TargetTransformInfo::getMaxInterleaveFactor(ElementCount VF) const {
  return TTIImpl->getMaxInterleaveFactor(VF);
}
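
// A note on the helper below (descriptive comment only): getOperandInfo
// classifies a value for the cost model as a uniform or non-uniform constant,
// a uniform (splatted) value, or an arbitrary value, and additionally records
// whether constant operands are powers of two or negated powers of two.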
TargetTransformInfo::OperandValueInfo
TargetTransformInfo::getOperandInfo(const Value *V) {
  OperandValueKind OpInfo = OK_AnyValue;
  OperandValueProperties OpProps = OP_None;

  if (isa<ConstantInt>(V) || isa<ConstantFP>(V)) {
    if (const auto *CI = dyn_cast<ConstantInt>(V)) {
      if (CI->getValue().isPowerOf2())
        OpProps = OP_PowerOf2;
      else if (CI->getValue().isNegatedPowerOf2())
        OpProps = OP_NegatedPowerOf2;
    }
    return {OK_UniformConstantValue, OpProps};
  }

  // A broadcast shuffle creates a uniform value.
  // TODO: Add support for non-zero index broadcasts.
  // TODO: Add support for different source vector width.
  if (const auto *ShuffleInst = dyn_cast<ShuffleVectorInst>(V))
    if (ShuffleInst->isZeroEltSplat())
      OpInfo = OK_UniformValue;

  const Value *Splat = getSplatValue(V);

  // Check for a splat of a constant or for a non-uniform vector of constants
  // and check if the constant(s) are all powers of two.
  if (isa<ConstantVector>(V) || isa<ConstantDataVector>(V)) {
    OpInfo = OK_NonUniformConstantValue;
    if (Splat) {
      OpInfo = OK_UniformConstantValue;
      if (auto *CI = dyn_cast<ConstantInt>(Splat)) {
        if (CI->getValue().isPowerOf2())
          OpProps = OP_PowerOf2;
        else if (CI->getValue().isNegatedPowerOf2())
          OpProps = OP_NegatedPowerOf2;
      }
    } else if (const auto *CDS = dyn_cast<ConstantDataSequential>(V)) {
      bool AllPow2 = true, AllNegPow2 = true;
      for (unsigned I = 0, E = CDS->getNumElements(); I != E; ++I) {
        if (auto *CI = dyn_cast<ConstantInt>(CDS->getElementAsConstant(I))) {
          AllPow2 &= CI->getValue().isPowerOf2();
          AllNegPow2 &= CI->getValue().isNegatedPowerOf2();
          if (AllPow2 || AllNegPow2)
            continue;
        }
        AllPow2 = AllNegPow2 = false;
        break;
      }
      OpProps = AllPow2 ? OP_PowerOf2 : OpProps;
      OpProps = AllNegPow2 ? OP_NegatedPowerOf2 : OpProps;
    }
  }

  // Check for a splat of a uniform value. This is not loop aware, so return
  // true only for the obviously uniform cases (argument, global value).
  if (Splat && (isa<Argument>(Splat) || isa<GlobalValue>(Splat)))
    OpInfo = OK_UniformValue;

  return {OpInfo, OpProps};
}

InstructionCost TargetTransformInfo::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    OperandValueInfo Op1Info, OperandValueInfo Op2Info,
    ArrayRef<const Value *> Args, const Instruction *CxtI) const {
  InstructionCost Cost =
      TTIImpl->getArithmeticInstrCost(Opcode, Ty, CostKind,
                                      Op1Info, Op2Info,
                                      Args, CxtI);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getAltInstrCost(
    VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
    const SmallBitVector &OpcodeMask, TTI::TargetCostKind CostKind) const {
  InstructionCost Cost =
      TTIImpl->getAltInstrCost(VecTy, Opcode0, Opcode1, OpcodeMask, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getShuffleCost(
    ShuffleKind Kind, VectorType *Ty, ArrayRef<int> Mask,
    TTI::TargetCostKind CostKind, int Index, VectorType *SubTp,
    ArrayRef<const Value *> Args) const {
  InstructionCost Cost =
      TTIImpl->getShuffleCost(Kind, Ty, Mask, CostKind, Index, SubTp, Args);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}
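
// A note on the helper below (descriptive comment only): the cast context is
// inferred from the memory access the cast feeds or is fed by: a plain
// load/store, a masked load/store intrinsic, or a gather/scatter intrinsic.
// Extends inspect their source operand; truncates inspect their single user.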
TTI::CastContextHint
TargetTransformInfo::getCastContextHint(const Instruction *I) {
  if (!I)
    return CastContextHint::None;

  auto getLoadStoreKind = [](const Value *V, unsigned LdStOp, unsigned MaskedOp,
                             unsigned GatScatOp) {
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I)
      return CastContextHint::None;

    if (I->getOpcode() == LdStOp)
      return CastContextHint::Normal;

    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      if (II->getIntrinsicID() == MaskedOp)
        return TTI::CastContextHint::Masked;
      if (II->getIntrinsicID() == GatScatOp)
        return TTI::CastContextHint::GatherScatter;
    }

    return TTI::CastContextHint::None;
  };

  switch (I->getOpcode()) {
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPExt:
    return getLoadStoreKind(I->getOperand(0), Instruction::Load,
                            Intrinsic::masked_load, Intrinsic::masked_gather);
  case Instruction::Trunc:
  case Instruction::FPTrunc:
    if (I->hasOneUse())
      return getLoadStoreKind(*I->user_begin(), Instruction::Store,
                              Intrinsic::masked_store,
                              Intrinsic::masked_scatter);
    break;
  default:
    return CastContextHint::None;
  }

  return TTI::CastContextHint::None;
}

InstructionCost TargetTransformInfo::getCastInstrCost(
    unsigned Opcode, Type *Dst, Type *Src, CastContextHint CCH,
    TTI::TargetCostKind CostKind, const Instruction *I) const {
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  InstructionCost Cost =
      TTIImpl->getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getExtractWithExtendCost(
    unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index) const {
  InstructionCost Cost =
      TTIImpl->getExtractWithExtendCost(Opcode, Dst, VecTy, Index);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getCFInstrCost(
    unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I) const {
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  InstructionCost Cost = TTIImpl->getCFInstrCost(Opcode, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getCmpSelInstrCost(
    unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
    TTI::TargetCostKind CostKind, const Instruction *I) const {
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  InstructionCost Cost =
      TTIImpl->getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getVectorInstrCost(
    unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index,
    Value *Op0, Value *Op1) const {
  // FIXME: Assert that Opcode is either InsertElement or ExtractElement.
  // This is mentioned in the interface description and respected by all
  // callers, but never asserted upon.
  InstructionCost Cost =
      TTIImpl->getVectorInstrCost(Opcode, Val, CostKind, Index, Op0, Op1);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost
TargetTransformInfo::getVectorInstrCost(const Instruction &I, Type *Val,
                                        TTI::TargetCostKind CostKind,
                                        unsigned Index) const {
  // FIXME: Assert that Opcode is either InsertElement or ExtractElement.
  // This is mentioned in the interface description and respected by all
  // callers, but never asserted upon.
  InstructionCost Cost = TTIImpl->getVectorInstrCost(I, Val, CostKind, Index);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getReplicationShuffleCost(
    Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts,
    TTI::TargetCostKind CostKind) {
  InstructionCost Cost = TTIImpl->getReplicationShuffleCost(
      EltTy, ReplicationFactor, VF, DemandedDstElts, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getMemoryOpCost(
    unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
    TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo,
    const Instruction *I) const {
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  InstructionCost Cost = TTIImpl->getMemoryOpCost(
      Opcode, Src, Alignment, AddressSpace, CostKind, OpInfo, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getMaskedMemoryOpCost(
    unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
    TTI::TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getMaskedMemoryOpCost(Opcode, Src, Alignment,
                                                        AddressSpace, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getGatherScatterOpCost(
    unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
    Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) const {
  InstructionCost Cost = TTIImpl->getGatherScatterOpCost(
      Opcode, DataTy, Ptr, VariableMask, Alignment, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) const {
  InstructionCost Cost = TTIImpl->getInterleavedMemoryOpCost(
      Opcode, VecTy, Factor, Indices, Alignment, AddressSpace, CostKind,
      UseMaskForCond, UseMaskForGaps);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost
TargetTransformInfo::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                           TTI::TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getIntrinsicInstrCost(ICA, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost
TargetTransformInfo::getCallInstrCost(Function *F, Type *RetTy,
                                      ArrayRef<Type *> Tys,
                                      TTI::TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getCallInstrCost(F, RetTy, Tys, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

unsigned TargetTransformInfo::getNumberOfParts(Type *Tp) const {
  return TTIImpl->getNumberOfParts(Tp);
}

InstructionCost
TargetTransformInfo::getAddressComputationCost(Type *Tp, ScalarEvolution *SE,
                                               const SCEV *Ptr) const {
  InstructionCost Cost = TTIImpl->getAddressComputationCost(Tp, SE, Ptr);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getMemcpyCost(const Instruction *I) const {
  InstructionCost Cost = TTIImpl->getMemcpyCost(I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

uint64_t TargetTransformInfo::getMaxMemIntrinsicInlineSizeThreshold() const {
  return TTIImpl->getMaxMemIntrinsicInlineSizeThreshold();
}

InstructionCost TargetTransformInfo::getArithmeticReductionCost(
    unsigned Opcode, VectorType *Ty, std::optional<FastMathFlags> FMF,
    TTI::TargetCostKind CostKind) const {
  InstructionCost Cost =
      TTIImpl->getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getMinMaxReductionCost(
    Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF,
    TTI::TargetCostKind CostKind) const {
  InstructionCost Cost =
      TTIImpl->getMinMaxReductionCost(IID, Ty, FMF, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getExtendedReductionCost(
    unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty,
    FastMathFlags FMF, TTI::TargetCostKind CostKind) const {
  return TTIImpl->getExtendedReductionCost(Opcode, IsUnsigned, ResTy, Ty, FMF,
                                           CostKind);
}

InstructionCost TargetTransformInfo::getMulAccReductionCost(
    bool IsUnsigned, Type *ResTy, VectorType *Ty,
    TTI::TargetCostKind CostKind) const {
  return TTIImpl->getMulAccReductionCost(IsUnsigned, ResTy, Ty, CostKind);
}

InstructionCost
TargetTransformInfo::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const {
  return TTIImpl->getCostOfKeepingLiveOverCall(Tys);
}

bool TargetTransformInfo::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                             MemIntrinsicInfo &Info) const {
  return TTIImpl->getTgtMemIntrinsic(Inst, Info);
}

unsigned TargetTransformInfo::getAtomicMemIntrinsicMaxElementSize() const {
  return TTIImpl->getAtomicMemIntrinsicMaxElementSize();
}

Value *TargetTransformInfo::getOrCreateResultFromMemIntrinsic(
    IntrinsicInst *Inst, Type *ExpectedType) const {
  return TTIImpl->getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
}

Type *TargetTransformInfo::getMemcpyLoopLoweringType(
    LLVMContext &Context, Value *Length, unsigned SrcAddrSpace,
    unsigned DestAddrSpace, unsigned SrcAlign, unsigned DestAlign,
    std::optional<uint32_t> AtomicElementSize) const {
  return TTIImpl->getMemcpyLoopLoweringType(Context, Length, SrcAddrSpace,
                                            DestAddrSpace, SrcAlign, DestAlign,
                                            AtomicElementSize);
}

void TargetTransformInfo::getMemcpyLoopResidualLoweringType(
    SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
    unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
    unsigned SrcAlign, unsigned DestAlign,
    std::optional<uint32_t> AtomicCpySize) const {
  TTIImpl->getMemcpyLoopResidualLoweringType(
      OpsOut, Context, RemainingBytes, SrcAddrSpace, DestAddrSpace, SrcAlign,
      DestAlign, AtomicCpySize);
}

bool TargetTransformInfo::areInlineCompatible(const Function *Caller,
                                              const Function *Callee) const {
  return TTIImpl->areInlineCompatible(Caller, Callee);
}

unsigned
TargetTransformInfo::getInlineCallPenalty(const Function *F,
                                          const CallBase &Call,
                                          unsigned DefaultCallPenalty) const {
  return TTIImpl->getInlineCallPenalty(F, Call, DefaultCallPenalty);
}

bool TargetTransformInfo::areTypesABICompatible(
    const Function *Caller, const Function *Callee,
    const ArrayRef<Type *> &Types) const {
  return TTIImpl->areTypesABICompatible(Caller, Callee, Types);
}

bool TargetTransformInfo::isIndexedLoadLegal(MemIndexedMode Mode,
                                             Type *Ty) const {
  return TTIImpl->isIndexedLoadLegal(Mode, Ty);
}

bool TargetTransformInfo::isIndexedStoreLegal(MemIndexedMode Mode,
                                              Type *Ty) const {
  return TTIImpl->isIndexedStoreLegal(Mode, Ty);
}

unsigned TargetTransformInfo::getLoadStoreVecRegBitWidth(unsigned AS) const {
  return TTIImpl->getLoadStoreVecRegBitWidth(AS);
}

bool TargetTransformInfo::isLegalToVectorizeLoad(LoadInst *LI) const {
  return TTIImpl->isLegalToVectorizeLoad(LI);
}

bool TargetTransformInfo::isLegalToVectorizeStore(StoreInst *SI) const {
  return TTIImpl->isLegalToVectorizeStore(SI);
}

bool TargetTransformInfo::isLegalToVectorizeLoadChain(
    unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const {
  return TTIImpl->isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment,
                                              AddrSpace);
}

bool TargetTransformInfo::isLegalToVectorizeStoreChain(
    unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const {
  return TTIImpl->isLegalToVectorizeStoreChain(ChainSizeInBytes, Alignment,
                                               AddrSpace);
}

bool TargetTransformInfo::isLegalToVectorizeReduction(
    const RecurrenceDescriptor &RdxDesc, ElementCount VF) const {
  return TTIImpl->isLegalToVectorizeReduction(RdxDesc, VF);
}

bool TargetTransformInfo::isElementTypeLegalForScalableVector(Type *Ty) const {
  return TTIImpl->isElementTypeLegalForScalableVector(Ty);
}

unsigned TargetTransformInfo::getLoadVectorFactor(unsigned VF,
                                                  unsigned LoadSize,
                                                  unsigned ChainSizeInBytes,
                                                  VectorType *VecTy) const {
  return TTIImpl->getLoadVectorFactor(VF, LoadSize, ChainSizeInBytes, VecTy);
}

unsigned TargetTransformInfo::getStoreVectorFactor(unsigned VF,
                                                   unsigned StoreSize,
                                                   unsigned ChainSizeInBytes,
                                                   VectorType *VecTy) const {
  return TTIImpl->getStoreVectorFactor(VF, StoreSize, ChainSizeInBytes, VecTy);
}

bool TargetTransformInfo::preferInLoopReduction(unsigned Opcode, Type *Ty,
                                                ReductionFlags Flags) const {
  return TTIImpl->preferInLoopReduction(Opcode, Ty, Flags);
}

bool TargetTransformInfo::preferPredicatedReductionSelect(
    unsigned Opcode, Type *Ty, ReductionFlags Flags) const {
  return TTIImpl->preferPredicatedReductionSelect(Opcode, Ty, Flags);
}

bool TargetTransformInfo::preferEpilogueVectorization() const {
  return TTIImpl->preferEpilogueVectorization();
}

TargetTransformInfo::VPLegalization
TargetTransformInfo::getVPLegalizationStrategy(const VPIntrinsic &VPI) const {
  return TTIImpl->getVPLegalizationStrategy(VPI);
}

bool TargetTransformInfo::hasArmWideBranch(bool Thumb) const {
  return TTIImpl->hasArmWideBranch(Thumb);
}

unsigned TargetTransformInfo::getMaxNumArgs() const {
  return TTIImpl->getMaxNumArgs();
}

bool TargetTransformInfo::shouldExpandReduction(const IntrinsicInst *II) const {
  return TTIImpl->shouldExpandReduction(II);
}

unsigned TargetTransformInfo::getGISelRematGlobalCost() const {
  return TTIImpl->getGISelRematGlobalCost();
}

unsigned TargetTransformInfo::getMinTripCountTailFoldingThreshold() const {
  return TTIImpl->getMinTripCountTailFoldingThreshold();
}

bool TargetTransformInfo::supportsScalableVectors() const {
  return TTIImpl->supportsScalableVectors();
}

bool TargetTransformInfo::enableScalableVectorization() const {
  return TTIImpl->enableScalableVectorization();
}

bool TargetTransformInfo::hasActiveVectorLength(unsigned Opcode, Type *DataType,
                                                Align Alignment) const {
  return TTIImpl->hasActiveVectorLength(Opcode, DataType, Alignment);
}

TargetTransformInfo::Concept::~Concept() = default;
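
// Typical usage (a sketch, not exercised in this file): passes obtain a
// TargetTransformInfo through the analysis framework rather than constructing
// one directly, e.g. in a new-PM function pass:
//   TargetTransformInfo &TTI = AM.getResult<TargetIRAnalysis>(F);
// or from a legacy pass:
//   TargetTransformInfo &TTI =
//       getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);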
TargetIRAnalysis::TargetIRAnalysis() : TTICallback(&getDefaultTTI) {}

TargetIRAnalysis::TargetIRAnalysis(
    std::function<Result(const Function &)> TTICallback)
    : TTICallback(std::move(TTICallback)) {}

TargetIRAnalysis::Result TargetIRAnalysis::run(const Function &F,
                                               FunctionAnalysisManager &) {
  return TTICallback(F);
}

AnalysisKey TargetIRAnalysis::Key;

TargetIRAnalysis::Result TargetIRAnalysis::getDefaultTTI(const Function &F) {
  return Result(F.getParent()->getDataLayout());
}

// Register the basic pass.
INITIALIZE_PASS(TargetTransformInfoWrapperPass, "tti",
                "Target Transform Information", false, true)
char TargetTransformInfoWrapperPass::ID = 0;

void TargetTransformInfoWrapperPass::anchor() {}

TargetTransformInfoWrapperPass::TargetTransformInfoWrapperPass()
    : ImmutablePass(ID) {
  initializeTargetTransformInfoWrapperPassPass(
      *PassRegistry::getPassRegistry());
}

TargetTransformInfoWrapperPass::TargetTransformInfoWrapperPass(
    TargetIRAnalysis TIRA)
    : ImmutablePass(ID), TIRA(std::move(TIRA)) {
  initializeTargetTransformInfoWrapperPassPass(
      *PassRegistry::getPassRegistry());
}

TargetTransformInfo &TargetTransformInfoWrapperPass::getTTI(const Function &F) {
  FunctionAnalysisManager DummyFAM;
  TTI = TIRA.run(F, DummyFAM);
  return *TTI;
}

ImmutablePass *
llvm::createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA) {
  return new TargetTransformInfoWrapperPass(std::move(TIRA));
}