//===-- PPCTargetTransformInfo.cpp - PPC specific TTI ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "PPCTargetTransformInfo.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
using namespace llvm;

#define DEBUG_TYPE "ppctti"

static cl::opt<bool> DisablePPCConstHoist("disable-ppc-constant-hoisting",
    cl::desc("disable constant hoisting on PPC"), cl::init(false), cl::Hidden);

// This is currently only used for the data prefetch pass which is only enabled
// for BG/Q by default.
static cl::opt<unsigned>
CacheLineSize("ppc-loop-prefetch-cache-line", cl::Hidden, cl::init(64),
              cl::desc("The loop prefetch cache line size"));

static cl::opt<bool>
EnablePPCColdCC("ppc-enable-coldcc", cl::Hidden, cl::init(false),
                cl::desc("Enable using coldcc calling conv for cold "
                         "internal functions"));

static cl::opt<bool>
LsrNoInsnsCost("ppc-lsr-no-insns-cost", cl::Hidden, cl::init(false),
               cl::desc("Do not add instruction count to lsr cost model"));

// The latency of mtctr is only justified if there are more than 4
// comparisons that will be removed as a result.
static cl::opt<unsigned>
SmallCTRLoopThreshold("min-ctr-loop-threshold", cl::init(4), cl::Hidden,
                      cl::desc("Loops with a constant trip count smaller than "
                               "this value will not use the count register."));

//===----------------------------------------------------------------------===//
//
// PPC cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
PPCTTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (ST->hasPOPCNTD() != PPCSubtarget::POPCNTD_Unavailable && TyWidth <= 64)
    return ST->hasPOPCNTD() == PPCSubtarget::POPCNTD_Slow ?
             TTI::PSK_SlowHardware : TTI::PSK_FastHardware;
  return TTI::PSK_Software;
}

int PPCTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
                              TTI::TargetCostKind CostKind) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(Imm, Ty, CostKind);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  if (Imm == 0)
    return TTI::TCC_Free;

  if (Imm.getBitWidth() <= 64) {
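    // A 16-bit signed immediate can be encoded directly in a single
    // instruction (e.g. li/addi), so it is as cheap as an immediate gets.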
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Basic;

    if (isInt<32>(Imm.getSExtValue())) {
      // A constant that can be materialized using lis.
      if ((Imm.getZExtValue() & 0xFFFF) == 0)
        return TTI::TCC_Basic;

      return 2 * TTI::TCC_Basic;
    }
  }

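  // Anything wider needs a multi-instruction materialization sequence (up to
  // five instructions on PPC64); approximate that here.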
  return 4 * TTI::TCC_Basic;
}

int PPCTTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                    const APInt &Imm, Type *Ty,
                                    TTI::TargetCostKind CostKind) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return PPCTTIImpl::getIntImmCost(Imm, Ty, CostKind);
}

int PPCTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                  const APInt &Imm, Type *Ty,
                                  TTI::TargetCostKind CostKind) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCostInst(Opcode, Idx, Imm, Ty, CostKind);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  unsigned ImmIdx = ~0U;
  bool ShiftedFree = false, RunFree = false, UnsignedFree = false,
       ZeroFree = false;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::And:
    RunFree = true; // (for the rotate-and-mask instructions)
    LLVM_FALLTHROUGH;
  case Instruction::Add:
  case Instruction::Or:
  case Instruction::Xor:
    ShiftedFree = true;
    LLVM_FALLTHROUGH;
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    ImmIdx = 1;
    break;
  case Instruction::ICmp:
    UnsignedFree = true;
    ImmIdx = 1;
    // Zero comparisons can use record-form instructions.
    LLVM_FALLTHROUGH;
  case Instruction::Select:
    ZeroFree = true;
    break;
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Ret:
  case Instruction::Load:
  case Instruction::Store:
    break;
  }

  if (ZeroFree && Imm == 0)
    return TTI::TCC_Free;

  if (Idx == ImmIdx && Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;

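    // Contiguous masks (and their complements) can be encoded directly in the
    // rotate-and-mask instructions (rlwinm/rldicl and friends), so such
    // constants never need to be materialized in a register.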
    if (RunFree) {
      if (Imm.getBitWidth() <= 32 &&
          (isShiftedMask_32(Imm.getZExtValue()) ||
           isShiftedMask_32(~Imm.getZExtValue())))
        return TTI::TCC_Free;

      if (ST->isPPC64() &&
          (isShiftedMask_64(Imm.getZExtValue()) ||
           isShiftedMask_64(~Imm.getZExtValue())))
        return TTI::TCC_Free;
    }

    if (UnsignedFree && isUInt<16>(Imm.getZExtValue()))
      return TTI::TCC_Free;

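    // Constants with the low 16 bits clear can typically be applied with the
    // shifted-immediate forms (addis/oris/xoris), so treat them as free too.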
    if (ShiftedFree && (Imm.getZExtValue() & 0xFFFF) == 0)
      return TTI::TCC_Free;
  }

  return PPCTTIImpl::getIntImmCost(Imm, Ty, CostKind);
}

unsigned
PPCTTIImpl::getUserCost(const User *U, ArrayRef<const Value *> Operands,
                        TTI::TargetCostKind CostKind) {
  // Casts and memory operations are handled by getCastInstrCost and
  // getMemoryOpCost, which already perform the vector adjustment.
  if (isa<CastInst>(U) || isa<LoadInst>(U) || isa<StoreInst>(U))
    return BaseT::getUserCost(U, Operands, CostKind);

  if (U->getType()->isVectorTy()) {
    // Instructions that need to be split should cost more.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, U->getType());
    return LT.first * BaseT::getUserCost(U, Operands, CostKind);
  }

  return BaseT::getUserCost(U, Operands, CostKind);
}

// Determining the address of a TLS variable results in a function call in
// certain TLS models.
static bool memAddrUsesCTR(const Value *MemAddr, const PPCTargetMachine &TM,
                           SmallPtrSetImpl<const Value *> &Visited) {
  // No need to traverse again if we already checked this operand.
  if (!Visited.insert(MemAddr).second)
    return false;
  const auto *GV = dyn_cast<GlobalValue>(MemAddr);
  if (!GV) {
    // Recurse to check for constants that refer to TLS global variables.
    if (const auto *CV = dyn_cast<Constant>(MemAddr))
      for (const auto &CO : CV->operands())
        if (memAddrUsesCTR(CO, TM, Visited))
          return true;
    return false;
  }

  if (!GV->isThreadLocal())
    return false;
  TLSModel::Model Model = TM.getTLSModel(GV);
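  // The general- and local-dynamic models resolve the address through a call
  // to __tls_get_addr, i.e. an actual function call.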
  return Model == TLSModel::GeneralDynamic || Model == TLSModel::LocalDynamic;
}

bool PPCTTIImpl::mightUseCTR(BasicBlock *BB, TargetLibraryInfo *LibInfo,
                             SmallPtrSetImpl<const Value *> &Visited) {
  const PPCTargetMachine &TM = ST->getTargetMachine();

  // Loop through the inline asm constraints and look for something that
  // clobbers ctr.
  auto asmClobbersCTR = [](InlineAsm *IA) {
    InlineAsm::ConstraintInfoVector CIV = IA->ParseConstraints();
    for (unsigned i = 0, ie = CIV.size(); i < ie; ++i) {
      InlineAsm::ConstraintInfo &C = CIV[i];
      if (C.Type != InlineAsm::isInput)
        for (unsigned j = 0, je = C.Codes.size(); j < je; ++j)
          if (StringRef(C.Codes[j]).equals_lower("{ctr}"))
            return true;
    }
    return false;
  };

  auto isLargeIntegerTy = [](bool Is32Bit, Type *Ty) {
    if (IntegerType *ITy = dyn_cast<IntegerType>(Ty))
      return ITy->getBitWidth() > (Is32Bit ? 32U : 64U);

    return false;
  };

  for (BasicBlock::iterator J = BB->begin(), JE = BB->end();
       J != JE; ++J) {
    if (CallInst *CI = dyn_cast<CallInst>(J)) {
      // Inline ASM is okay, unless it clobbers the ctr register.
      if (InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledOperand())) {
        if (asmClobbersCTR(IA))
          return true;
        continue;
      }

      if (Function *F = CI->getCalledFunction()) {
        // Most intrinsics don't become function calls, but some might.
        // sin, cos, exp and log are always calls.
        unsigned Opcode = 0;
        if (F->getIntrinsicID() != Intrinsic::not_intrinsic) {
          switch (F->getIntrinsicID()) {
          default: continue;
          // If we have a call to loop_decrement or set_loop_iterations,
          // we're definitely using CTR.
          case Intrinsic::set_loop_iterations:
          case Intrinsic::loop_decrement:
            return true;

          // Exclude eh_sjlj_setjmp; we don't need to exclude eh_sjlj_longjmp
          // because, although it does clobber the counter register, the
          // control can't then return to inside the loop unless there is also
          // an eh_sjlj_setjmp.
          case Intrinsic::eh_sjlj_setjmp:

          case Intrinsic::memcpy:
          case Intrinsic::memmove:
          case Intrinsic::memset:
          case Intrinsic::powi:
          case Intrinsic::log:
          case Intrinsic::log2:
          case Intrinsic::log10:
          case Intrinsic::exp:
          case Intrinsic::exp2:
          case Intrinsic::pow:
          case Intrinsic::sin:
          case Intrinsic::cos:
            return true;
          case Intrinsic::copysign:
            if (CI->getArgOperand(0)->getType()->getScalarType()->
                isPPC_FP128Ty())
              return true;
            else
              continue; // ISD::FCOPYSIGN is never a library call.
          case Intrinsic::fma:                Opcode = ISD::FMA;        break;
          case Intrinsic::sqrt:               Opcode = ISD::FSQRT;      break;
          case Intrinsic::floor:              Opcode = ISD::FFLOOR;     break;
          case Intrinsic::ceil:               Opcode = ISD::FCEIL;      break;
          case Intrinsic::trunc:              Opcode = ISD::FTRUNC;     break;
          case Intrinsic::rint:               Opcode = ISD::FRINT;      break;
          case Intrinsic::lrint:              Opcode = ISD::LRINT;      break;
          case Intrinsic::llrint:             Opcode = ISD::LLRINT;     break;
          case Intrinsic::nearbyint:          Opcode = ISD::FNEARBYINT; break;
          case Intrinsic::round:              Opcode = ISD::FROUND;     break;
          case Intrinsic::lround:             Opcode = ISD::LROUND;     break;
          case Intrinsic::llround:            Opcode = ISD::LLROUND;    break;
          case Intrinsic::minnum:             Opcode = ISD::FMINNUM;    break;
          case Intrinsic::maxnum:             Opcode = ISD::FMAXNUM;    break;
          case Intrinsic::umul_with_overflow: Opcode = ISD::UMULO;      break;
          case Intrinsic::smul_with_overflow: Opcode = ISD::SMULO;      break;
          }
        }

        // PowerPC does not use [US]DIVREM or other library calls for
        // operations on regular types which are not otherwise library calls
        // (i.e. soft float or atomics). If adapting for targets that do,
        // additional care is required here.

        LibFunc Func;
        if (!F->hasLocalLinkage() && F->hasName() && LibInfo &&
            LibInfo->getLibFunc(F->getName(), Func) &&
            LibInfo->hasOptimizedCodeGen(Func)) {
          // Non-read-only functions are never treated as intrinsics.
          if (!CI->onlyReadsMemory())
            return true;

          // Conversion happens only for FP calls.
          if (!CI->getArgOperand(0)->getType()->isFloatingPointTy())
            return true;

          switch (Func) {
          default: return true;
          case LibFunc_copysign:
          case LibFunc_copysignf:
            continue; // ISD::FCOPYSIGN is never a library call.
          case LibFunc_copysignl:
            return true;
          case LibFunc_fabs:
          case LibFunc_fabsf:
          case LibFunc_fabsl:
            continue; // ISD::FABS is never a library call.
          case LibFunc_sqrt:
          case LibFunc_sqrtf:
          case LibFunc_sqrtl:
            Opcode = ISD::FSQRT; break;
          case LibFunc_floor:
          case LibFunc_floorf:
          case LibFunc_floorl:
            Opcode = ISD::FFLOOR; break;
          case LibFunc_nearbyint:
          case LibFunc_nearbyintf:
          case LibFunc_nearbyintl:
            Opcode = ISD::FNEARBYINT; break;
          case LibFunc_ceil:
          case LibFunc_ceilf:
          case LibFunc_ceill:
            Opcode = ISD::FCEIL; break;
          case LibFunc_rint:
          case LibFunc_rintf:
          case LibFunc_rintl:
            Opcode = ISD::FRINT; break;
          case LibFunc_round:
          case LibFunc_roundf:
          case LibFunc_roundl:
            Opcode = ISD::FROUND; break;
          case LibFunc_trunc:
          case LibFunc_truncf:
          case LibFunc_truncl:
            Opcode = ISD::FTRUNC; break;
          case LibFunc_fmin:
          case LibFunc_fminf:
          case LibFunc_fminl:
            Opcode = ISD::FMINNUM; break;
          case LibFunc_fmax:
          case LibFunc_fmaxf:
          case LibFunc_fmaxl:
            Opcode = ISD::FMAXNUM; break;
          }
        }

        if (Opcode) {
          EVT EVTy =
              TLI->getValueType(DL, CI->getArgOperand(0)->getType(), true);

          if (EVTy == MVT::Other)
            return true;

          if (TLI->isOperationLegalOrCustom(Opcode, EVTy))
            continue;
          else if (EVTy.isVector() &&
                   TLI->isOperationLegalOrCustom(Opcode, EVTy.getScalarType()))
            continue;

          return true;
        }
      }

      return true;
    } else if (isa<BinaryOperator>(J) &&
               (J->getType()->getScalarType()->isFP128Ty() ||
                J->getType()->getScalarType()->isPPC_FP128Ty())) {
      // Most operations on f128 or ppc_f128 values become calls.
      return true;
    } else if (isa<UIToFPInst>(J) || isa<SIToFPInst>(J) ||
               isa<FPToUIInst>(J) || isa<FPToSIInst>(J)) {
      CastInst *CI = cast<CastInst>(J);
      if (CI->getSrcTy()->getScalarType()->isPPC_FP128Ty() ||
          CI->getDestTy()->getScalarType()->isPPC_FP128Ty() ||
          isLargeIntegerTy(!TM.isPPC64(), CI->getSrcTy()->getScalarType()) ||
          isLargeIntegerTy(!TM.isPPC64(), CI->getDestTy()->getScalarType()))
        return true;
    } else if (isLargeIntegerTy(!TM.isPPC64(),
                                J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::UDiv ||
                J->getOpcode() == Instruction::SDiv ||
                J->getOpcode() == Instruction::URem ||
                J->getOpcode() == Instruction::SRem)) {
      return true;
    } else if (!TM.isPPC64() &&
               isLargeIntegerTy(false, J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::Shl ||
                J->getOpcode() == Instruction::AShr ||
                J->getOpcode() == Instruction::LShr)) {
      // Only on PPC32, for 128-bit integers (specifically not 64-bit
      // integers), these might be runtime calls.
      return true;
    } else if (isa<IndirectBrInst>(J) || isa<InvokeInst>(J)) {
      // On PowerPC, indirect jumps use the counter register.
      return true;
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(J)) {
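      // A switch with enough cases to be lowered as a jump table is expanded
      // to an indirect branch, which on PowerPC goes through the counter
      // register (mtctr/bctr).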
      if (SI->getNumCases() + 1 >= (unsigned)TLI->getMinimumJumpTableEntries())
        return true;
    }

    // FREM is always a call.
    if (J->getOpcode() == Instruction::FRem)
      return true;

    if (ST->useSoftFloat()) {
      switch (J->getOpcode()) {
      case Instruction::FAdd:
      case Instruction::FSub:
      case Instruction::FMul:
      case Instruction::FDiv:
      case Instruction::FPTrunc:
      case Instruction::FPExt:
      case Instruction::FPToUI:
      case Instruction::FPToSI:
      case Instruction::UIToFP:
      case Instruction::SIToFP:
      case Instruction::FCmp:
        return true;
      }
    }

    for (Value *Operand : J->operands())
      if (memAddrUsesCTR(Operand, TM, Visited))
        return true;
  }

  return false;
}

bool PPCTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                          AssumptionCache &AC,
                                          TargetLibraryInfo *LibInfo,
                                          HardwareLoopInfo &HWLoopInfo) {
  const PPCTargetMachine &TM = ST->getTargetMachine();
  TargetSchedModel SchedModel;
  SchedModel.init(ST);

  // Do not convert small, short loops into CTR loops.
  unsigned ConstTripCount = SE.getSmallConstantTripCount(L);
  if (ConstTripCount && ConstTripCount < SmallCTRLoopThreshold) {
    SmallPtrSet<const Value *, 32> EphValues;
    CodeMetrics::collectEphemeralValues(L, &AC, EphValues);
    CodeMetrics Metrics;
    for (BasicBlock *BB : L->blocks())
      Metrics.analyzeBasicBlock(BB, *this, EphValues);
    // 6 is an approximate latency for the mtctr instruction.
    if (Metrics.NumInsts <= (6 * SchedModel.getIssueWidth()))
      return false;
  }

  // We don't want to spill/restore the counter register, and so we don't
  // want to use the counter register if the loop contains calls.
  SmallPtrSet<const Value *, 4> Visited;
  for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
       I != IE; ++I)
    if (mightUseCTR(*I, LibInfo, Visited))
      return false;

  SmallVector<BasicBlock*, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  // If there is an exit edge known to be frequently taken,
  // we should not transform this loop.
  for (auto &BB : ExitingBlocks) {
    Instruction *TI = BB->getTerminator();
    if (!TI) continue;

    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      uint64_t TrueWeight = 0, FalseWeight = 0;
      if (!BI->isConditional() ||
          !BI->extractProfMetadata(TrueWeight, FalseWeight))
        continue;

      // If the exit path is more frequent than the loop path,
      // we return here without further analysis for this loop.
      bool TrueIsExit = !L->contains(BI->getSuccessor(0));
      if (( TrueIsExit && FalseWeight < TrueWeight) ||
          (!TrueIsExit && FalseWeight > TrueWeight))
        return false;
    }
  }

  // If an exit block has a PHI that accesses a TLS variable as one of the
  // incoming values from the loop, we cannot produce a CTR loop because the
  // address for that value will be computed in the loop.
  SmallVector<BasicBlock *, 4> ExitBlocks;
  L->getExitBlocks(ExitBlocks);
  for (auto &BB : ExitBlocks) {
    for (auto &PHI : BB->phis()) {
      for (int Idx = 0, EndIdx = PHI.getNumIncomingValues(); Idx < EndIdx;
           Idx++) {
        const BasicBlock *IncomingBB = PHI.getIncomingBlock(Idx);
        const Value *IncomingValue = PHI.getIncomingValue(Idx);
        if (L->contains(IncomingBB) &&
            memAddrUsesCTR(IncomingValue, TM, Visited))
          return false;
      }
    }
  }

  LLVMContext &C = L->getHeader()->getContext();
  HWLoopInfo.CountType = TM.isPPC64() ?
    Type::getInt64Ty(C) : Type::getInt32Ty(C);
  HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
  return true;
}

void PPCTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP) {
  if (ST->getCPUDirective() == PPC::DIR_A2) {
    // The A2 is in-order with a deep pipeline, and concatenation unrolling
    // helps expose latency-hiding opportunities to the instruction scheduler.
    UP.Partial = UP.Runtime = true;

    // We unroll a lot on the A2 (hundreds of instructions), and the benefits
    // often outweigh the cost of a division to compute the trip count.
    UP.AllowExpensiveTripCount = true;
  }

  BaseT::getUnrollingPreferences(L, SE, UP);
}

void PPCTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                       TTI::PeelingPreferences &PP) {
  BaseT::getPeelingPreferences(L, SE, PP);
}
// This function returns true to allow using the coldcc calling convention.
// Returning true results in coldcc being used for functions that are cold at
// all call sites, provided their callers do not call any other non-coldcc
// functions.
bool PPCTTIImpl::useColdCCForColdCall(Function &F) {
  return EnablePPCColdCC;
}

bool PPCTTIImpl::enableAggressiveInterleaving(bool LoopHasReductions) {
  // On the A2, always unroll aggressively. For QPX unaligned loads, we depend
  // on combining the loads generated for consecutive accesses, and failure to
  // do so is particularly expensive. Aggressive interleaving makes such
  // combining much more likely (compared to only using concatenation
  // unrolling).
  if (ST->getCPUDirective() == PPC::DIR_A2)
    return true;

  return LoopHasReductions;
}

PPCTTIImpl::TTI::MemCmpExpansionOptions
PPCTTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  TTI::MemCmpExpansionOptions Options;
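  // Expand memcmp with 8/4/2/1-byte loads; the total number of loads is
  // capped by the target's memcmp inline-expansion limit, which differs when
  // optimizing for size.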
  Options.LoadSizes = {8, 4, 2, 1};
  Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
  return Options;
}

bool PPCTTIImpl::enableInterleavedAccessVectorization() {
  return true;
}

unsigned PPCTTIImpl::getNumberOfRegisters(unsigned ClassID) const {
  assert(ClassID == GPRRC || ClassID == FPRRC ||
         ClassID == VRRC || ClassID == VSXRC);
  if (ST->hasVSX()) {
    assert(ClassID == GPRRC || ClassID == VSXRC || ClassID == VRRC);
    return ClassID == VSXRC ? 64 : 32;
  }
  assert(ClassID == GPRRC || ClassID == FPRRC || ClassID == VRRC);
  return 32;
}

unsigned PPCTTIImpl::getRegisterClassForType(bool Vector, Type *Ty) const {
  if (Vector)
    return ST->hasVSX() ? VSXRC : VRRC;
  else if (Ty && (Ty->getScalarType()->isFloatTy() ||
                  Ty->getScalarType()->isDoubleTy()))
    return ST->hasVSX() ? VSXRC : FPRRC;
  else if (Ty && (Ty->getScalarType()->isFP128Ty() ||
                  Ty->getScalarType()->isPPC_FP128Ty()))
    return VRRC;
  else if (Ty && Ty->getScalarType()->isHalfTy())
    return VSXRC;
  else
    return GPRRC;
}

const char* PPCTTIImpl::getRegisterClassName(unsigned ClassID) const {

  switch (ClassID) {
    default:
      llvm_unreachable("unknown register class");
      return "PPC::unknown register class";
    case GPRRC:       return "PPC::GPRRC";
    case FPRRC:       return "PPC::FPRRC";
    case VRRC:        return "PPC::VRRC";
    case VSXRC:       return "PPC::VSXRC";
  }
}

unsigned PPCTTIImpl::getRegisterBitWidth(bool Vector) const {
  if (Vector) {
    if (ST->hasQPX()) return 256;
    if (ST->hasAltivec()) return 128;
    return 0;
  }

  if (ST->isPPC64())
    return 64;
  return 32;
}

unsigned PPCTTIImpl::getCacheLineSize() const {
  // Check first if the user specified a custom line size.
  if (CacheLineSize.getNumOccurrences() > 0)
    return CacheLineSize;

  // Starting with P7 we have a cache line size of 128.
  unsigned Directive = ST->getCPUDirective();
  // Assume that Future CPU has the same cache line size as the others.
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9 || Directive == PPC::DIR_PWR10 ||
      Directive == PPC::DIR_PWR_FUTURE)
    return 128;

  // On other processors return a default of 64 bytes.
  return 64;
}

unsigned PPCTTIImpl::getPrefetchDistance() const {
  // This seems like a reasonable default for the BG/Q (this pass is enabled,
  // by default, only on the BG/Q).
  return 300;
}

unsigned PPCTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  unsigned Directive = ST->getCPUDirective();
  // The 440 has no SIMD support, but floating-point instructions
  // have a 5-cycle latency, so unroll by 5x for latency hiding.
  if (Directive == PPC::DIR_440)
    return 5;

  // The A2 has no SIMD support, but floating-point instructions
  // have a 6-cycle latency, so unroll by 6x for latency hiding.
  if (Directive == PPC::DIR_A2)
    return 6;

  // FIXME: For lack of any better information, do no harm...
  if (Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500)
    return 1;

  // For P7 and P8, floating-point instructions have a 6-cycle latency and
  // there are two execution units, so unroll by 12x for latency hiding.
  // FIXME: the same for P9 as previous gen until POWER9 scheduling is ready
  // FIXME: the same for P10 as previous gen until POWER10 scheduling is ready
  // Assume that future is the same as the others.
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9 || Directive == PPC::DIR_PWR10 ||
      Directive == PPC::DIR_PWR_FUTURE)
    return 12;

  // For most things, modern systems have two execution units (and
  // out-of-order execution).
  return 2;
}
// Adjust the cost of vector instructions on targets where there is overlap
// between the vector and scalar units, thereby reducing the overall throughput
// of vector code relative to scalar code.
int PPCTTIImpl::vectorCostAdjustment(int Cost, unsigned Opcode, Type *Ty1,
                                     Type *Ty2) {
  if (!ST->vectorsUseTwoUnits() || !Ty1->isVectorTy())
    return Cost;

  std::pair<int, MVT> LT1 = TLI->getTypeLegalizationCost(DL, Ty1);
  // If type legalization involves splitting the vector, we don't want to
  // double the cost at every step - only the last step.
  if (LT1.first != 1 || !LT1.second.isVector())
    return Cost;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
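  // Operations that get expanded are not executed as a single vector
  // operation, so skip the adjustment for them.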
  if (TLI->isOperationExpand(ISD, LT1.second))
    return Cost;

  if (Ty2) {
    std::pair<int, MVT> LT2 = TLI->getTypeLegalizationCost(DL, Ty2);
    if (LT2.first != 1 || !LT2.second.isVector())
      return Cost;
  }

  return Cost * 2;
}

int PPCTTIImpl::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                       TTI::TargetCostKind CostKind,
                                       TTI::OperandValueKind Op1Info,
                                       TTI::OperandValueKind Op2Info,
                                       TTI::OperandValueProperties Opd1PropInfo,
                                       TTI::OperandValueProperties Opd2PropInfo,
                                       ArrayRef<const Value *> Args,
                                       const Instruction *CxtI) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");
  // TODO: Handle more cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                         Op2Info, Opd1PropInfo,
                                         Opd2PropInfo, Args, CxtI);

  // Fallback to the default implementation.
  int Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                           Op2Info,
                                           Opd1PropInfo, Opd2PropInfo);
  return vectorCostAdjustment(Cost, Opcode, Ty, nullptr);
}

int PPCTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

  // PPC, for both Altivec/VSX and QPX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). We need one such shuffle instruction for each actual
  // register (this is not true for arbitrary shuffles, but is true for the
  // structured types of shuffles covered by TTI::ShuffleKind).
  return vectorCostAdjustment(LT.first, Instruction::ShuffleVector, Tp,
                              nullptr);
}

int PPCTTIImpl::getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind) {
  if (CostKind != TTI::TCK_RecipThroughput)
    return Opcode == Instruction::PHI ? 0 : 1;
  // Branches are assumed to be predicted.
  return CostKind == TTI::TCK_RecipThroughput ? 0 : 1;
}

int PPCTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                 TTI::TargetCostKind CostKind,
                                 const Instruction *I) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  int Cost = BaseT::getCastInstrCost(Opcode, Dst, Src, CostKind, I);
  Cost = vectorCostAdjustment(Cost, Opcode, Dst, Src);
  // TODO: Allow non-throughput costs that aren't binary.
  if (CostKind != TTI::TCK_RecipThroughput)
    return Cost == 0 ? 0 : 1;
  return Cost;
}

int PPCTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                   TTI::TargetCostKind CostKind,
                                   const Instruction *I) {
  int Cost = BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, CostKind, I);
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return Cost;
  return vectorCostAdjustment(Cost, Opcode, ValTy, nullptr);
}

int PPCTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  int Cost = BaseT::getVectorInstrCost(Opcode, Val, Index);
  Cost = vectorCostAdjustment(Cost, Opcode, Val, nullptr);

  if (ST->hasVSX() && Val->getScalarType()->isDoubleTy()) {
    // Double-precision scalars are already located in index #0 (or #1 if LE).
    if (ISD == ISD::EXTRACT_VECTOR_ELT &&
        Index == (ST->isLittleEndian() ? 1 : 0))
      return 0;

    return Cost;

  } else if (ST->hasQPX() && Val->getScalarType()->isFloatingPointTy()) {
    // Floating point scalars are already located in index #0.
    if (Index == 0)
      return 0;

    return Cost;

  } else if (Val->getScalarType()->isIntegerTy() && Index != -1U) {
    if (ST->hasP9Altivec()) {
      if (ISD == ISD::INSERT_VECTOR_ELT)
        // A move-to VSR and a permute/insert.  Assume vector operation cost
        // for both (cost will be 2x on P9).
        return vectorCostAdjustment(2, Opcode, Val, nullptr);

      // It's an extract.  Maybe we can do a cheap move-from VSR.
      unsigned EltSize = Val->getScalarSizeInBits();
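      // mfvsrd/mfvsrwz can only read the element that already sits in the
      // fixed VSR position they access, hence the endianness-dependent index;
      // any other element would need a shuffle first.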
      if (EltSize == 64) {
        unsigned MfvsrdIndex = ST->isLittleEndian() ? 1 : 0;
        if (Index == MfvsrdIndex)
          return 1;
      } else if (EltSize == 32) {
        unsigned MfvsrwzIndex = ST->isLittleEndian() ? 2 : 1;
        if (Index == MfvsrwzIndex)
          return 1;
      }

      // We need a vector extract (or mfvsrld).  Assume vector operation cost.
      // The cost of the load constant for a vector extract is disregarded
      // (invariant, easily schedulable).
      return vectorCostAdjustment(1, Opcode, Val, nullptr);

    } else if (ST->hasDirectMove())
      // Assume permute has standard cost.
      // Assume move-to/move-from VSR have 2x standard cost.
      return 3;
  }

  // Estimated cost of a load-hit-store delay.  This was obtained
  // experimentally as a minimum needed to prevent unprofitable
  // vectorization for the paq8p benchmark.  It may need to be
  // raised further if other unprofitable cases remain.
  unsigned LHSPenalty = 2;
  if (ISD == ISD::INSERT_VECTOR_ELT)
    LHSPenalty += 7;

  // Vector element insert/extract with Altivec is very expensive,
  // because they require store and reload with the attendant
  // processor stall for load-hit-store.  Until VSX is available,
  // these need to be estimated as very costly.
  if (ISD == ISD::EXTRACT_VECTOR_ELT ||
      ISD == ISD::INSERT_VECTOR_ELT)
    return LHSPenalty + Cost;

  return Cost;
}

int PPCTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                MaybeAlign Alignment, unsigned AddressSpace,
                                TTI::TargetCostKind CostKind,
                                const Instruction *I) {
  if (TLI->getValueType(DL, Src, true) == MVT::Other)
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                  CostKind);
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  int Cost = BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                    CostKind);
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return Cost;

  Cost = vectorCostAdjustment(Cost, Opcode, Src, nullptr);

  bool IsAltivecType = ST->hasAltivec() &&
                       (LT.second == MVT::v16i8 || LT.second == MVT::v8i16 ||
                        LT.second == MVT::v4i32 || LT.second == MVT::v4f32);
  bool IsVSXType = ST->hasVSX() &&
                   (LT.second == MVT::v2f64 || LT.second == MVT::v2i64);
  bool IsQPXType = ST->hasQPX() &&
                   (LT.second == MVT::v4f64 || LT.second == MVT::v4f32);

  // VSX has 32b/64b load instructions. Legalization can handle loading of
  // 32b/64b to VSR correctly and cheaply. But BaseT::getMemoryOpCost and
  // PPCTargetLowering can't compute the cost appropriately. So here we
  // explicitly check this case.
  unsigned MemBytes = Src->getPrimitiveSizeInBits();
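  // Note: despite the name, MemBytes holds the size in bits, so the checks
  // below match 64-bit and 32-bit scalar-sized loads into a VSR.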
  if (Opcode == Instruction::Load && ST->hasVSX() && IsAltivecType &&
      (MemBytes == 64 || (ST->hasP8Vector() && MemBytes == 32)))
    return 1;

  // Aligned loads and stores are easy.
  unsigned SrcBytes = LT.second.getStoreSize();
  if (!SrcBytes || !Alignment || *Alignment >= SrcBytes)
    return Cost;

  // If we can use the permutation-based load sequence, then this is also
  // relatively cheap (not counting loop-invariant instructions): one load plus
  // one permute (the last load in a series has extra cost, but we're
  // neglecting that here). Note that on the P7, we could do unaligned loads
  // for Altivec types using the VSX instructions, but that's more expensive
  // than using the permutation-based load sequence. On the P8, that's no
  // longer true.
  if (Opcode == Instruction::Load &&
      ((!ST->hasP8Vector() && IsAltivecType) || IsQPXType) &&
      *Alignment >= LT.second.getScalarType().getStoreSize())
    return Cost + LT.first; // Add the cost of the permutations.

  // For VSX, we can do unaligned loads and stores on Altivec/VSX types. On the
  // P7, unaligned vector loads are more expensive than the permutation-based
  // load sequence, so that might be used instead, but regardless, the net cost
  // is about the same (not counting loop-invariant instructions).
  if (IsVSXType || (ST->hasVSX() && IsAltivecType))
    return Cost;

  // Newer PPC supports unaligned memory access.
  if (TLI->allowsMisalignedMemoryAccesses(LT.second, 0))
    return Cost;

  // PPC in general does not support unaligned loads and stores. They'll need
  // to be decomposed based on the alignment factor.

  // Add the cost of each scalar load or store.
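  // For example, a 16-byte access with 4-byte alignment is modeled as the
  // base cost plus (16 / 4) - 1 = 3 extra accesses per legalized register.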
  assert(Alignment);
  Cost += LT.first * ((SrcBytes / Alignment->value()) - 1);

  // For a vector type, there is also scalarization overhead (only for
  // stores, loads are expanded using the vector-load + permutation sequence,
  // which is much less expensive).
  if (Src->isVectorTy() && Opcode == Instruction::Store)
    for (int i = 0, e = cast<FixedVectorType>(Src)->getNumElements(); i < e;
         ++i)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Src, i);

  return Cost;
}

int PPCTTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) {
  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind,
                                             UseMaskForCond, UseMaskForGaps);

  assert(isa<VectorType>(VecTy) &&
         "Expect a vector type for interleaved memory op");

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, VecTy);

  // Firstly, the cost of load/store operation.
  int Cost =
      getMemoryOpCost(Opcode, VecTy, MaybeAlign(Alignment), AddressSpace,
                      CostKind);

  // PPC, for both Altivec/VSX and QPX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). For each result vector, we need one shuffle per incoming
  // vector (except that the first shuffle can take two incoming vectors,
  // since it has no earlier result to combine with).
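  // For example, a factor-2 interleave on a type that legalizes to two
  // registers (LT.first == 2) adds 2 * (2 - 1) = 2 permutes on top of the
  // memory cost computed above.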
  Cost += Factor * (LT.first - 1);

  return Cost;
}

unsigned PPCTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                           TTI::TargetCostKind CostKind) {
  return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}

bool PPCTTIImpl::canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE,
                            LoopInfo *LI, DominatorTree *DT,
                            AssumptionCache *AC, TargetLibraryInfo *LibInfo) {
  // Process nested loops first.
  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
    if (canSaveCmp(*I, BI, SE, LI, DT, AC, LibInfo))
      return false; // Stop search.

  HardwareLoopInfo HWLoopInfo(L);

  if (!HWLoopInfo.canAnalyze(*LI))
    return false;

  if (!isHardwareLoopProfitable(L, *SE, *AC, LibInfo, HWLoopInfo))
    return false;

  if (!HWLoopInfo.isHardwareLoopCandidate(*SE, *LI, *DT))
    return false;

  *BI = HWLoopInfo.ExitBranch;
  return true;
}

bool PPCTTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                               TargetTransformInfo::LSRCost &C2) {
  // The PowerPC default here is "instruction count first priority".
  // If LsrNoInsnsCost is set, fall back to the default implementation.
  if (!LsrNoInsnsCost)
    return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost, C1.NumIVMuls,
                    C1.NumBaseAdds, C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
           std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost, C2.NumIVMuls,
                    C2.NumBaseAdds, C2.ScaleCost, C2.ImmCost, C2.SetupCost);
  else
    return TargetTransformInfoImplBase::isLSRCostLess(C1, C2);
}