//===-- PPCTargetTransformInfo.cpp - PPC specific TTI ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "PPCTargetTransformInfo.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;

#define DEBUG_TYPE "ppctti"

static cl::opt<bool> DisablePPCConstHoist("disable-ppc-constant-hoisting",
cl::desc("disable constant hoisting on PPC"), cl::init(false), cl::Hidden);

// This is currently only used for the data prefetch pass.
static cl::opt<unsigned>
CacheLineSize("ppc-loop-prefetch-cache-line", cl::Hidden, cl::init(64),
              cl::desc("The loop prefetch cache line size"));

static cl::opt<bool>
EnablePPCColdCC("ppc-enable-coldcc", cl::Hidden, cl::init(false),
                cl::desc("Enable using coldcc calling conv for cold "
                         "internal functions"));

static cl::opt<bool>
LsrNoInsnsCost("ppc-lsr-no-insns-cost", cl::Hidden, cl::init(false),
               cl::desc("Do not add instruction count to lsr cost model"));

// The latency of mtctr is only justified if there are more than 4
// comparisons that will be removed as a result.
static cl::opt<unsigned>
SmallCTRLoopThreshold("min-ctr-loop-threshold", cl::init(4), cl::Hidden,
                      cl::desc("Loops with a constant trip count smaller than "
                               "this value will not use the count register."));

//===----------------------------------------------------------------------===//
//
// PPC cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
PPCTTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (ST->hasPOPCNTD() != PPCSubtarget::POPCNTD_Unavailable && TyWidth <= 64)
    return ST->hasPOPCNTD() == PPCSubtarget::POPCNTD_Slow ?
             TTI::PSK_SlowHardware : TTI::PSK_FastHardware;
  return TTI::PSK_Software;
}

Optional<Instruction *>
PPCTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
  Intrinsic::ID IID = II.getIntrinsicID();
  switch (IID) {
  default:
    break;
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
    // Turn PPC lvx -> load if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(
            II.getArgOperand(0), Align(16), IC.getDataLayout(), &II,
            &IC.getAssumptionCache(), &IC.getDominatorTree()) >= 16) {
      Value *Ptr = IC.Builder.CreateBitCast(
          II.getArgOperand(0), PointerType::getUnqual(II.getType()));
      return new LoadInst(II.getType(), Ptr, "", false, Align(16));
    }
    break;
  case Intrinsic::ppc_vsx_lxvw4x:
  case Intrinsic::ppc_vsx_lxvd2x: {
    // Turn PPC VSX loads into normal loads.
    Value *Ptr = IC.Builder.CreateBitCast(II.getArgOperand(0),
                                          PointerType::getUnqual(II.getType()));
    return new LoadInst(II.getType(), Ptr, Twine(""), false, Align(1));
  }
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(
            II.getArgOperand(1), Align(16), IC.getDataLayout(), &II,
            &IC.getAssumptionCache(), &IC.getDominatorTree()) >= 16) {
      Type *OpPtrTy = PointerType::getUnqual(II.getArgOperand(0)->getType());
      Value *Ptr = IC.Builder.CreateBitCast(II.getArgOperand(1), OpPtrTy);
      return new StoreInst(II.getArgOperand(0), Ptr, false, Align(16));
    }
    break;
  case Intrinsic::ppc_vsx_stxvw4x:
  case Intrinsic::ppc_vsx_stxvd2x: {
    // Turn PPC VSX stores into normal stores.
    Type *OpPtrTy = PointerType::getUnqual(II.getArgOperand(0)->getType());
    Value *Ptr = IC.Builder.CreateBitCast(II.getArgOperand(1), OpPtrTy);
    return new StoreInst(II.getArgOperand(0), Ptr, false, Align(1));
  }
  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
    // Note that ppc_altivec_vperm has a big-endian bias, so when creating
    // a shufflevector for little endian, we must undo the transformation
    // performed on vec_perm in altivec.h.  That is, we must complement
    // the permutation mask with respect to 31 and reverse the order of
    // V1 and V2.
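    // For example, a mask byte of 0 selects big-endian byte 0 of V1; after
    // the little-endian adjustment below (Idx = 31 - 0 = 31, with the two
    // operands swapped), the shuffle extracts lane 15 of V1, which is the
    // same byte.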
    if (Constant *Mask = dyn_cast<Constant>(II.getArgOperand(2))) {
      assert(cast<FixedVectorType>(Mask->getType())->getNumElements() == 16 &&
             "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        Constant *Elt = Mask->getAggregateElement(i);
        if (!Elt || !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 =
            IC.Builder.CreateBitCast(II.getArgOperand(0), Mask->getType());
        Value *Op1 =
            IC.Builder.CreateBitCast(II.getArgOperand(1), Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getAggregateElement(i)))
            continue;
          unsigned Idx =
              cast<ConstantInt>(Mask->getAggregateElement(i))->getZExtValue();
          Idx &= 31; // Match the hardware behavior.
          if (DL.isLittleEndian())
            Idx = 31 - Idx;

          if (!ExtractedElts[Idx]) {
            Value *Op0ToUse = (DL.isLittleEndian()) ? Op1 : Op0;
            Value *Op1ToUse = (DL.isLittleEndian()) ? Op0 : Op1;
            ExtractedElts[Idx] = IC.Builder.CreateExtractElement(
                Idx < 16 ? Op0ToUse : Op1ToUse, IC.Builder.getInt32(Idx & 15));
          }

          // Insert this value into the result vector.
          Result = IC.Builder.CreateInsertElement(Result, ExtractedElts[Idx],
                                                  IC.Builder.getInt32(i));
        }
        return CastInst::Create(Instruction::BitCast, Result, II.getType());
      }
    }
    break;
  }
  return None;
}

InstructionCost PPCTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
                                          TTI::TargetCostKind CostKind) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(Imm, Ty, CostKind);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  if (Imm == 0)
    return TTI::TCC_Free;

  if (Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Basic;

    if (isInt<32>(Imm.getSExtValue())) {
      // A constant that can be materialized using lis.
      if ((Imm.getZExtValue() & 0xFFFF) == 0)
        return TTI::TCC_Basic;

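      // Otherwise assume a two-instruction sequence such as lis followed
      // by ori.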
      return 2 * TTI::TCC_Basic;
    }
  }

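  // Wider constants need a longer materialization sequence (up to five
  // instructions on 64-bit targets), approximated here as four basic costs.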
  return 4 * TTI::TCC_Basic;
}

InstructionCost PPCTTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                                const APInt &Imm, Type *Ty,
                                                TTI::TargetCostKind CostKind) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

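  // For the cases below, the immediate is either encoded directly into the
  // stackmap/patchpoint or fits in the 16-bit signed immediate field of the
  // corresponding instruction, so hoisting it would not help.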
  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return PPCTTIImpl::getIntImmCost(Imm, Ty, CostKind);
}

InstructionCost PPCTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                              const APInt &Imm, Type *Ty,
                                              TTI::TargetCostKind CostKind,
                                              Instruction *Inst) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCostInst(Opcode, Idx, Imm, Ty, CostKind, Inst);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  unsigned ImmIdx = ~0U;
  bool ShiftedFree = false, RunFree = false, UnsignedFree = false,
       ZeroFree = false;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::And:
    RunFree = true; // (for the rotate-and-mask instructions)
    LLVM_FALLTHROUGH;
  case Instruction::Add:
  case Instruction::Or:
  case Instruction::Xor:
    ShiftedFree = true;
    LLVM_FALLTHROUGH;
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    ImmIdx = 1;
    break;
  case Instruction::ICmp:
    UnsignedFree = true;
    ImmIdx = 1;
    // Zero comparisons can use record-form instructions.
    LLVM_FALLTHROUGH;
  case Instruction::Select:
    ZeroFree = true;
    break;
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Ret:
  case Instruction::Load:
  case Instruction::Store:
    break;
  }

  if (ZeroFree && Imm == 0)
    return TTI::TCC_Free;

  if (Idx == ImmIdx && Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;

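    // Rotate-and-mask instructions can encode a contiguous run of ones (or
    // the complement of one) directly in their mask fields.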
    if (RunFree) {
      if (Imm.getBitWidth() <= 32 &&
          (isShiftedMask_32(Imm.getZExtValue()) ||
           isShiftedMask_32(~Imm.getZExtValue())))
        return TTI::TCC_Free;

      if (ST->isPPC64() &&
          (isShiftedMask_64(Imm.getZExtValue()) ||
           isShiftedMask_64(~Imm.getZExtValue())))
        return TTI::TCC_Free;
    }

    if (UnsignedFree && isUInt<16>(Imm.getZExtValue()))
      return TTI::TCC_Free;

    if (ShiftedFree && (Imm.getZExtValue() & 0xFFFF) == 0)
      return TTI::TCC_Free;
  }

  return PPCTTIImpl::getIntImmCost(Imm, Ty, CostKind);
}

InstructionCost PPCTTIImpl::getUserCost(const User *U,
                                        ArrayRef<const Value *> Operands,
                                        TTI::TargetCostKind CostKind) {
  // The vector adjustment is already performed in getCastInstrCost and
  // getMemoryOpCost, so let the base implementation handle these users.
  if (isa<CastInst>(U) || isa<LoadInst>(U) || isa<StoreInst>(U))
    return BaseT::getUserCost(U, Operands, CostKind);

  if (U->getType()->isVectorTy()) {
    // Instructions that need to be split should cost more.
    std::pair<InstructionCost, MVT> LT =
        TLI->getTypeLegalizationCost(DL, U->getType());
    return LT.first * BaseT::getUserCost(U, Operands, CostKind);
  }

  return BaseT::getUserCost(U, Operands, CostKind);
}

// Determining the address of a TLS variable results in a function call in
// certain TLS models.
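// (The general-dynamic and local-dynamic models require a call to
// __tls_get_addr.)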
static bool memAddrUsesCTR(const Value *MemAddr, const PPCTargetMachine &TM,
                           SmallPtrSetImpl<const Value *> &Visited) {
  // No need to traverse again if we already checked this operand.
  if (!Visited.insert(MemAddr).second)
    return false;
  const auto *GV = dyn_cast<GlobalValue>(MemAddr);
  if (!GV) {
    // Recurse to check for constants that refer to TLS global variables.
    if (const auto *CV = dyn_cast<Constant>(MemAddr))
      for (const auto &CO : CV->operands())
        if (memAddrUsesCTR(CO, TM, Visited))
          return true;
    return false;
  }

  if (!GV->isThreadLocal())
    return false;
  TLSModel::Model Model = TM.getTLSModel(GV);
  return Model == TLSModel::GeneralDynamic || Model == TLSModel::LocalDynamic;
}

bool PPCTTIImpl::mightUseCTR(BasicBlock *BB, TargetLibraryInfo *LibInfo,
                             SmallPtrSetImpl<const Value *> &Visited) {
  const PPCTargetMachine &TM = ST->getTargetMachine();

  // Loop through the inline asm constraints and look for something that
  // clobbers ctr.
  auto asmClobbersCTR = [](InlineAsm *IA) {
    InlineAsm::ConstraintInfoVector CIV = IA->ParseConstraints();
    for (unsigned i = 0, ie = CIV.size(); i < ie; ++i) {
      InlineAsm::ConstraintInfo &C = CIV[i];
      if (C.Type != InlineAsm::isInput)
        for (unsigned j = 0, je = C.Codes.size(); j < je; ++j)
          if (StringRef(C.Codes[j]).equals_lower("{ctr}"))
            return true;
    }
    return false;
  };

  auto isLargeIntegerTy = [](bool Is32Bit, Type *Ty) {
    if (IntegerType *ITy = dyn_cast<IntegerType>(Ty))
      return ITy->getBitWidth() > (Is32Bit ? 32U : 64U);

    return false;
  };

  auto supportedHalfPrecisionOp = [](Instruction *Inst) {
    switch (Inst->getOpcode()) {
    default:
      return false;
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::Load:
    case Instruction::Store:
    case Instruction::FPToUI:
    case Instruction::UIToFP:
    case Instruction::FPToSI:
    case Instruction::SIToFP:
      return true;
    }
  };

  for (BasicBlock::iterator J = BB->begin(), JE = BB->end();
       J != JE; ++J) {
    // There are no direct operations on half precision, so assume that
    // anything with that type requires a call, except for a few select
    // operations on Power9.
    if (Instruction *CurrInst = dyn_cast<Instruction>(J)) {
      for (const auto &Op : CurrInst->operands()) {
        if (Op->getType()->getScalarType()->isHalfTy() ||
            CurrInst->getType()->getScalarType()->isHalfTy())
          return !(ST->isISA3_0() && supportedHalfPrecisionOp(CurrInst));
      }
    }
    if (CallInst *CI = dyn_cast<CallInst>(J)) {
      // Inline ASM is okay, unless it clobbers the ctr register.
      if (InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledOperand())) {
        if (asmClobbersCTR(IA))
          return true;
        continue;
      }

      if (Function *F = CI->getCalledFunction()) {
        // Most intrinsics don't become function calls, but some might.
        // sin, cos, exp and log are always calls.
        unsigned Opcode = 0;
        if (F->getIntrinsicID() != Intrinsic::not_intrinsic) {
          switch (F->getIntrinsicID()) {
          default: continue;
          // If we have a call to loop_decrement or set_loop_iterations,
          // we're definitely using CTR.
          case Intrinsic::set_loop_iterations:
          case Intrinsic::loop_decrement:
            return true;

          // Binary operations on 128-bit floating-point values become
          // library calls and so will use the CTR.
          case Intrinsic::experimental_constrained_fadd:
          case Intrinsic::experimental_constrained_fsub:
          case Intrinsic::experimental_constrained_fmul:
          case Intrinsic::experimental_constrained_fdiv:
          case Intrinsic::experimental_constrained_frem:
            if (F->getType()->getScalarType()->isFP128Ty() ||
                F->getType()->getScalarType()->isPPC_FP128Ty())
              return true;
            break;

          case Intrinsic::experimental_constrained_fptosi:
          case Intrinsic::experimental_constrained_fptoui:
          case Intrinsic::experimental_constrained_sitofp:
          case Intrinsic::experimental_constrained_uitofp: {
            Type *SrcType = CI->getArgOperand(0)->getType()->getScalarType();
            Type *DstType = CI->getType()->getScalarType();
            if (SrcType->isPPC_FP128Ty() || DstType->isPPC_FP128Ty() ||
                isLargeIntegerTy(!TM.isPPC64(), SrcType) ||
                isLargeIntegerTy(!TM.isPPC64(), DstType))
              return true;
            break;
          }

          // Exclude eh_sjlj_setjmp; we don't need to exclude eh_sjlj_longjmp
          // because, although it does clobber the counter register, the
          // control can't then return to inside the loop unless there is also
          // an eh_sjlj_setjmp.
          case Intrinsic::eh_sjlj_setjmp:

          case Intrinsic::memcpy:
          case Intrinsic::memmove:
          case Intrinsic::memset:
          case Intrinsic::powi:
          case Intrinsic::log:
          case Intrinsic::log2:
          case Intrinsic::log10:
          case Intrinsic::exp:
          case Intrinsic::exp2:
          case Intrinsic::pow:
          case Intrinsic::sin:
          case Intrinsic::cos:
          case Intrinsic::experimental_constrained_powi:
          case Intrinsic::experimental_constrained_log:
          case Intrinsic::experimental_constrained_log2:
          case Intrinsic::experimental_constrained_log10:
          case Intrinsic::experimental_constrained_exp:
          case Intrinsic::experimental_constrained_exp2:
          case Intrinsic::experimental_constrained_pow:
          case Intrinsic::experimental_constrained_sin:
          case Intrinsic::experimental_constrained_cos:
            return true;
          case Intrinsic::copysign:
            if (CI->getArgOperand(0)->getType()->getScalarType()->
                isPPC_FP128Ty())
              return true;
            else
              continue; // ISD::FCOPYSIGN is never a library call.
          case Intrinsic::fma:                Opcode = ISD::FMA;        break;
          case Intrinsic::sqrt:               Opcode = ISD::FSQRT;      break;
          case Intrinsic::floor:              Opcode = ISD::FFLOOR;     break;
          case Intrinsic::ceil:               Opcode = ISD::FCEIL;      break;
          case Intrinsic::trunc:              Opcode = ISD::FTRUNC;     break;
          case Intrinsic::rint:               Opcode = ISD::FRINT;      break;
          case Intrinsic::lrint:              Opcode = ISD::LRINT;      break;
          case Intrinsic::llrint:             Opcode = ISD::LLRINT;     break;
          case Intrinsic::nearbyint:          Opcode = ISD::FNEARBYINT; break;
          case Intrinsic::round:              Opcode = ISD::FROUND;     break;
          case Intrinsic::lround:             Opcode = ISD::LROUND;     break;
          case Intrinsic::llround:            Opcode = ISD::LLROUND;    break;
          case Intrinsic::minnum:             Opcode = ISD::FMINNUM;    break;
          case Intrinsic::maxnum:             Opcode = ISD::FMAXNUM;    break;
          case Intrinsic::experimental_constrained_fcmp:
            Opcode = ISD::STRICT_FSETCC;
            break;
          case Intrinsic::experimental_constrained_fcmps:
            Opcode = ISD::STRICT_FSETCCS;
            break;
          case Intrinsic::experimental_constrained_fma:
            Opcode = ISD::STRICT_FMA;
            break;
          case Intrinsic::experimental_constrained_sqrt:
            Opcode = ISD::STRICT_FSQRT;
            break;
          case Intrinsic::experimental_constrained_floor:
            Opcode = ISD::STRICT_FFLOOR;
            break;
          case Intrinsic::experimental_constrained_ceil:
            Opcode = ISD::STRICT_FCEIL;
            break;
          case Intrinsic::experimental_constrained_trunc:
            Opcode = ISD::STRICT_FTRUNC;
            break;
          case Intrinsic::experimental_constrained_rint:
            Opcode = ISD::STRICT_FRINT;
            break;
          case Intrinsic::experimental_constrained_lrint:
            Opcode = ISD::STRICT_LRINT;
            break;
          case Intrinsic::experimental_constrained_llrint:
            Opcode = ISD::STRICT_LLRINT;
            break;
          case Intrinsic::experimental_constrained_nearbyint:
            Opcode = ISD::STRICT_FNEARBYINT;
            break;
          case Intrinsic::experimental_constrained_round:
            Opcode = ISD::STRICT_FROUND;
            break;
          case Intrinsic::experimental_constrained_lround:
            Opcode = ISD::STRICT_LROUND;
            break;
          case Intrinsic::experimental_constrained_llround:
            Opcode = ISD::STRICT_LLROUND;
            break;
          case Intrinsic::experimental_constrained_minnum:
            Opcode = ISD::STRICT_FMINNUM;
            break;
          case Intrinsic::experimental_constrained_maxnum:
            Opcode = ISD::STRICT_FMAXNUM;
            break;
          case Intrinsic::umul_with_overflow: Opcode = ISD::UMULO;      break;
          case Intrinsic::smul_with_overflow: Opcode = ISD::SMULO;      break;
          }
        }

        // PowerPC does not use [US]DIVREM or other library calls for
        // operations on regular types which are not otherwise library calls
        // (i.e. soft float or atomics). If adapting for targets that do,
        // additional care is required here.

        LibFunc Func;
        if (!F->hasLocalLinkage() && F->hasName() && LibInfo &&
            LibInfo->getLibFunc(F->getName(), Func) &&
            LibInfo->hasOptimizedCodeGen(Func)) {
          // Non-read-only functions are never treated as intrinsics.
          if (!CI->onlyReadsMemory())
            return true;

          // Conversion happens only for FP calls.
          if (!CI->getArgOperand(0)->getType()->isFloatingPointTy())
            return true;

          switch (Func) {
          default: return true;
          case LibFunc_copysign:
          case LibFunc_copysignf:
            continue; // ISD::FCOPYSIGN is never a library call.
          case LibFunc_copysignl:
            return true;
          case LibFunc_fabs:
          case LibFunc_fabsf:
          case LibFunc_fabsl:
            continue; // ISD::FABS is never a library call.
          case LibFunc_sqrt:
          case LibFunc_sqrtf:
          case LibFunc_sqrtl:
            Opcode = ISD::FSQRT; break;
          case LibFunc_floor:
          case LibFunc_floorf:
          case LibFunc_floorl:
            Opcode = ISD::FFLOOR; break;
          case LibFunc_nearbyint:
          case LibFunc_nearbyintf:
          case LibFunc_nearbyintl:
            Opcode = ISD::FNEARBYINT; break;
          case LibFunc_ceil:
          case LibFunc_ceilf:
          case LibFunc_ceill:
            Opcode = ISD::FCEIL; break;
          case LibFunc_rint:
          case LibFunc_rintf:
          case LibFunc_rintl:
            Opcode = ISD::FRINT; break;
          case LibFunc_round:
          case LibFunc_roundf:
          case LibFunc_roundl:
            Opcode = ISD::FROUND; break;
          case LibFunc_trunc:
          case LibFunc_truncf:
          case LibFunc_truncl:
            Opcode = ISD::FTRUNC; break;
          case LibFunc_fmin:
          case LibFunc_fminf:
          case LibFunc_fminl:
            Opcode = ISD::FMINNUM; break;
          case LibFunc_fmax:
          case LibFunc_fmaxf:
          case LibFunc_fmaxl:
            Opcode = ISD::FMAXNUM; break;
          }
        }

        if (Opcode) {
          EVT EVTy =
              TLI->getValueType(DL, CI->getArgOperand(0)->getType(), true);

          if (EVTy == MVT::Other)
            return true;

          if (TLI->isOperationLegalOrCustom(Opcode, EVTy))
            continue;
          else if (EVTy.isVector() &&
                   TLI->isOperationLegalOrCustom(Opcode, EVTy.getScalarType()))
            continue;

          return true;
        }
      }

      return true;
    } else if (isa<BinaryOperator>(J) &&
               (J->getType()->getScalarType()->isFP128Ty() ||
                J->getType()->getScalarType()->isPPC_FP128Ty())) {
      // Most operations on f128 or ppc_fp128 values become calls.
      return true;
    } else if (isa<UIToFPInst>(J) || isa<SIToFPInst>(J) ||
               isa<FPToUIInst>(J) || isa<FPToSIInst>(J)) {
      CastInst *CI = cast<CastInst>(J);
      if (CI->getSrcTy()->getScalarType()->isPPC_FP128Ty() ||
          CI->getDestTy()->getScalarType()->isPPC_FP128Ty() ||
          isLargeIntegerTy(!TM.isPPC64(), CI->getSrcTy()->getScalarType()) ||
          isLargeIntegerTy(!TM.isPPC64(), CI->getDestTy()->getScalarType()))
        return true;
    } else if (isLargeIntegerTy(!TM.isPPC64(),
                                J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::UDiv ||
                J->getOpcode() == Instruction::SDiv ||
                J->getOpcode() == Instruction::URem ||
                J->getOpcode() == Instruction::SRem)) {
      return true;
    } else if (!TM.isPPC64() &&
               isLargeIntegerTy(false, J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::Shl ||
                J->getOpcode() == Instruction::AShr ||
                J->getOpcode() == Instruction::LShr)) {
      // Only on PPC32, for 128-bit integers (specifically not 64-bit
      // integers), these might be runtime calls.
      return true;
    } else if (isa<IndirectBrInst>(J) || isa<InvokeInst>(J)) {
      // On PowerPC, indirect jumps use the counter register.
      return true;
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(J)) {
      if (SI->getNumCases() + 1 >= (unsigned)TLI->getMinimumJumpTableEntries())
        return true;
    }

    // FREM is always a call.
    if (J->getOpcode() == Instruction::FRem)
      return true;

    if (ST->useSoftFloat()) {
      switch (J->getOpcode()) {
      case Instruction::FAdd:
      case Instruction::FSub:
      case Instruction::FMul:
      case Instruction::FDiv:
      case Instruction::FPTrunc:
      case Instruction::FPExt:
      case Instruction::FPToUI:
      case Instruction::FPToSI:
      case Instruction::UIToFP:
      case Instruction::SIToFP:
      case Instruction::FCmp:
        return true;
      }
    }

    for (Value *Operand : J->operands())
      if (memAddrUsesCTR(Operand, TM, Visited))
        return true;
  }

  return false;
}

bool PPCTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                          AssumptionCache &AC,
                                          TargetLibraryInfo *LibInfo,
                                          HardwareLoopInfo &HWLoopInfo) {
  const PPCTargetMachine &TM = ST->getTargetMachine();
  TargetSchedModel SchedModel;
  SchedModel.init(ST);

  // Do not convert small short loops to CTR loops.
  unsigned ConstTripCount = SE.getSmallConstantTripCount(L);
  if (ConstTripCount && ConstTripCount < SmallCTRLoopThreshold) {
    SmallPtrSet<const Value *, 32> EphValues;
    CodeMetrics::collectEphemeralValues(L, &AC, EphValues);
    CodeMetrics Metrics;
    for (BasicBlock *BB : L->blocks())
      Metrics.analyzeBasicBlock(BB, *this, EphValues);
    // 6 is an approximate latency for the mtctr instruction.
    if (Metrics.NumInsts <= (6 * SchedModel.getIssueWidth()))
      return false;
  }

  // We don't want to spill/restore the counter register, and so we don't
  // want to use the counter register if the loop contains calls.
  SmallPtrSet<const Value *, 4> Visited;
  for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
       I != IE; ++I)
    if (mightUseCTR(*I, LibInfo, Visited))
      return false;

  SmallVector<BasicBlock*, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  // If there is an exit edge known to be frequently taken,
  // we should not transform this loop.
  for (auto &BB : ExitingBlocks) {
    Instruction *TI = BB->getTerminator();
    if (!TI) continue;

    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      uint64_t TrueWeight = 0, FalseWeight = 0;
      if (!BI->isConditional() ||
          !BI->extractProfMetadata(TrueWeight, FalseWeight))
        continue;

      // If the exit path is more frequent than the loop path,
      // we return here without further analysis for this loop.
      bool TrueIsExit = !L->contains(BI->getSuccessor(0));
      if ((TrueIsExit && FalseWeight < TrueWeight) ||
          (!TrueIsExit && FalseWeight > TrueWeight))
        return false;
    }
  }

  // If an exit block has a PHI that accesses a TLS variable as one of the
  // incoming values from the loop, we cannot produce a CTR loop because the
  // address for that value will be computed in the loop.
  SmallVector<BasicBlock *, 4> ExitBlocks;
  L->getExitBlocks(ExitBlocks);
  for (auto &BB : ExitBlocks) {
    for (auto &PHI : BB->phis()) {
      for (int Idx = 0, EndIdx = PHI.getNumIncomingValues(); Idx < EndIdx;
           Idx++) {
        const BasicBlock *IncomingBB = PHI.getIncomingBlock(Idx);
        const Value *IncomingValue = PHI.getIncomingValue(Idx);
        if (L->contains(IncomingBB) &&
            memAddrUsesCTR(IncomingValue, TM, Visited))
          return false;
      }
    }
  }

  LLVMContext &C = L->getHeader()->getContext();
  HWLoopInfo.CountType = TM.isPPC64() ?
    Type::getInt64Ty(C) : Type::getInt32Ty(C);
  HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
  return true;
}

void PPCTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP) {
  if (ST->getCPUDirective() == PPC::DIR_A2) {
    // The A2 is in-order with a deep pipeline, and concatenation unrolling
    // helps expose latency-hiding opportunities to the instruction scheduler.
    UP.Partial = UP.Runtime = true;

    // We unroll a lot on the A2 (hundreds of instructions), and the benefits
    // often outweigh the cost of a division to compute the trip count.
    UP.AllowExpensiveTripCount = true;
  }

  BaseT::getUnrollingPreferences(L, SE, UP);
}

void PPCTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                       TTI::PeelingPreferences &PP) {
  BaseT::getPeelingPreferences(L, SE, PP);
}

// This function returns true to allow using coldcc calling convention.
// Returning true results in coldcc being used for functions that are cold at
// all call sites and whose callers do not call any other non-coldcc
// functions.
bool PPCTTIImpl::useColdCCForColdCall(Function &F) {
  return EnablePPCColdCC;
}

bool PPCTTIImpl::enableAggressiveInterleaving(bool LoopHasReductions) {
  // On the A2, always unroll aggressively.
  if (ST->getCPUDirective() == PPC::DIR_A2)
    return true;

  return LoopHasReductions;
}

PPCTTIImpl::TTI::MemCmpExpansionOptions
PPCTTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  TTI::MemCmpExpansionOptions Options;
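  // Load sizes (in bytes) to try, widest first.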
  Options.LoadSizes = {8, 4, 2, 1};
  Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
  return Options;
}

bool PPCTTIImpl::enableInterleavedAccessVectorization() {
  return true;
}

unsigned PPCTTIImpl::getNumberOfRegisters(unsigned ClassID) const {
  assert(ClassID == GPRRC || ClassID == FPRRC ||
         ClassID == VRRC || ClassID == VSXRC);
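  // With VSX, the 64 VSX registers overlap the 32 FPRs (VSR 0-31) and the
  // 32 Altivec VRs (VSR 32-63).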
  if (ST->hasVSX()) {
    assert(ClassID == GPRRC || ClassID == VSXRC || ClassID == VRRC);
    return ClassID == VSXRC ? 64 : 32;
  }
  assert(ClassID == GPRRC || ClassID == FPRRC || ClassID == VRRC);
  return 32;
}

unsigned PPCTTIImpl::getRegisterClassForType(bool Vector, Type *Ty) const {
  if (Vector)
    return ST->hasVSX() ? VSXRC : VRRC;
  else if (Ty && (Ty->getScalarType()->isFloatTy() ||
                  Ty->getScalarType()->isDoubleTy()))
    return ST->hasVSX() ? VSXRC : FPRRC;
  else if (Ty && (Ty->getScalarType()->isFP128Ty() ||
                  Ty->getScalarType()->isPPC_FP128Ty()))
    return VRRC;
  else if (Ty && Ty->getScalarType()->isHalfTy())
    return VSXRC;
  else
    return GPRRC;
}

const char* PPCTTIImpl::getRegisterClassName(unsigned ClassID) const {
  switch (ClassID) {
    default:
      llvm_unreachable("unknown register class");
      return "PPC::unknown register class";
    case GPRRC:       return "PPC::GPRRC";
    case FPRRC:       return "PPC::FPRRC";
    case VRRC:        return "PPC::VRRC";
    case VSXRC:       return "PPC::VSXRC";
  }
}

TypeSize
PPCTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
  switch (K) {
  case TargetTransformInfo::RGK_Scalar:
    return TypeSize::getFixed(ST->isPPC64() ? 64 : 32);
  case TargetTransformInfo::RGK_FixedWidthVector:
    return TypeSize::getFixed(ST->hasAltivec() ? 128 : 0);
  case TargetTransformInfo::RGK_ScalableVector:
    return TypeSize::getScalable(0);
  }

  llvm_unreachable("Unsupported register kind");
}

unsigned PPCTTIImpl::getCacheLineSize() const {
  // Check first if the user specified a custom line size.
  if (CacheLineSize.getNumOccurrences() > 0)
    return CacheLineSize;

  // Starting with P7 we have a cache line size of 128.
  unsigned Directive = ST->getCPUDirective();
  // Assume that Future CPU has the same cache line size as the others.
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9 || Directive == PPC::DIR_PWR10 ||
      Directive == PPC::DIR_PWR_FUTURE)
    return 128;

  // On other processors return a default of 64 bytes.
  return 64;
}

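// How far ahead of a load to place the prefetch, measured in instructions.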
unsigned PPCTTIImpl::getPrefetchDistance() const {
  return 300;
}

unsigned PPCTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  unsigned Directive = ST->getCPUDirective();
  // The 440 has no SIMD support, but floating-point instructions
  // have a 5-cycle latency, so unroll by 5x for latency hiding.
  if (Directive == PPC::DIR_440)
    return 5;

  // The A2 has no SIMD support, but floating-point instructions
  // have a 6-cycle latency, so unroll by 6x for latency hiding.
  if (Directive == PPC::DIR_A2)
    return 6;

  // FIXME: For lack of any better information, do no harm...
  if (Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500)
    return 1;

  // For P7 and P8, floating-point instructions have a 6-cycle latency and
  // there are two execution units, so unroll by 12x for latency hiding.
  // FIXME: the same for P9 as previous gen until POWER9 scheduling is ready
  // FIXME: the same for P10 as previous gen until POWER10 scheduling is ready
  // Assume that future is the same as the others.
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9 || Directive == PPC::DIR_PWR10 ||
      Directive == PPC::DIR_PWR_FUTURE)
    return 12;

  // For most things, modern systems have two execution units (and
  // out-of-order execution).
  return 2;
}

// Adjust the cost of vector instructions on targets where there is overlap
// between the vector and scalar units, thereby reducing the overall
// throughput of vector code relative to scalar code.
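// (VectorsUseTwoUnits is currently set on subtargets such as the POWER9.)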
InstructionCost PPCTTIImpl::vectorCostAdjustment(InstructionCost Cost,
                                                 unsigned Opcode, Type *Ty1,
                                                 Type *Ty2) {
  if (!ST->vectorsUseTwoUnits() || !Ty1->isVectorTy())
    return Cost;

  std::pair<InstructionCost, MVT> LT1 = TLI->getTypeLegalizationCost(DL, Ty1);
  // If type legalization involves splitting the vector, we don't want to
  // double the cost at every step - only the last step.
  if (LT1.first != 1 || !LT1.second.isVector())
    return Cost;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  if (TLI->isOperationExpand(ISD, LT1.second))
    return Cost;

  if (Ty2) {
    std::pair<InstructionCost, MVT> LT2 = TLI->getTypeLegalizationCost(DL, Ty2);
    if (LT2.first != 1 || !LT2.second.isVector())
      return Cost;
  }

  return Cost * 2;
}

InstructionCost PPCTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
    const Instruction *CxtI) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");
  // TODO: Handle more cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                         Op2Info, Opd1PropInfo,
                                         Opd2PropInfo, Args, CxtI);

  // Fall back to the default implementation.
  InstructionCost Cost = BaseT::getArithmeticInstrCost(
      Opcode, Ty, CostKind, Op1Info, Op2Info, Opd1PropInfo, Opd2PropInfo);
  return vectorCostAdjustment(Cost, Opcode, Ty, nullptr);
}

InstructionCost PPCTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp,
                                           ArrayRef<int> Mask, int Index,
                                           Type *SubTp) {
  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

  // PPC, for both Altivec and VSX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). We need one such shuffle instruction for each actual
  // register (this is not true for arbitrary shuffles, but is true for the
  // structured types of shuffles covered by TTI::ShuffleKind).
  return vectorCostAdjustment(LT.first, Instruction::ShuffleVector, Tp,
                              nullptr);
}

InstructionCost PPCTTIImpl::getCFInstrCost(unsigned Opcode,
                                           TTI::TargetCostKind CostKind,
                                           const Instruction *I) {
  if (CostKind != TTI::TCK_RecipThroughput)
    return Opcode == Instruction::PHI ? 0 : 1;
  // Branches are assumed to be predicted.
  return 0;
}

InstructionCost PPCTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
                                             Type *Src,
                                             TTI::CastContextHint CCH,
                                             TTI::TargetCostKind CostKind,
                                             const Instruction *I) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  InstructionCost Cost =
      BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
  Cost = vectorCostAdjustment(Cost, Opcode, Dst, Src);
  // TODO: Allow non-throughput costs that aren't binary.
  if (CostKind != TTI::TCK_RecipThroughput)
    return Cost == 0 ? 0 : 1;
  return Cost;
}

InstructionCost PPCTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                               Type *CondTy,
                                               CmpInst::Predicate VecPred,
                                               TTI::TargetCostKind CostKind,
                                               const Instruction *I) {
  InstructionCost Cost =
      BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return Cost;
  return vectorCostAdjustment(Cost, Opcode, ValTy, nullptr);
}

InstructionCost PPCTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                               unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  InstructionCost Cost = BaseT::getVectorInstrCost(Opcode, Val, Index);
  Cost = vectorCostAdjustment(Cost, Opcode, Val, nullptr);

  if (ST->hasVSX() && Val->getScalarType()->isDoubleTy()) {
    // Double-precision scalars are already located in index #0 (or #1 if LE).
    if (ISD == ISD::EXTRACT_VECTOR_ELT &&
        Index == (ST->isLittleEndian() ? 1 : 0))
      return 0;

    return Cost;

  } else if (Val->getScalarType()->isIntegerTy() && Index != -1U) {
    if (ST->hasP9Altivec()) {
      if (ISD == ISD::INSERT_VECTOR_ELT)
        // A move-to VSR and a permute/insert.  Assume vector operation cost
        // for both (cost will be 2x on P9).
        return vectorCostAdjustment(2, Opcode, Val, nullptr);

      // It's an extract.  Maybe we can do a cheap move-from VSR.
      unsigned EltSize = Val->getScalarSizeInBits();
      if (EltSize == 64) {
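        // mfvsrd copies doubleword 0 of the VSR, which holds element 0 in
        // big-endian order and element 1 in little-endian order.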
        unsigned MfvsrdIndex = ST->isLittleEndian() ? 1 : 0;
        if (Index == MfvsrdIndex)
          return 1;
      } else if (EltSize == 32) {
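        // mfvsrwz reads word 1 of the VSR (bits 32-63), i.e. element 1 in
        // big-endian order and element 2 in little-endian order.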
        unsigned MfvsrwzIndex = ST->isLittleEndian() ? 2 : 1;
        if (Index == MfvsrwzIndex)
          return 1;
      }

      // We need a vector extract (or mfvsrld).  Assume vector operation cost.
      // The cost of the load constant for a vector extract is disregarded
      // (invariant, easily schedulable).
      return vectorCostAdjustment(1, Opcode, Val, nullptr);

    } else if (ST->hasDirectMove())
      // Assume permute has standard cost.
      // Assume move-to/move-from VSR have 2x standard cost.
      return 3;
  }

  // Estimated cost of a load-hit-store delay.  This was obtained
  // experimentally as a minimum needed to prevent unprofitable
  // vectorization for the paq8p benchmark.  It may need to be
  // raised further if other unprofitable cases remain.
  unsigned LHSPenalty = 2;
  if (ISD == ISD::INSERT_VECTOR_ELT)
    LHSPenalty += 7;

  // Vector element insert/extract with Altivec is very expensive,
  // because they require store and reload with the attendant
  // processor stall for load-hit-store.  Until VSX is available,
  // these need to be estimated as very costly.
  if (ISD == ISD::EXTRACT_VECTOR_ELT ||
      ISD == ISD::INSERT_VECTOR_ELT)
    return LHSPenalty + Cost;

  return Cost;
}

InstructionCost PPCTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                            MaybeAlign Alignment,
                                            unsigned AddressSpace,
                                            TTI::TargetCostKind CostKind,
                                            const Instruction *I) {
  if (TLI->getValueType(DL, Src, true) == MVT::Other)
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                  CostKind);
  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  InstructionCost Cost =
      BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, CostKind);
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return Cost;

  Cost = vectorCostAdjustment(Cost, Opcode, Src, nullptr);

  bool IsAltivecType = ST->hasAltivec() &&
                       (LT.second == MVT::v16i8 || LT.second == MVT::v8i16 ||
                        LT.second == MVT::v4i32 || LT.second == MVT::v4f32);
  bool IsVSXType = ST->hasVSX() &&
                   (LT.second == MVT::v2f64 || LT.second == MVT::v2i64);

  // VSX has 32b/64b load instructions. Legalization can handle loading of
  // 32b/64b to VSR correctly and cheaply. But BaseT::getMemoryOpCost and
  // PPCTargetLowering can't compute the cost appropriately. So here we
  // explicitly check this case. (Note that the size below is in bits.)
  unsigned MemBits = Src->getPrimitiveSizeInBits();
  if (Opcode == Instruction::Load && ST->hasVSX() && IsAltivecType &&
      (MemBits == 64 || (ST->hasP8Vector() && MemBits == 32)))
    return 1;

  // Aligned loads and stores are easy.
  unsigned SrcBytes = LT.second.getStoreSize();
  if (!SrcBytes || !Alignment || *Alignment >= SrcBytes)
    return Cost;

  // If we can use the permutation-based load sequence, then this is also
  // relatively cheap (not counting loop-invariant instructions): one load plus
  // one permute (the last load in a series has extra cost, but we're
  // neglecting that here). Note that on the P7, we could do unaligned loads
  // for Altivec types using the VSX instructions, but that's more expensive
  // than using the permutation-based load sequence. On the P8, that's no
  // longer true.
  if (Opcode == Instruction::Load && (!ST->hasP8Vector() && IsAltivecType) &&
      *Alignment >= LT.second.getScalarType().getStoreSize())
    return Cost + LT.first; // Add the cost of the permutations.

  // For VSX, we can do unaligned loads and stores on Altivec/VSX types. On the
  // P7, unaligned vector loads are more expensive than the permutation-based
  // load sequence, so that might be used instead, but regardless, the net cost
  // is about the same (not counting loop-invariant instructions).
  if (IsVSXType || (ST->hasVSX() && IsAltivecType))
    return Cost;

  // Newer PPC supports unaligned memory access.
  if (TLI->allowsMisalignedMemoryAccesses(LT.second, 0))
    return Cost;

  // PPC in general does not support unaligned loads and stores. They'll need
  // to be decomposed based on the alignment factor.

  // Add the cost of each scalar load or store.
  assert(Alignment);
  Cost += LT.first * ((SrcBytes / Alignment->value()) - 1);

  // For a vector type, there is also scalarization overhead (only for
  // stores, loads are expanded using the vector-load + permutation sequence,
  // which is much less expensive).
  if (Src->isVectorTy() && Opcode == Instruction::Store)
    for (int i = 0, e = cast<FixedVectorType>(Src)->getNumElements(); i < e;
         ++i)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Src, i);

  return Cost;
}

InstructionCost PPCTTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) {
  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind,
                                             UseMaskForCond, UseMaskForGaps);

  assert(isa<VectorType>(VecTy) &&
         "Expect a vector type for interleaved memory op");

  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, VecTy);

  // First, the cost of the load/store operation.
  InstructionCost Cost = getMemoryOpCost(Opcode, VecTy, MaybeAlign(Alignment),
                                         AddressSpace, CostKind);

  // PPC, for both Altivec and VSX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). For each result vector, we need one shuffle per incoming
  // vector (except that the first shuffle can take two incoming vectors
  // because it does not need to take itself).
  Cost += Factor * (LT.first - 1);

  return Cost;
}

InstructionCost
PPCTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                  TTI::TargetCostKind CostKind) {
  return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}

bool PPCTTIImpl::areFunctionArgsABICompatible(
    const Function *Caller, const Function *Callee,
    SmallPtrSetImpl<Argument *> &Args) const {

  // We need to ensure that argument promotion does not
  // attempt to promote pointers to MMA types (__vector_pair
  // and __vector_quad) since these types explicitly cannot be
  // passed as arguments. Both of these types are larger than
  // the 128-bit Altivec vectors and have a scalar size of 1 bit.
  if (!BaseT::areFunctionArgsABICompatible(Caller, Callee, Args))
    return false;

  return llvm::none_of(Args, [](Argument *A) {
    auto *EltTy = cast<PointerType>(A->getType())->getElementType();
    if (EltTy->isSized())
      return (EltTy->isIntOrIntVectorTy(1) &&
              EltTy->getPrimitiveSizeInBits() > 128);
    return false;
  });
}

bool PPCTTIImpl::canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE,
                            LoopInfo *LI, DominatorTree *DT,
                            AssumptionCache *AC, TargetLibraryInfo *LibInfo) {
  // Process nested loops first.
  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
    if (canSaveCmp(*I, BI, SE, LI, DT, AC, LibInfo))
      return false; // Stop search.

  HardwareLoopInfo HWLoopInfo(L);

  if (!HWLoopInfo.canAnalyze(*LI))
    return false;

  if (!isHardwareLoopProfitable(L, *SE, *AC, LibInfo, HWLoopInfo))
    return false;

  if (!HWLoopInfo.isHardwareLoopCandidate(*SE, *LI, *DT))
    return false;

  *BI = HWLoopInfo.ExitBranch;
  return true;
}

bool PPCTTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                               TargetTransformInfo::LSRCost &C2) {
  // By default, PowerPC gives instruction count first priority.
  // If LsrNoInsnsCost is set, fall back to the default implementation.
  if (!LsrNoInsnsCost)
    return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost, C1.NumIVMuls,
                    C1.NumBaseAdds, C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
           std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost, C2.NumIVMuls,
                    C2.NumBaseAdds, C2.ScaleCost, C2.ImmCost, C2.SetupCost);
  else
    return TargetTransformInfoImplBase::isLSRCostLess(C1, C2);
}

bool PPCTTIImpl::isNumRegsMajorCostOfLSR() {
  return false;
}

bool PPCTTIImpl::shouldBuildRelLookupTables() const {
  const PPCTargetMachine &TM = ST->getTargetMachine();
  // XCOFF hasn't implemented lowerRelativeReference, disable non-ELF for now.
  if (!TM.isELFv2ABI())
    return false;
  return BaseT::shouldBuildRelLookupTables();
}

bool PPCTTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                    MemIntrinsicInfo &Info) {
  switch (Inst->getIntrinsicID()) {
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::ppc_altivec_lvebx:
  case Intrinsic::ppc_altivec_lvehx:
  case Intrinsic::ppc_altivec_lvewx:
  case Intrinsic::ppc_vsx_lxvd2x:
  case Intrinsic::ppc_vsx_lxvw4x:
  case Intrinsic::ppc_vsx_lxvd2x_be:
  case Intrinsic::ppc_vsx_lxvw4x_be:
  case Intrinsic::ppc_vsx_lxvl:
  case Intrinsic::ppc_vsx_lxvll:
  case Intrinsic::ppc_vsx_lxvp: {
    Info.PtrVal = Inst->getArgOperand(0);
    Info.ReadMem = true;
    Info.WriteMem = false;
    return true;
  }
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
  case Intrinsic::ppc_altivec_stvebx:
  case Intrinsic::ppc_altivec_stvehx:
  case Intrinsic::ppc_altivec_stvewx:
  case Intrinsic::ppc_vsx_stxvd2x:
  case Intrinsic::ppc_vsx_stxvw4x:
  case Intrinsic::ppc_vsx_stxvd2x_be:
  case Intrinsic::ppc_vsx_stxvw4x_be:
  case Intrinsic::ppc_vsx_stxvl:
  case Intrinsic::ppc_vsx_stxvll:
  case Intrinsic::ppc_vsx_stxvp: {
    Info.PtrVal = Inst->getArgOperand(1);
    Info.ReadMem = false;
    Info.WriteMem = true;
    return true;
  }
  default:
    break;
  }

  return false;
}