//===----------- VectorUtils.cpp - Vectorizer utility functions -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines vectorizer utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/VectorUtils.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/CommandLine.h"

#define DEBUG_TYPE "vectorutils"

using namespace llvm;
using namespace llvm::PatternMatch;

/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// Return true if all of the intrinsic's arguments and return type are scalars
/// for the scalar form of the intrinsic, and vectors for the vector form of
/// the intrinsic (except operands that are marked as always being scalar by
/// hasVectorInstrinsicScalarOpd).
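/// For example, the scalar form i32 @llvm.ctlz.i32(i32, i1) corresponds to the
/// vector form <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1); the i1 is_zero_undef
/// flag stays scalar in both forms.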
bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) {
  switch (ID) {
  case Intrinsic::bswap: // Begin integer bit-manipulation.
  case Intrinsic::bitreverse:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::umul_fix:
  case Intrinsic::umul_fix_sat:
  case Intrinsic::sqrt: // Begin floating-point.
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::log:
  case Intrinsic::log10:
  case Intrinsic::log2:
  case Intrinsic::fabs:
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum:
  case Intrinsic::copysign:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::round:
  case Intrinsic::pow:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::powi:
  case Intrinsic::canonicalize:
    return true;
  default:
    return false;
  }
}

/// Identifies if the vector form of the intrinsic has a scalar operand.
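/// For example, the is_zero_undef flag of llvm.ctlz/llvm.cttz (operand 1) and
/// the i32 exponent of llvm.powi (operand 1) remain scalar even when the
/// intrinsic itself is vectorized.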
bool llvm::hasVectorInstrinsicScalarOpd(Intrinsic::ID ID,
                                        unsigned ScalarOpdIdx) {
  switch (ID) {
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::powi:
    return (ScalarOpdIdx == 1);
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::umul_fix:
  case Intrinsic::umul_fix_sat:
    return (ScalarOpdIdx == 2);
  default:
    return false;
  }
}

/// Returns the intrinsic ID for the call.
/// For the given call instruction, this finds the corresponding intrinsic and
/// returns its ID; if no mapping is found, it returns not_intrinsic.
Intrinsic::ID llvm::getVectorIntrinsicIDForCall(const CallInst *CI,
                                                const TargetLibraryInfo *TLI) {
  Intrinsic::ID ID = getIntrinsicForCallSite(CI, TLI);
  if (ID == Intrinsic::not_intrinsic)
    return Intrinsic::not_intrinsic;

  if (isTriviallyVectorizable(ID) || ID == Intrinsic::lifetime_start ||
      ID == Intrinsic::lifetime_end || ID == Intrinsic::assume ||
      ID == Intrinsic::sideeffect)
    return ID;
  return Intrinsic::not_intrinsic;
}

/// Find the operand of the GEP that should be checked for consecutive
/// stores. This ignores trailing indices that have no effect on the final
/// pointer.
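/// An illustrative example: for a GEP such as
///   getelementptr { i32 }, { i32 }* %A, i64 %i, i32 0
/// the trailing zero index can be peeled because the indexed struct has the
/// same allocation size as the GEP's result element type, so the returned
/// operand index identifies %i.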
unsigned llvm::getGEPInductionOperand(const GetElementPtrInst *Gep) {
  const DataLayout &DL = Gep->getModule()->getDataLayout();
  unsigned LastOperand = Gep->getNumOperands() - 1;
  unsigned GEPAllocSize = DL.getTypeAllocSize(Gep->getResultElementType());

  // Walk backwards and try to peel off zeros.
  while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
    // Find the type we're currently indexing into.
    gep_type_iterator GEPTI = gep_type_begin(Gep);
    std::advance(GEPTI, LastOperand - 2);

    // If it's a type with the same allocation size as the result of the GEP we
    // can peel off the zero index.
    if (DL.getTypeAllocSize(GEPTI.getIndexedType()) != GEPAllocSize)
      break;
    --LastOperand;
  }

  return LastOperand;
}

/// If the argument is a GEP, then returns the operand identified by
/// getGEPInductionOperand. However, if there is some other non-loop-invariant
/// operand, it returns that instead.
Value *llvm::stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP)
    return Ptr;

  unsigned InductionOperand = getGEPInductionOperand(GEP);

  // Check that all of the gep indices are uniform except for our induction
  // operand.
  for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i)
    if (i != InductionOperand &&
        !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(i)), Lp))
      return Ptr;
  return GEP->getOperand(InductionOperand);
}

/// If a value has only one user that is a CastInst, return it.
Value *llvm::getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty) {
  Value *UniqueCast = nullptr;
  for (User *U : Ptr->users()) {
    CastInst *CI = dyn_cast<CastInst>(U);
    if (CI && CI->getType() == Ty) {
      if (!UniqueCast)
        UniqueCast = CI;
      else
        return nullptr;
    }
  }
  return UniqueCast;
}

/// Get the stride of a pointer access in a loop. Looks for symbolic
/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
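/// For example, for an access like A[i * %stride] with i32 elements, the
/// pointer SCEV is roughly {%A,+,(4 * %stride)}<%loop>, and the value returned
/// is %stride (assuming %stride is invariant in the loop).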
Value *llvm::getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
  auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
  if (!PtrTy || PtrTy->isAggregateType())
    return nullptr;

  // Try to remove a gep instruction to make the pointer (actually the index
  // at this point) easier to analyze. If OrigPtr is equal to Ptr, we are
  // analyzing the pointer; otherwise, we are analyzing the index.
  Value *OrigPtr = Ptr;

  // The size of the pointer access.
  int64_t PtrAccessSize = 1;

  Ptr = stripGetElementPtr(Ptr, SE, Lp);
  const SCEV *V = SE->getSCEV(Ptr);

  if (Ptr != OrigPtr)
    // Strip off casts.
    while (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(V))
      V = C->getOperand();

  const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V);
  if (!S)
    return nullptr;

  V = S->getStepRecurrence(*SE);
  if (!V)
    return nullptr;

  // Strip off the size of access multiplication if we are still analyzing the
  // pointer.
  if (OrigPtr == Ptr) {
    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {
      if (M->getOperand(0)->getSCEVType() != scConstant)
        return nullptr;

      const APInt &APStepVal = cast<SCEVConstant>(M->getOperand(0))->getAPInt();

      // Huge step value - give up.
      if (APStepVal.getBitWidth() > 64)
        return nullptr;

      int64_t StepVal = APStepVal.getSExtValue();
      if (PtrAccessSize != StepVal)
        return nullptr;
      V = M->getOperand(1);
    }
  }

  // Strip off casts.
  Type *StrippedOffRecurrenceCast = nullptr;
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(V)) {
    StrippedOffRecurrenceCast = C->getType();
    V = C->getOperand();
  }

  // Look for the loop invariant symbolic value.
  const SCEVUnknown *U = dyn_cast<SCEVUnknown>(V);
  if (!U)
    return nullptr;

  Value *Stride = U->getValue();
  if (!Lp->isLoopInvariant(Stride))
    return nullptr;

  // If we have stripped off the recurrence cast we have to make sure that we
  // return the value that is used in this loop so that we can replace it later.
  if (StrippedOffRecurrenceCast)
    Stride = getUniqueCastUse(Stride, Lp, StrippedOffRecurrenceCast);

  return Stride;
}

/// Given a vector and an element number, see if the scalar value is
/// already around as a register, for example if it were inserted then extracted
/// from the vector.
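/// For example, given
///   %v = insertelement <4 x i32> %w, i32 %x, i32 2
/// findScalarElement(%v, 2) returns %x, while findScalarElement(%v, 1)
/// recurses into %w.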
Value *llvm::findScalarElement(Value *V, unsigned EltNo) {
  assert(V->getType()->isVectorTy() && "Not looking at a vector?");
  VectorType *VTy = cast<VectorType>(V->getType());
  unsigned Width = VTy->getNumElements();
  if (EltNo >= Width)  // Out of range access.
    return UndefValue::get(VTy->getElementType());

  if (Constant *C = dyn_cast<Constant>(V))
    return C->getAggregateElement(EltNo);

  if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
    // If this is an insert to a variable element, we don't know what it is.
    if (!isa<ConstantInt>(III->getOperand(2)))
      return nullptr;
    unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();

    // If this is an insert to the element we are looking for, return the
    // inserted value.
    if (EltNo == IIElt)
      return III->getOperand(1);

    // Otherwise, the insertelement doesn't modify the value, recurse on its
    // vector input.
    return findScalarElement(III->getOperand(0), EltNo);
  }

  if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V)) {
    unsigned LHSWidth = SVI->getOperand(0)->getType()->getVectorNumElements();
    int InEl = SVI->getMaskValue(EltNo);
    if (InEl < 0)
      return UndefValue::get(VTy->getElementType());
    if (InEl < (int)LHSWidth)
      return findScalarElement(SVI->getOperand(0), InEl);
    return findScalarElement(SVI->getOperand(1), InEl - LHSWidth);
  }

  // Extract a value from a vector add operation with a constant zero.
  // TODO: Use getBinOpIdentity() to generalize this.
  Value *Val; Constant *C;
  if (match(V, m_Add(m_Value(Val), m_Constant(C))))
    if (Constant *Elt = C->getAggregateElement(EltNo))
      if (Elt->isNullValue())
        return findScalarElement(Val, EltNo);

  // Otherwise, we don't know.
  return nullptr;
}

/// Get splat value if the input is a splat vector or return nullptr.
/// This function is not fully general. It checks only 2 cases:
/// the input value is (1) a splat constant vector or (2) a sequence
/// of instructions that broadcasts a scalar at element 0.
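/// An example of case (2), a broadcast sequence this recognizes:
///   %ins   = insertelement <4 x i32> undef, i32 %s, i32 0
///   %splat = shufflevector <4 x i32> %ins, <4 x i32> undef,
///                          <4 x i32> zeroinitializer
/// Here getSplatValue returns %s.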
const llvm::Value *llvm::getSplatValue(const Value *V) {
  if (isa<VectorType>(V->getType()))
    if (auto *C = dyn_cast<Constant>(V))
      return C->getSplatValue();

  // shuf (inselt ?, Splat, 0), ?, <0, undef, 0, ...>
  Value *Splat;
  if (match(V, m_ShuffleVector(m_InsertElement(m_Value(), m_Value(Splat),
                                               m_ZeroInt()),
                               m_Value(), m_ZeroInt())))
    return Splat;

  return nullptr;
}

// This setting is based on its counterpart in value tracking, but it could be
// adjusted if needed.
const unsigned MaxDepth = 6;

bool llvm::isSplatValue(const Value *V, unsigned Depth) {
  assert(Depth <= MaxDepth && "Limit Search Depth");

  if (isa<VectorType>(V->getType())) {
    if (isa<UndefValue>(V))
      return true;
    // FIXME: Constant splat analysis does not allow undef elements.
    if (auto *C = dyn_cast<Constant>(V))
      return C->getSplatValue() != nullptr;
  }

  // FIXME: Constant splat analysis does not allow undef elements.
  Constant *Mask;
  if (match(V, m_ShuffleVector(m_Value(), m_Value(), m_Constant(Mask))))
    return Mask->getSplatValue() != nullptr;

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ == MaxDepth)
    return false;

  // If both operands of a binop are splats, the result is a splat.
  Value *X, *Y, *Z;
  if (match(V, m_BinOp(m_Value(X), m_Value(Y))))
    return isSplatValue(X, Depth) && isSplatValue(Y, Depth);

  // If all operands of a select are splats, the result is a splat.
  if (match(V, m_Select(m_Value(X), m_Value(Y), m_Value(Z))))
    return isSplatValue(X, Depth) && isSplatValue(Y, Depth) &&
           isSplatValue(Z, Depth);

  // TODO: Add support for unary ops (fneg), casts, intrinsics (overflow ops).

  return false;
}

MapVector<Instruction *, uint64_t>
llvm::computeMinimumValueSizes(ArrayRef<BasicBlock *> Blocks, DemandedBits &DB,
                               const TargetTransformInfo *TTI) {

  // DemandedBits will give us every value's live-out bits. But we want
  // to ensure no extra casts would need to be inserted, so every DAG
  // of connected values must have the same minimum bitwidth.
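  // For example, if every value in one connected component demands only its
  // low 9 bits, the computation below rounds the component's minimum bitwidth
  // up to the next power of 2, i.e. 16 bits.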
  EquivalenceClasses<Value *> ECs;
  SmallVector<Value *, 16> Worklist;
  SmallPtrSet<Value *, 4> Roots;
  SmallPtrSet<Value *, 16> Visited;
  DenseMap<Value *, uint64_t> DBits;
  SmallPtrSet<Instruction *, 4> InstructionSet;
  MapVector<Instruction *, uint64_t> MinBWs;

  // Determine the roots. We work bottom-up, from truncs or icmps.
  bool SeenExtFromIllegalType = false;
  for (auto *BB : Blocks)
    for (auto &I : *BB) {
      InstructionSet.insert(&I);

      if (TTI && (isa<ZExtInst>(&I) || isa<SExtInst>(&I)) &&
          !TTI->isTypeLegal(I.getOperand(0)->getType()))
        SeenExtFromIllegalType = true;

      // Only deal with non-vector integers up to 64-bits wide.
      if ((isa<TruncInst>(&I) || isa<ICmpInst>(&I)) &&
          !I.getType()->isVectorTy() &&
          I.getOperand(0)->getType()->getScalarSizeInBits() <= 64) {
        // Don't make work for ourselves. If we know the loaded type is legal,
        // don't add it to the worklist.
        if (TTI && isa<TruncInst>(&I) && TTI->isTypeLegal(I.getType()))
          continue;

        Worklist.push_back(&I);
        Roots.insert(&I);
      }
    }
  // Early exit.
  if (Worklist.empty() || (TTI && !SeenExtFromIllegalType))
    return MinBWs;

  // Now proceed breadth-first, unioning values together.
  while (!Worklist.empty()) {
    Value *Val = Worklist.pop_back_val();
    Value *Leader = ECs.getOrInsertLeaderValue(Val);

    if (Visited.count(Val))
      continue;
    Visited.insert(Val);

    // Non-instructions terminate a chain successfully.
    if (!isa<Instruction>(Val))
      continue;
    Instruction *I = cast<Instruction>(Val);

    // If we encounter a type that is larger than 64 bits, we can't represent
    // it so bail out.
    if (DB.getDemandedBits(I).getBitWidth() > 64)
      return MapVector<Instruction *, uint64_t>();

    uint64_t V = DB.getDemandedBits(I).getZExtValue();
    DBits[Leader] |= V;
    DBits[I] = V;

    // Casts, loads and instructions outside of our range terminate a chain
    // successfully.
    if (isa<SExtInst>(I) || isa<ZExtInst>(I) || isa<LoadInst>(I) ||
        !InstructionSet.count(I))
      continue;

    // Unsafe casts terminate a chain unsuccessfully. We can't do anything
    // useful with bitcasts, ptrtoints or inttoptrs and it'd be unsafe to
    // transform anything that relies on them.
    if (isa<BitCastInst>(I) || isa<PtrToIntInst>(I) || isa<IntToPtrInst>(I) ||
        !I->getType()->isIntegerTy()) {
      DBits[Leader] |= ~0ULL;
      continue;
    }

    // We don't modify the types of PHIs. Reductions will already have been
    // truncated if possible, and inductions' sizes will have been chosen by
    // indvars.
    if (isa<PHINode>(I))
      continue;

    if (DBits[Leader] == ~0ULL)
      // All bits demanded, no point continuing.
      continue;

    for (Value *O : cast<User>(I)->operands()) {
      ECs.unionSets(Leader, O);
      Worklist.push_back(O);
    }
  }

  // Now we've discovered all values, walk them to see if there are
  // any users we didn't see. If there are, we can't optimize that
  // chain.
  for (auto &I : DBits)
    for (auto *U : I.first->users())
      if (U->getType()->isIntegerTy() && DBits.count(U) == 0)
        DBits[ECs.getOrInsertLeaderValue(I.first)] |= ~0ULL;

  for (auto I = ECs.begin(), E = ECs.end(); I != E; ++I) {
    uint64_t LeaderDemandedBits = 0;
    for (auto MI = ECs.member_begin(I), ME = ECs.member_end(); MI != ME; ++MI)
      LeaderDemandedBits |= DBits[*MI];

    uint64_t MinBW = (sizeof(LeaderDemandedBits) * 8) -
                     llvm::countLeadingZeros(LeaderDemandedBits);
    // Round up to a power of 2
    if (!isPowerOf2_64((uint64_t)MinBW))
      MinBW = NextPowerOf2(MinBW);

    // We don't modify the types of PHIs. Reductions will already have been
    // truncated if possible, and inductions' sizes will have been chosen by
    // indvars.
    // If we are required to shrink a PHI, abandon this entire equivalence
    // class.
    bool Abort = false;
    for (auto MI = ECs.member_begin(I), ME = ECs.member_end(); MI != ME; ++MI)
      if (isa<PHINode>(*MI) &&
          MinBW < (*MI)->getType()->getScalarSizeInBits()) {
        Abort = true;
        break;
      }
    if (Abort)
      continue;

    for (auto MI = ECs.member_begin(I), ME = ECs.member_end(); MI != ME; ++MI) {
      if (!isa<Instruction>(*MI))
        continue;
      Type *Ty = (*MI)->getType();
      if (Roots.count(*MI))
        Ty = cast<Instruction>(*MI)->getOperand(0)->getType();
      if (MinBW < Ty->getScalarSizeInBits())
        MinBWs[cast<Instruction>(*MI)] = MinBW;
    }
  }

  return MinBWs;
}

/// Add all access groups in @p AccGroups to @p List.
template <typename ListT>
static void addToAccessGroupList(ListT &List, MDNode *AccGroups) {
  // Interpret an access group as a list containing itself.
  if (AccGroups->getNumOperands() == 0) {
    assert(isValidAsAccessGroup(AccGroups) && "Node must be an access group");
    List.insert(AccGroups);
    return;
  }

  for (auto &AccGroupListOp : AccGroups->operands()) {
    auto *Item = cast<MDNode>(AccGroupListOp.get());
    assert(isValidAsAccessGroup(Item) && "List item must be an access group");
    List.insert(Item);
  }
}

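/// Compute the union of two access-group lists; a single access group is
/// treated as a list containing itself. For example, uniting the single group
/// !5 with the list !{!5, !6} yields a list holding !5 and !6.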
MDNode *llvm::uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2) {
  if (!AccGroups1)
    return AccGroups2;
  if (!AccGroups2)
    return AccGroups1;
  if (AccGroups1 == AccGroups2)
    return AccGroups1;

  SmallSetVector<Metadata *, 4> Union;
  addToAccessGroupList(Union, AccGroups1);
  addToAccessGroupList(Union, AccGroups2);

  if (Union.size() == 0)
    return nullptr;
  if (Union.size() == 1)
    return cast<MDNode>(Union.front());

  LLVMContext &Ctx = AccGroups1->getContext();
  return MDNode::get(Ctx, Union.getArrayRef());
}

MDNode *llvm::intersectAccessGroups(const Instruction *Inst1,
                                    const Instruction *Inst2) {
  bool MayAccessMem1 = Inst1->mayReadOrWriteMemory();
  bool MayAccessMem2 = Inst2->mayReadOrWriteMemory();

  if (!MayAccessMem1 && !MayAccessMem2)
    return nullptr;
  if (!MayAccessMem1)
    return Inst2->getMetadata(LLVMContext::MD_access_group);
  if (!MayAccessMem2)
    return Inst1->getMetadata(LLVMContext::MD_access_group);

  MDNode *MD1 = Inst1->getMetadata(LLVMContext::MD_access_group);
  MDNode *MD2 = Inst2->getMetadata(LLVMContext::MD_access_group);
  if (!MD1 || !MD2)
    return nullptr;
  if (MD1 == MD2)
    return MD1;

  // Use a set for an efficient 'contains' check.
  SmallPtrSet<Metadata *, 4> AccGroupSet2;
  addToAccessGroupList(AccGroupSet2, MD2);

  SmallVector<Metadata *, 4> Intersection;
  if (MD1->getNumOperands() == 0) {
    assert(isValidAsAccessGroup(MD1) && "Node must be an access group");
    if (AccGroupSet2.count(MD1))
      Intersection.push_back(MD1);
  } else {
    for (const MDOperand &Node : MD1->operands()) {
      auto *Item = cast<MDNode>(Node.get());
      assert(isValidAsAccessGroup(Item) && "List item must be an access group");
      if (AccGroupSet2.count(Item))
        Intersection.push_back(Item);
    }
  }

  if (Intersection.size() == 0)
    return nullptr;
  if (Intersection.size() == 1)
    return cast<MDNode>(Intersection.front());

  LLVMContext &Ctx = Inst1->getContext();
  return MDNode::get(Ctx, Intersection);
}

/// \returns \p Inst after propagating metadata from \p VL.
Instruction *llvm::propagateMetadata(Instruction *Inst, ArrayRef<Value *> VL) {
  Instruction *I0 = cast<Instruction>(VL[0]);
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  I0->getAllMetadataOtherThanDebugLoc(Metadata);

  for (auto Kind : {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                    LLVMContext::MD_noalias, LLVMContext::MD_fpmath,
                    LLVMContext::MD_nontemporal, LLVMContext::MD_invariant_load,
                    LLVMContext::MD_access_group}) {
    MDNode *MD = I0->getMetadata(Kind);

    for (int J = 1, E = VL.size(); MD && J != E; ++J) {
      const Instruction *IJ = cast<Instruction>(VL[J]);
      MDNode *IMD = IJ->getMetadata(Kind);
      switch (Kind) {
      case LLVMContext::MD_tbaa:
        MD = MDNode::getMostGenericTBAA(MD, IMD);
        break;
      case LLVMContext::MD_alias_scope:
        MD = MDNode::getMostGenericAliasScope(MD, IMD);
        break;
      case LLVMContext::MD_fpmath:
        MD = MDNode::getMostGenericFPMath(MD, IMD);
        break;
      case LLVMContext::MD_noalias:
      case LLVMContext::MD_nontemporal:
      case LLVMContext::MD_invariant_load:
        MD = MDNode::intersect(MD, IMD);
        break;
      case LLVMContext::MD_access_group:
        MD = intersectAccessGroups(Inst, IJ);
        break;
      default:
        llvm_unreachable("unhandled metadata");
      }
    }

    Inst->setMetadata(Kind, MD);
  }

  return Inst;
}

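/// For example, for an interleave group with factor 4 whose members are
/// present only at indices 0 and 2, and VF = 2, the resulting gap mask is
/// <1, 0, 1, 0, 1, 0, 1, 0> (one i1 per member slot, repeated VF times).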
Constant *
llvm::createBitMaskForGaps(IRBuilder<> &Builder, unsigned VF,
                           const InterleaveGroup<Instruction> &Group) {
  // All 1's means mask is not needed.
  if (Group.getNumMembers() == Group.getFactor())
    return nullptr;

  // TODO: support reversed access.
  assert(!Group.isReverse() && "Reversed group not supported.");

  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < Group.getFactor(); ++j) {
      unsigned HasMember = Group.getMember(j) ? 1 : 0;
      Mask.push_back(Builder.getInt1(HasMember));
    }

  return ConstantVector::get(Mask);
}

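/// For example, with ReplicationFactor = 3 and VF = 2 the resulting shuffle
/// mask is <0, 0, 0, 1, 1, 1>, replicating each source lane three times.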
Constant *llvm::createReplicatedMask(IRBuilder<> &Builder,
                                     unsigned ReplicationFactor, unsigned VF) {
  SmallVector<Constant *, 16> MaskVec;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < ReplicationFactor; j++)
      MaskVec.push_back(Builder.getInt32(i));

  return ConstantVector::get(MaskVec);
}

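/// For example, with VF = 4 and NumVecs = 2 the resulting shuffle mask is
/// <0, 4, 1, 5, 2, 6, 3, 7>, interleaving the lanes of two vectors.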
Constant *llvm::createInterleaveMask(IRBuilder<> &Builder, unsigned VF,
                                     unsigned NumVecs) {
  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < NumVecs; j++)
      Mask.push_back(Builder.getInt32(j * VF + i));

  return ConstantVector::get(Mask);
}

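/// For example, with Start = 0, Stride = 2 and VF = 4 the resulting shuffle
/// mask is <0, 2, 4, 6>, selecting every other lane.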
Constant *llvm::createStrideMask(IRBuilder<> &Builder, unsigned Start,
                                 unsigned Stride, unsigned VF) {
  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    Mask.push_back(Builder.getInt32(Start + i * Stride));

  return ConstantVector::get(Mask);
}

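/// For example, with Start = 0, NumInts = 4 and NumUndefs = 2 the resulting
/// shuffle mask is <0, 1, 2, 3, undef, undef>.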
Constant *llvm::createSequentialMask(IRBuilder<> &Builder, unsigned Start,
                                     unsigned NumInts, unsigned NumUndefs) {
  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < NumInts; i++)
    Mask.push_back(Builder.getInt32(Start + i));

  Constant *Undef = UndefValue::get(Builder.getInt32Ty());
  for (unsigned i = 0; i < NumUndefs; i++)
    Mask.push_back(Undef);

  return ConstantVector::get(Mask);
}

/// A helper function for concatenating vectors. This function concatenates two
/// vectors having the same element type. If the second vector has fewer
/// elements than the first, it is padded with undefs.
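/// For example, concatenating a <4 x i8> with a <2 x i8> first widens the
/// second vector to <4 x i8> with undefs, then shuffles both into a single
/// <6 x i8> result.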
static Value *concatenateTwoVectors(IRBuilder<> &Builder, Value *V1,
                                    Value *V2) {
  VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType());
  VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType());
  assert(VecTy1 && VecTy2 &&
         VecTy1->getScalarType() == VecTy2->getScalarType() &&
         "Expect two vectors with the same element type");

  unsigned NumElts1 = VecTy1->getNumElements();
  unsigned NumElts2 = VecTy2->getNumElements();
  assert(NumElts1 >= NumElts2 &&
         "Expect the first vector to have at least as many elements");

  if (NumElts1 > NumElts2) {
    // Extend with UNDEFs.
    Constant *ExtMask =
        createSequentialMask(Builder, 0, NumElts2, NumElts1 - NumElts2);
    V2 = Builder.CreateShuffleVector(V2, UndefValue::get(VecTy2), ExtMask);
  }

  Constant *Mask = createSequentialMask(Builder, 0, NumElts1 + NumElts2, 0);
  return Builder.CreateShuffleVector(V1, V2, Mask);
}

Value *llvm::concatenateVectors(IRBuilder<> &Builder, ArrayRef<Value *> Vecs) {
  unsigned NumVecs = Vecs.size();
  assert(NumVecs > 1 && "Should be at least two vectors");

  SmallVector<Value *, 8> ResList;
  ResList.append(Vecs.begin(), Vecs.end());
  do {
    SmallVector<Value *, 8> TmpList;
    for (unsigned i = 0; i < NumVecs - 1; i += 2) {
      Value *V0 = ResList[i], *V1 = ResList[i + 1];
      assert((V0->getType() == V1->getType() || i == NumVecs - 2) &&
             "Only the last vector may have a different type");

      TmpList.push_back(concatenateTwoVectors(Builder, V0, V1));
    }

    // Push the last vector if the total number of vectors is odd.
    if (NumVecs % 2 != 0)
      TmpList.push_back(ResList[NumVecs - 1]);

    ResList = TmpList;
    NumVecs = ResList.size();
  } while (NumVecs > 1);

  return ResList[0];
}

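/// Returns true when the mask is a constant in which every lane is zero or
/// undef, e.g. <i1 0, i1 undef> yields true and <i1 0, i1 1> yields false.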
bool llvm::maskIsAllZeroOrUndef(Value *Mask) {
  auto *ConstMask = dyn_cast<Constant>(Mask);
  if (!ConstMask)
    return false;
  if (ConstMask->isNullValue() || isa<UndefValue>(ConstMask))
    return true;
  for (unsigned I = 0, E = ConstMask->getType()->getVectorNumElements(); I != E;
       ++I) {
    if (auto *MaskElt = ConstMask->getAggregateElement(I))
      if (MaskElt->isNullValue() || isa<UndefValue>(MaskElt))
        continue;
    return false;
  }
  return true;
}

bool llvm::maskIsAllOneOrUndef(Value *Mask) {
  auto *ConstMask = dyn_cast<Constant>(Mask);
  if (!ConstMask)
    return false;
  if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
    return true;
  for (unsigned I = 0, E = ConstMask->getType()->getVectorNumElements(); I != E;
       ++I) {
    if (auto *MaskElt = ConstMask->getAggregateElement(I))
      if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt))
        continue;
    return false;
  }
  return true;
}

/// TODO: This is a lot like known bits, but for
/// vectors.  Is there something we can common this with?
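/// For example, for the constant mask <i1 1, i1 0, i1 1, i1 1> the result has
/// bits {0, 2, 3} set: lane 1 is known not to be demanded.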
APInt llvm::possiblyDemandedEltsInMask(Value *Mask) {

  const unsigned VWidth = cast<VectorType>(Mask->getType())->getNumElements();
  APInt DemandedElts = APInt::getAllOnesValue(VWidth);
  if (auto *CV = dyn_cast<ConstantVector>(Mask))
    for (unsigned i = 0; i < VWidth; i++)
      if (CV->getAggregateElement(i)->isNullValue())
        DemandedElts.clearBit(i);
  return DemandedElts;
}

bool InterleavedAccessInfo::isStrided(int Stride) {
  unsigned Factor = std::abs(Stride);
  return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
}

void InterleavedAccessInfo::collectConstStrideAccesses(
    MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
    const ValueToValueMap &Strides) {
  auto &DL = TheLoop->getHeader()->getModule()->getDataLayout();

  // Since it's desired that the load/store instructions be maintained in
  // "program order" for the interleaved access analysis, we have to visit the
  // blocks in the loop in reverse postorder (i.e., in a topological order).
  // Such an ordering will ensure that any load/store that may be executed
  // before a second load/store will precede the second load/store in
  // AccessStrideInfo.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
    for (auto &I : *BB) {
      auto *LI = dyn_cast<LoadInst>(&I);
      auto *SI = dyn_cast<StoreInst>(&I);
      if (!LI && !SI)
        continue;

      Value *Ptr = getLoadStorePointerOperand(&I);
      // We don't check wrapping here because we don't know yet if Ptr will be
      // part of a full group or a group with gaps. Checking wrapping for all
      // pointers (even those that end up in groups with no gaps) will be overly
      // conservative. For full groups, wrapping should be ok since if we would
      // wrap around the address space we would do a memory access at nullptr
      // even without the transformation. The wrapping checks are therefore
      // deferred until after we've formed the interleaved groups.
      int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, Strides,
                                    /*Assume=*/true, /*ShouldCheckWrap=*/false);

      const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
      PointerType *PtrTy = cast<PointerType>(Ptr->getType());
      uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());

      // An alignment of 0 means target ABI alignment.
      MaybeAlign Alignment = MaybeAlign(getLoadStoreAlignment(&I));
      if (!Alignment)
        Alignment = Align(DL.getABITypeAlignment(PtrTy->getElementType()));

      AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size, *Alignment);
    }
}

// Analyze interleaved accesses and collect them into interleaved load and
// store groups.
//
// When generating code for an interleaved load group, we effectively hoist all
// loads in the group to the location of the first load in program order. When
// generating code for an interleaved store group, we sink all stores to the
// location of the last store. This code motion can change the order of load
// and store instructions and may break dependences.
//
// The code generation strategy mentioned above ensures that we won't violate
// any write-after-read (WAR) dependences.
//
// E.g., for the WAR dependence:  a = A[i];      // (1)
//                                A[i] = b;      // (2)
//
// The store group of (2) is always inserted at or below (2), and the load
// group of (1) is always inserted at or above (1). Thus, the instructions will
// never be reordered. All other dependences are checked to ensure the
// correctness of the instruction reordering.
//
// The algorithm visits all memory accesses in the loop in bottom-up program
// order. Program order is established by traversing the blocks in the loop in
// reverse postorder when collecting the accesses.
//
// We visit the memory accesses in bottom-up order because it can simplify the
// construction of store groups in the presence of write-after-write (WAW)
// dependences.
//
// E.g., for the WAW dependence:  A[i] = a;      // (1)
//                                A[i] = b;      // (2)
//                                A[i + 1] = c;  // (3)
//
// We will first create a store group with (3) and (2). (1) can't be added to
// this group because it and (2) are dependent. However, (1) can be grouped
// with other accesses that may precede it in program order. Note that a
// bottom-up order does not imply that WAW dependences should not be checked.
void InterleavedAccessInfo::analyzeInterleaving(
                                 bool EnablePredicatedInterleavedMemAccesses) {
  LLVM_DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
  const ValueToValueMap &Strides = LAI->getSymbolicStrides();

  // Holds all accesses with a constant stride.
  MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
  collectConstStrideAccesses(AccessStrideInfo, Strides);

  if (AccessStrideInfo.empty())
    return;

  // Collect the dependences in the loop.
  collectDependences();

  // Holds all interleaved store groups temporarily.
  SmallSetVector<InterleaveGroup<Instruction> *, 4> StoreGroups;
  // Holds all interleaved load groups temporarily.
  SmallSetVector<InterleaveGroup<Instruction> *, 4> LoadGroups;

  // Search in bottom-up program order for pairs of accesses (A and B) that can
  // form interleaved load or store groups. In the algorithm below, access A
  // precedes access B in program order. We initialize a group for B in the
  // outer loop of the algorithm, and then in the inner loop, we attempt to
  // insert each A into B's group if:
  //
  //  1. A and B have the same stride,
  //  2. A and B have the same memory object size, and
  //  3. A belongs in B's group according to its distance from B.
  //
  // Special care is taken to ensure group formation will not break any
  // dependences.
  for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
       BI != E; ++BI) {
    Instruction *B = BI->first;
    StrideDescriptor DesB = BI->second;

    // Initialize a group for B if it has an allowable stride. Even if we don't
    // create a group for B, we continue with the bottom-up algorithm to ensure
    // we don't break any of B's dependences.
    InterleaveGroup<Instruction> *Group = nullptr;
    if (isStrided(DesB.Stride) &&
        (!isPredicated(B->getParent()) ||
         EnablePredicatedInterleavedMemAccesses)) {
      Group = getInterleaveGroup(B);
      if (!Group) {
        LLVM_DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B
                          << '\n');
        Group = createInterleaveGroup(B, DesB.Stride, DesB.Alignment);
      }
      if (B->mayWriteToMemory())
        StoreGroups.insert(Group);
      else
        LoadGroups.insert(Group);
    }

    for (auto AI = std::next(BI); AI != E; ++AI) {
      Instruction *A = AI->first;
      StrideDescriptor DesA = AI->second;

      // Our code motion strategy implies that we can't have dependences
      // between accesses in an interleaved group and other accesses located
      // between the first and last member of the group. Note that this also
      // means that a group can't have more than one member at a given offset.
      // The accesses in a group can have dependences with other accesses, but
      // we must ensure we don't extend the boundaries of the group such that
      // we encompass those dependent accesses.
      //
      // For example, assume we have the sequence of accesses shown below in a
      // stride-2 loop:
      //
      //  (1, 2) is a group | A[i]   = a;  // (1)
      //                    | A[i-1] = b;  // (2) |
      //                      A[i-3] = c;  // (3)
      //                      A[i]   = d;  // (4) | (2, 4) is not a group
      //
      // Because accesses (2) and (3) are dependent, we can group (2) with (1)
      // but not with (4). If we did, the dependent access (3) would be within
      // the boundaries of the (2, 4) group.
      if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) {
        // If a dependence exists and A is already in a group, we know that A
        // must be a store since A precedes B and WAR dependences are allowed.
        // Thus, A would be sunk below B. We release A's group to prevent this
        // illegal code motion. A will then be free to form another group with
        // instructions that precede it.
        if (isInterleaved(A)) {
          InterleaveGroup<Instruction> *StoreGroup = getInterleaveGroup(A);

          LLVM_DEBUG(dbgs() << "LV: Invalidated store group due to "
                               "dependence between "
                            << *A << " and " << *B << '\n');

          StoreGroups.remove(StoreGroup);
          releaseGroup(StoreGroup);
        }

        // If a dependence exists and A is not already in a group (or it was
        // and we just released it), B might be hoisted above A (if B is a
        // load) or another store might be sunk below A (if B is a store). In
        // either case, we can't add additional instructions to B's group. B
        // will only form a group with instructions that it precedes.
        break;
      }

      // At this point, we've checked for illegal code motion. If either A or B
      // isn't strided, there's nothing left to do.
      if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
        continue;

      // Ignore A if it's already in a group or isn't the same kind of memory
      // operation as B.
      // Note that mayReadFromMemory() isn't mutually exclusive with
      // mayWriteToMemory() in the case of atomic loads. We shouldn't see
      // those here; canVectorizeMemory() should have returned false unless
      // we were asked for optimization remarks.
      if (isInterleaved(A) ||
          (A->mayReadFromMemory() != B->mayReadFromMemory()) ||
          (A->mayWriteToMemory() != B->mayWriteToMemory()))
        continue;

      // Check rules 1 and 2. Ignore A if its stride or size is different from
      // that of B.
      if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
        continue;

      // Ignore A if the memory object of A and B don't belong to the same
      // address space.
      if (getLoadStoreAddressSpace(A) != getLoadStoreAddressSpace(B))
        continue;

      // Calculate the distance from A to B.
      const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
          PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
      if (!DistToB)
        continue;
      int64_t DistanceToB = DistToB->getAPInt().getSExtValue();

      // Check rule 3. Ignore A if its distance to B is not a multiple of the
      // size.
      if (DistanceToB % static_cast<int64_t>(DesB.Size))
        continue;

      // All members of a predicated interleave-group must have the same
      // predicate, and currently must reside in the same BB.
      BasicBlock *BlockA = A->getParent();
      BasicBlock *BlockB = B->getParent();
      if ((isPredicated(BlockA) || isPredicated(BlockB)) &&
          (!EnablePredicatedInterleavedMemAccesses || BlockA != BlockB))
        continue;

      // The index of A is the index of B plus A's distance to B in multiples
      // of the size.
      int IndexA =
          Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);

      // Try to insert A into B's group.
      if (Group->insertMember(A, IndexA, DesA.Alignment)) {
        LLVM_DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
                          << "    into the interleave group with" << *B
                          << '\n');
        InterleaveGroupMap[A] = Group;

        // Set the first load in program order as the insert position.
        if (A->mayReadFromMemory())
          Group->setInsertPos(A);
      }
    } // Iteration over A accesses.
  }   // Iteration over B accesses.

  // Remove interleaved store groups with gaps.
  for (auto *Group : StoreGroups)
    if (Group->getNumMembers() != Group->getFactor()) {
      LLVM_DEBUG(
          dbgs() << "LV: Invalidate candidate interleaved store group due "
                    "to gaps.\n");
      releaseGroup(Group);
    }
  // Remove interleaved groups with gaps (currently only loads) whose memory
  // accesses may wrap around. We have to revisit the getPtrStride analysis,
  // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does
  // not check wrapping (see documentation there).
  // FORNOW we use Assume=false;
  // TODO: Change to Assume=true but making sure we don't exceed the threshold
  // of runtime SCEV assumptions checks (thereby potentially failing to
  // vectorize altogether).
  // Additional optional optimizations:
  // TODO: If we are peeling the loop and we know that the first pointer doesn't
  // wrap then we can deduce that all pointers in the group don't wrap.
  // This means that we can forcefully peel the loop in order to only have to
  // check the first pointer for no-wrap. Once we change to Assume=true, we
  // will only need at most one runtime check per interleaved group.
  for (auto *Group : LoadGroups) {
    // Case 1: A full group. We can skip the checks because, for full groups,
    // if the wide load were to wrap around the address space, we would access
    // memory at nullptr even without the transformation.
    if (Group->getNumMembers() == Group->getFactor())
      continue;

    // Case 2: If first and last members of the group don't wrap this implies
    // that all the pointers in the group don't wrap.
    // So we check only group member 0 (which is always guaranteed to exist),
    // and group member Factor - 1; if the latter doesn't exist we rely on
    // peeling (if it is a non-reversed access -- see Case 3).
    Value *FirstMemberPtr = getLoadStorePointerOperand(Group->getMember(0));
    if (!getPtrStride(PSE, FirstMemberPtr, TheLoop, Strides, /*Assume=*/false,
                      /*ShouldCheckWrap=*/true)) {
      LLVM_DEBUG(
          dbgs() << "LV: Invalidate candidate interleaved group due to "
                    "first group member potentially pointer-wrapping.\n");
      releaseGroup(Group);
      continue;
    }
    Instruction *LastMember = Group->getMember(Group->getFactor() - 1);
    if (LastMember) {
      Value *LastMemberPtr = getLoadStorePointerOperand(LastMember);
      if (!getPtrStride(PSE, LastMemberPtr, TheLoop, Strides, /*Assume=*/false,
                        /*ShouldCheckWrap=*/true)) {
        LLVM_DEBUG(
            dbgs() << "LV: Invalidate candidate interleaved group due to "
                      "last group member potentially pointer-wrapping.\n");
        releaseGroup(Group);
      }
    } else {
      // Case 3: A non-reversed interleaved load group with gaps: We need
      // to execute at least one scalar epilogue iteration. This will ensure
      // we don't speculatively access memory out-of-bounds. We only need
      // to look for a member at index factor - 1, since every group must have
      // a member at index zero.
      if (Group->isReverse()) {
        LLVM_DEBUG(
            dbgs() << "LV: Invalidate candidate interleaved group due to "
                      "a reverse access with gaps.\n");
        releaseGroup(Group);
        continue;
      }
      LLVM_DEBUG(
          dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
      RequiresScalarEpilogue = true;
    }
  }
}

void InterleavedAccessInfo::invalidateGroupsRequiringScalarEpilogue() {
  // If no group had triggered the requirement to create an epilogue loop,
  // there is nothing to do.
  if (!requiresScalarEpilogue())
    return;

  // Avoid releasing a Group twice.
  SmallPtrSet<InterleaveGroup<Instruction> *, 4> DelSet;
  for (auto &I : InterleaveGroupMap) {
    InterleaveGroup<Instruction> *Group = I.second;
    if (Group->requiresScalarEpilogue())
      DelSet.insert(Group);
  }
  for (auto *Ptr : DelSet) {
    LLVM_DEBUG(
        dbgs()
        << "LV: Invalidate candidate interleaved group due to gaps that "
           "require a scalar epilogue (not allowed under optsize) and cannot "
1142           "be masked (not enabled). \n");
    releaseGroup(Ptr);
  }

  RequiresScalarEpilogue = false;
}

template <typename InstT>
void InterleaveGroup<InstT>::addMetadata(InstT *NewInst) const {
  llvm_unreachable("addMetadata can only be used for Instruction");
}

namespace llvm {
template <>
void InterleaveGroup<Instruction>::addMetadata(Instruction *NewInst) const {
  SmallVector<Value *, 4> VL;
  std::transform(Members.begin(), Members.end(), std::back_inserter(VL),
                 [](std::pair<int, Instruction *> p) { return p.second; });
  propagateMetadata(NewInst, VL);
}
} // namespace llvm

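// A sketch of the attribute contents this function reads (the names below are
// illustrative): the VFABI::MappingsAttrName attribute carries a
// comma-separated list of mangled variants, e.g.
// "_ZGV_LLVM_N2v_foo(vector_foo),_ZGV_LLVM_N4v_foo(vector_foo_4)".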
void VFABI::getVectorVariantNames(
    const CallInst &CI, SmallVectorImpl<std::string> &VariantMappings) {
  const StringRef S =
      CI.getAttribute(AttributeList::FunctionIndex, VFABI::MappingsAttrName)
          .getValueAsString();
  if (S.empty())
    return;

  SmallVector<StringRef, 8> ListAttr;
  S.split(ListAttr, ",");

  for (auto &S : SetVector<StringRef>(ListAttr.begin(), ListAttr.end())) {
#ifndef NDEBUG
    Optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S);
    assert(Info.hasValue() && "Invalid name for a VFABI variant.");
    assert(CI.getModule()->getFunction(Info.getValue().VectorName) &&
           "Vector function is missing.");
#endif
    VariantMappings.push_back(S);
  }
}

bool VFShape::hasValidParameterList() const {
  for (unsigned Pos = 0, NumParams = Parameters.size(); Pos < NumParams;
       ++Pos) {
    assert(Parameters[Pos].ParamPos == Pos && "Broken parameter list.");

    switch (Parameters[Pos].ParamKind) {
    default: // Nothing to check.
      break;
    case VFParamKind::OMP_Linear:
    case VFParamKind::OMP_LinearRef:
    case VFParamKind::OMP_LinearVal:
    case VFParamKind::OMP_LinearUVal:
      // Compile time linear steps must be non-zero.
      if (Parameters[Pos].LinearStepOrPos == 0)
        return false;
      break;
    case VFParamKind::OMP_LinearPos:
    case VFParamKind::OMP_LinearRefPos:
    case VFParamKind::OMP_LinearValPos:
    case VFParamKind::OMP_LinearUValPos:
      // The runtime linear step must refer to some other parameter in the
      // signature.
      if (Parameters[Pos].LinearStepOrPos >= int(NumParams))
        return false;
      // The linear step parameter must be marked as uniform.
      if (Parameters[Parameters[Pos].LinearStepOrPos].ParamKind !=
          VFParamKind::OMP_Uniform)
        return false;
      // The linear step parameter can't point at itself.
      if (Parameters[Pos].LinearStepOrPos == int(Pos))
        return false;
      break;
    case VFParamKind::GlobalPredicate:
      // The global predicate must be unique and can be placed anywhere in the
      // signature.
      for (unsigned NextPos = Pos + 1; NextPos < NumParams; ++NextPos)
        if (Parameters[NextPos].ParamKind == VFParamKind::GlobalPredicate)
          return false;
      break;
    }
  }
  return true;
}