1151497Sru//===----------- VectorUtils.cpp - Vectorizer utility functions -----------===// 2151497Sru// 3151497Sru// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4151497Sru// See https://llvm.org/LICENSE.txt for license information. 5151497Sru// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6151497Sru// 7151497Sru//===----------------------------------------------------------------------===// 8151497Sru// 9151497Sru// This file defines vectorizer utilities. 10151497Sru// 11151497Sru//===----------------------------------------------------------------------===// 12151497Sru 13151497Sru#include "llvm/Analysis/VectorUtils.h" 14151497Sru#include "llvm/ADT/EquivalenceClasses.h" 15151497Sru#include "llvm/Analysis/DemandedBits.h" 16151497Sru#include "llvm/Analysis/LoopInfo.h" 17151497Sru#include "llvm/Analysis/LoopIterator.h" 18151497Sru#include "llvm/Analysis/ScalarEvolution.h" 19151497Sru#include "llvm/Analysis/ScalarEvolutionExpressions.h" 20151497Sru#include "llvm/Analysis/TargetTransformInfo.h" 21151497Sru#include "llvm/Analysis/ValueTracking.h" 22151497Sru#include "llvm/IR/Constants.h" 23151497Sru#include "llvm/IR/GetElementPtrTypeIterator.h" 24151497Sru#include "llvm/IR/IRBuilder.h" 25151497Sru#include "llvm/IR/PatternMatch.h" 26151497Sru#include "llvm/IR/Value.h" 27151497Sru#include "llvm/Support/CommandLine.h" 28151497Sru 29151497Sru#define DEBUG_TYPE "vectorutils" 30151497Sru 31151497Sruusing namespace llvm; 32151497Sruusing namespace llvm::PatternMatch; 33151497Sru 34151497Sru/// Maximum factor for an interleaved memory access. 35151497Srustatic cl::opt<unsigned> MaxInterleaveGroupFactor( 36151497Sru "max-interleave-group-factor", cl::Hidden, 37151497Sru cl::desc("Maximum factor for an interleaved access group (default = 8)"), 38151497Sru cl::init(8)); 39151497Sru 40151497Sru/// Return true if all of the intrinsic's arguments and return type are scalars 41151497Sru/// for the scalar form of the intrinsic, and vectors for the vector form of the 42151497Sru/// intrinsic (except operands that are marked as always being scalar by 43151497Sru/// isVectorIntrinsicWithScalarOpAtArg). 44151497Srubool llvm::isTriviallyVectorizable(Intrinsic::ID ID) { 45151497Sru switch (ID) { 46151497Sru case Intrinsic::abs: // Begin integer bit-manipulation. 47151497Sru case Intrinsic::bswap: 48151497Sru case Intrinsic::bitreverse: 49151497Sru case Intrinsic::ctpop: 50151497Sru case Intrinsic::ctlz: 51151497Sru case Intrinsic::cttz: 52151497Sru case Intrinsic::fshl: 53151497Sru case Intrinsic::fshr: 54151497Sru case Intrinsic::smax: 55151497Sru case Intrinsic::smin: 56151497Sru case Intrinsic::umax: 57151497Sru case Intrinsic::umin: 58151497Sru case Intrinsic::sadd_sat: 59151497Sru case Intrinsic::ssub_sat: 60151497Sru case Intrinsic::uadd_sat: 61151497Sru case Intrinsic::usub_sat: 62151497Sru case Intrinsic::smul_fix: 63151497Sru case Intrinsic::smul_fix_sat: 64151497Sru case Intrinsic::umul_fix: 65151497Sru case Intrinsic::umul_fix_sat: 66151497Sru case Intrinsic::sqrt: // Begin floating-point. 67151497Sru case Intrinsic::sin: 68151497Sru case Intrinsic::cos: 69151497Sru case Intrinsic::exp: 70151497Sru case Intrinsic::exp2: 71151497Sru case Intrinsic::log: 72151497Sru case Intrinsic::log10: 73151497Sru case Intrinsic::log2: 74151497Sru case Intrinsic::fabs: 75151497Sru case Intrinsic::minnum: 76151497Sru case Intrinsic::maxnum: 77151497Sru case Intrinsic::minimum: 78151497Sru case Intrinsic::maximum: 79151497Sru case Intrinsic::copysign: 80151497Sru case Intrinsic::floor: 81151497Sru case Intrinsic::ceil: 82151497Sru case Intrinsic::trunc: 83151497Sru case Intrinsic::rint: 84151497Sru case Intrinsic::nearbyint: 85151497Sru case Intrinsic::round: 86151497Sru case Intrinsic::roundeven: 87151497Sru case Intrinsic::pow: 88151497Sru case Intrinsic::fma: 89151497Sru case Intrinsic::fmuladd: 90151497Sru case Intrinsic::powi: 91151497Sru case Intrinsic::canonicalize: 92151497Sru case Intrinsic::fptosi_sat: 93151497Sru case Intrinsic::fptoui_sat: 94151497Sru return true; 95151497Sru default: 96151497Sru return false; 97151497Sru } 98151497Sru} 99151497Sru 100151497Sru/// Identifies if the vector form of the intrinsic has a scalar operand. 101151497Srubool llvm::isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, 102151497Sru unsigned ScalarOpdIdx) { 103151497Sru switch (ID) { 104151497Sru case Intrinsic::abs: 105151497Sru case Intrinsic::ctlz: 106151497Sru case Intrinsic::cttz: 107151497Sru case Intrinsic::powi: 108151497Sru return (ScalarOpdIdx == 1); 109151497Sru case Intrinsic::smul_fix: 110151497Sru case Intrinsic::smul_fix_sat: 111151497Sru case Intrinsic::umul_fix: 112151497Sru case Intrinsic::umul_fix_sat: 113151497Sru return (ScalarOpdIdx == 2); 114151497Sru default: 115151497Sru return false; 116151497Sru } 117151497Sru} 118151497Sru 119151497Srubool llvm::isVectorIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID, 120151497Sru unsigned OpdIdx) { 121151497Sru switch (ID) { 122151497Sru case Intrinsic::fptosi_sat: 123151497Sru case Intrinsic::fptoui_sat: 124151497Sru return OpdIdx == 0; 125151497Sru case Intrinsic::powi: 126151497Sru return OpdIdx == 1; 127151497Sru default: 128151497Sru return false; 129151497Sru } 130151497Sru} 131151497Sru 132151497Sru/// Returns intrinsic ID for call. 133151497Sru/// For the input call instruction it finds mapping intrinsic and returns 134151497Sru/// its ID, in case it does not found it return not_intrinsic. 135151497SruIntrinsic::ID llvm::getVectorIntrinsicIDForCall(const CallInst *CI, 136151497Sru const TargetLibraryInfo *TLI) { 137151497Sru Intrinsic::ID ID = getIntrinsicForCallSite(*CI, TLI); 138151497Sru if (ID == Intrinsic::not_intrinsic) 139151497Sru return Intrinsic::not_intrinsic; 140151497Sru 141151497Sru if (isTriviallyVectorizable(ID) || ID == Intrinsic::lifetime_start || 142151497Sru ID == Intrinsic::lifetime_end || ID == Intrinsic::assume || 143151497Sru ID == Intrinsic::experimental_noalias_scope_decl || 144151497Sru ID == Intrinsic::sideeffect || ID == Intrinsic::pseudoprobe) 145151497Sru return ID; 146151497Sru return Intrinsic::not_intrinsic; 147151497Sru} 148151497Sru 149151497Sru/// Find the operand of the GEP that should be checked for consecutive 150151497Sru/// stores. This ignores trailing indices that have no effect on the final 151151497Sru/// pointer. 152151497Sruunsigned llvm::getGEPInductionOperand(const GetElementPtrInst *Gep) { 153151497Sru const DataLayout &DL = Gep->getModule()->getDataLayout(); 154151497Sru unsigned LastOperand = Gep->getNumOperands() - 1; 155151497Sru TypeSize GEPAllocSize = DL.getTypeAllocSize(Gep->getResultElementType()); 156151497Sru 157151497Sru // Walk backwards and try to peel off zeros. 158151497Sru while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) { 159151497Sru // Find the type we're currently indexing into. 160151497Sru gep_type_iterator GEPTI = gep_type_begin(Gep); 161151497Sru std::advance(GEPTI, LastOperand - 2); 162151497Sru 163151497Sru // If it's a type with the same allocation size as the result of the GEP we 164151497Sru // can peel off the zero index. 165151497Sru if (DL.getTypeAllocSize(GEPTI.getIndexedType()) != GEPAllocSize) 166151497Sru break; 167151497Sru --LastOperand; 168151497Sru } 169151497Sru 170151497Sru return LastOperand; 171151497Sru} 172151497Sru 173151497Sru/// If the argument is a GEP, then returns the operand identified by 174151497Sru/// getGEPInductionOperand. However, if there is some other non-loop-invariant 175151497Sru/// operand, it returns that instead. 176151497SruValue *llvm::stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) { 177151497Sru GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr); 178151497Sru if (!GEP) 179151497Sru return Ptr; 180151497Sru 181151497Sru unsigned InductionOperand = getGEPInductionOperand(GEP); 182151497Sru 183151497Sru // Check that all of the gep indices are uniform except for our induction 184151497Sru // operand. 185151497Sru for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i) 186151497Sru if (i != InductionOperand && 187151497Sru !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(i)), Lp)) 188151497Sru return Ptr; 189151497Sru return GEP->getOperand(InductionOperand); 190151497Sru} 191151497Sru 192151497Sru/// If a value has only one user that is a CastInst, return it. 193151497SruValue *llvm::getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty) { 194151497Sru Value *UniqueCast = nullptr; 195151497Sru for (User *U : Ptr->users()) { 196151497Sru CastInst *CI = dyn_cast<CastInst>(U); 197151497Sru if (CI && CI->getType() == Ty) { 198151497Sru if (!UniqueCast) 199151497Sru UniqueCast = CI; 200151497Sru else 201151497Sru return nullptr; 202151497Sru } 203151497Sru } 204151497Sru return UniqueCast; 205151497Sru} 206151497Sru 207151497Sru/// Get the stride of a pointer access in a loop. Looks for symbolic 208151497Sru/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise. 209151497SruValue *llvm::getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) { 210151497Sru auto *PtrTy = dyn_cast<PointerType>(Ptr->getType()); 211151497Sru if (!PtrTy || PtrTy->isAggregateType()) 212151497Sru return nullptr; 213151497Sru 214151497Sru // Try to remove a gep instruction to make the pointer (actually index at this 215151497Sru // point) easier analyzable. If OrigPtr is equal to Ptr we are analyzing the 216151497Sru // pointer, otherwise, we are analyzing the index. 217151497Sru Value *OrigPtr = Ptr; 218151497Sru 219151497Sru // The size of the pointer access. 220151497Sru int64_t PtrAccessSize = 1; 221151497Sru 222151497Sru Ptr = stripGetElementPtr(Ptr, SE, Lp); 223151497Sru const SCEV *V = SE->getSCEV(Ptr); 224151497Sru 225151497Sru if (Ptr != OrigPtr) 226151497Sru // Strip off casts. 227151497Sru while (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(V)) 228151497Sru V = C->getOperand(); 229151497Sru 230151497Sru const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V); 231151497Sru if (!S) 232151497Sru return nullptr; 233151497Sru 234151497Sru V = S->getStepRecurrence(*SE); 235151497Sru if (!V) 236151497Sru return nullptr; 237151497Sru 238151497Sru // Strip off the size of access multiplication if we are still analyzing the 239151497Sru // pointer. 240151497Sru if (OrigPtr == Ptr) { 241151497Sru if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) { 242151497Sru if (M->getOperand(0)->getSCEVType() != scConstant) 243151497Sru return nullptr; 244151497Sru 245151497Sru const APInt &APStepVal = cast<SCEVConstant>(M->getOperand(0))->getAPInt(); 246151497Sru 247151497Sru // Huge step value - give up. 248151497Sru if (APStepVal.getBitWidth() > 64) 249151497Sru return nullptr; 250151497Sru 251151497Sru int64_t StepVal = APStepVal.getSExtValue(); 252151497Sru if (PtrAccessSize != StepVal) 253151497Sru return nullptr; 254151497Sru V = M->getOperand(1); 255151497Sru } 256151497Sru } 257151497Sru 258151497Sru // Strip off casts. 259151497Sru Type *StripedOffRecurrenceCast = nullptr; 260151497Sru if (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(V)) { 261151497Sru StripedOffRecurrenceCast = C->getType(); 262151497Sru V = C->getOperand(); 263151497Sru } 264151497Sru 265151497Sru // Look for the loop invariant symbolic value. 266151497Sru const SCEVUnknown *U = dyn_cast<SCEVUnknown>(V); 267151497Sru if (!U) 268151497Sru return nullptr; 269151497Sru 270151497Sru Value *Stride = U->getValue(); 271151497Sru if (!Lp->isLoopInvariant(Stride)) 272151497Sru return nullptr; 273151497Sru 274151497Sru // If we have stripped off the recurrence cast we have to make sure that we 275151497Sru // return the value that is used in this loop so that we can replace it later. 276151497Sru if (StripedOffRecurrenceCast) 277151497Sru Stride = getUniqueCastUse(Stride, Lp, StripedOffRecurrenceCast); 278151497Sru 279151497Sru return Stride; 280151497Sru} 281151497Sru 282151497Sru/// Given a vector and an element number, see if the scalar value is 283151497Sru/// already around as a register, for example if it were inserted then extracted 284151497Sru/// from the vector. 285151497SruValue *llvm::findScalarElement(Value *V, unsigned EltNo) { 286151497Sru assert(V->getType()->isVectorTy() && "Not looking at a vector?"); 287151497Sru VectorType *VTy = cast<VectorType>(V->getType()); 288151497Sru // For fixed-length vector, return undef for out of range access. 289151497Sru if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) { 290151497Sru unsigned Width = FVTy->getNumElements(); 291151497Sru if (EltNo >= Width) 292151497Sru return UndefValue::get(FVTy->getElementType()); 293151497Sru } 294151497Sru 295151497Sru if (Constant *C = dyn_cast<Constant>(V)) 296151497Sru return C->getAggregateElement(EltNo); 297151497Sru 298151497Sru if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) { 299151497Sru // If this is an insert to a variable element, we don't know what it is. 300151497Sru if (!isa<ConstantInt>(III->getOperand(2))) 301151497Sru return nullptr; 302151497Sru unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue(); 303151497Sru 304151497Sru // If this is an insert to the element we are looking for, return the 305151497Sru // inserted value. 306151497Sru if (EltNo == IIElt) 307151497Sru return III->getOperand(1); 308151497Sru 309151497Sru // Guard against infinite loop on malformed, unreachable IR. 310151497Sru if (III == III->getOperand(0)) 311151497Sru return nullptr; 312151497Sru 313151497Sru // Otherwise, the insertelement doesn't modify the value, recurse on its 314151497Sru // vector input. 315151497Sru return findScalarElement(III->getOperand(0), EltNo); 316151497Sru } 317151497Sru 318151497Sru ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V); 319151497Sru // Restrict the following transformation to fixed-length vector. 320151497Sru if (SVI && isa<FixedVectorType>(SVI->getType())) { 321151497Sru unsigned LHSWidth = 322151497Sru cast<FixedVectorType>(SVI->getOperand(0)->getType())->getNumElements(); 323151497Sru int InEl = SVI->getMaskValue(EltNo); 324151497Sru if (InEl < 0) 325151497Sru return UndefValue::get(VTy->getElementType()); 326151497Sru if (InEl < (int)LHSWidth) 327151497Sru return findScalarElement(SVI->getOperand(0), InEl); 328151497Sru return findScalarElement(SVI->getOperand(1), InEl - LHSWidth); 329151497Sru } 330151497Sru 331151497Sru // Extract a value from a vector add operation with a constant zero. 332151497Sru // TODO: Use getBinOpIdentity() to generalize this. 333151497Sru Value *Val; Constant *C; 334151497Sru if (match(V, m_Add(m_Value(Val), m_Constant(C)))) 335151497Sru if (Constant *Elt = C->getAggregateElement(EltNo)) 336151497Sru if (Elt->isNullValue()) 337151497Sru return findScalarElement(Val, EltNo); 338151497Sru 339151497Sru // If the vector is a splat then we can trivially find the scalar element. 340151497Sru if (isa<ScalableVectorType>(VTy)) 341151497Sru if (Value *Splat = getSplatValue(V)) 342151497Sru if (EltNo < VTy->getElementCount().getKnownMinValue()) 343151497Sru return Splat; 344151497Sru 345151497Sru // Otherwise, we don't know. 346151497Sru return nullptr; 347151497Sru} 348151497Sru 349151497Sruint llvm::getSplatIndex(ArrayRef<int> Mask) { 350151497Sru int SplatIndex = -1; 351151497Sru for (int M : Mask) { 352151497Sru // Ignore invalid (undefined) mask elements. 353151497Sru if (M < 0) 354151497Sru continue; 355151497Sru 356151497Sru // There can be only 1 non-negative mask element value if this is a splat. 357151497Sru if (SplatIndex != -1 && SplatIndex != M) 358151497Sru return -1; 359151497Sru 360151497Sru // Initialize the splat index to the 1st non-negative mask element. 361151497Sru SplatIndex = M; 362151497Sru } 363151497Sru assert((SplatIndex == -1 || SplatIndex >= 0) && "Negative index?"); 364151497Sru return SplatIndex; 365151497Sru} 366151497Sru 367151497Sru/// Get splat value if the input is a splat vector or return nullptr. 368151497Sru/// This function is not fully general. It checks only 2 cases: 369151497Sru/// the input value is (1) a splat constant vector or (2) a sequence 370151497Sru/// of instructions that broadcasts a scalar at element 0. 371151497SruValue *llvm::getSplatValue(const Value *V) { 372151497Sru if (isa<VectorType>(V->getType())) 373151497Sru if (auto *C = dyn_cast<Constant>(V)) 374151497Sru return C->getSplatValue(); 375151497Sru 376151497Sru // shuf (inselt ?, Splat, 0), ?, <0, undef, 0, ...> 377151497Sru Value *Splat; 378151497Sru if (match(V, 379151497Sru m_Shuffle(m_InsertElt(m_Value(), m_Value(Splat), m_ZeroInt()), 380151497Sru m_Value(), m_ZeroMask()))) 381151497Sru return Splat; 382151497Sru 383151497Sru return nullptr; 384151497Sru} 385151497Sru 386151497Srubool llvm::isSplatValue(const Value *V, int Index, unsigned Depth) { 387151497Sru assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth"); 388151497Sru 389151497Sru if (isa<VectorType>(V->getType())) { 390151497Sru if (isa<UndefValue>(V)) 391151497Sru return true; 392151497Sru // FIXME: We can allow undefs, but if Index was specified, we may want to 393151497Sru // check that the constant is defined at that index. 394151497Sru if (auto *C = dyn_cast<Constant>(V)) 395151497Sru return C->getSplatValue() != nullptr; 396151497Sru } 397151497Sru 398151497Sru if (auto *Shuf = dyn_cast<ShuffleVectorInst>(V)) { 399151497Sru // FIXME: We can safely allow undefs here. If Index was specified, we will 400151497Sru // check that the mask elt is defined at the required index. 401151497Sru if (!all_equal(Shuf->getShuffleMask())) 402151497Sru return false; 403151497Sru 404151497Sru // Match any index. 405151497Sru if (Index == -1) 406151497Sru return true; 407151497Sru 408151497Sru // Match a specific element. The mask should be defined at and match the 409151497Sru // specified index. 410151497Sru return Shuf->getMaskValue(Index) == Index; 411151497Sru } 412151497Sru 413151497Sru // The remaining tests are all recursive, so bail out if we hit the limit. 414151497Sru if (Depth++ == MaxAnalysisRecursionDepth) 415151497Sru return false; 416151497Sru 417151497Sru // If both operands of a binop are splats, the result is a splat. 418151497Sru Value *X, *Y, *Z; 419151497Sru if (match(V, m_BinOp(m_Value(X), m_Value(Y)))) 420151497Sru return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth); 421151497Sru 422151497Sru // If all operands of a select are splats, the result is a splat. 423151497Sru if (match(V, m_Select(m_Value(X), m_Value(Y), m_Value(Z)))) 424151497Sru return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth) && 425151497Sru isSplatValue(Z, Index, Depth); 426151497Sru 427151497Sru // TODO: Add support for unary ops (fneg), casts, intrinsics (overflow ops). 428151497Sru 429151497Sru return false; 430151497Sru} 431151497Sru 432151497Srubool llvm::getShuffleDemandedElts(int SrcWidth, ArrayRef<int> Mask, 433151497Sru const APInt &DemandedElts, APInt &DemandedLHS, 434151497Sru APInt &DemandedRHS, bool AllowUndefElts) { 435151497Sru DemandedLHS = DemandedRHS = APInt::getZero(SrcWidth); 436151497Sru 437151497Sru // Early out if we don't demand any elements. 438151497Sru if (DemandedElts.isZero()) 439151497Sru return true; 440151497Sru 441151497Sru // Simple case of a shuffle with zeroinitializer. 442151497Sru if (all_of(Mask, [](int Elt) { return Elt == 0; })) { 443151497Sru DemandedLHS.setBit(0); 444151497Sru return true; 445151497Sru } 446151497Sru 447151497Sru for (unsigned I = 0, E = Mask.size(); I != E; ++I) { 448151497Sru int M = Mask[I]; 449151497Sru assert((-1 <= M) && (M < (SrcWidth * 2)) && 450151497Sru "Invalid shuffle mask constant"); 451151497Sru 452151497Sru if (!DemandedElts[I] || (AllowUndefElts && (M < 0))) 453151497Sru continue; 454151497Sru 455151497Sru // For undef elements, we don't know anything about the common state of 456151497Sru // the shuffle result. 457151497Sru if (M < 0) 458151497Sru return false; 459151497Sru 460151497Sru if (M < SrcWidth) 461151497Sru DemandedLHS.setBit(M); 462151497Sru else 463151497Sru DemandedRHS.setBit(M - SrcWidth); 464151497Sru } 465151497Sru 466151497Sru return true; 467151497Sru} 468151497Sru 469151497Sruvoid llvm::narrowShuffleMaskElts(int Scale, ArrayRef<int> Mask, 470151497Sru SmallVectorImpl<int> &ScaledMask) { 471151497Sru assert(Scale > 0 && "Unexpected scaling factor"); 472151497Sru 473151497Sru // Fast-path: if no scaling, then it is just a copy. 474151497Sru if (Scale == 1) { 475151497Sru ScaledMask.assign(Mask.begin(), Mask.end()); 476151497Sru return; 477151497Sru } 478151497Sru 479151497Sru ScaledMask.clear(); 480151497Sru for (int MaskElt : Mask) { 481151497Sru if (MaskElt >= 0) { 482151497Sru assert(((uint64_t)Scale * MaskElt + (Scale - 1)) <= INT32_MAX && 483151497Sru "Overflowed 32-bits"); 484151497Sru } 485151497Sru for (int SliceElt = 0; SliceElt != Scale; ++SliceElt) 486151497Sru ScaledMask.push_back(MaskElt < 0 ? MaskElt : Scale * MaskElt + SliceElt); 487151497Sru } 488151497Sru} 489151497Sru 490151497Srubool llvm::widenShuffleMaskElts(int Scale, ArrayRef<int> Mask, 491151497Sru SmallVectorImpl<int> &ScaledMask) { 492151497Sru assert(Scale > 0 && "Unexpected scaling factor"); 493151497Sru 494151497Sru // Fast-path: if no scaling, then it is just a copy. 495151497Sru if (Scale == 1) { 496151497Sru ScaledMask.assign(Mask.begin(), Mask.end()); 497151497Sru return true; 498151497Sru } 499151497Sru 500151497Sru // We must map the original elements down evenly to a type with less elements. 501151497Sru int NumElts = Mask.size(); 502151497Sru if (NumElts % Scale != 0) 503151497Sru return false; 504151497Sru 505151497Sru ScaledMask.clear(); 506151497Sru ScaledMask.reserve(NumElts / Scale); 507151497Sru 508151497Sru // Step through the input mask by splitting into Scale-sized slices. 509151497Sru do { 510151497Sru ArrayRef<int> MaskSlice = Mask.take_front(Scale); 511151497Sru assert((int)MaskSlice.size() == Scale && "Expected Scale-sized slice."); 512151497Sru 513151497Sru // The first element of the slice determines how we evaluate this slice. 514151497Sru int SliceFront = MaskSlice.front(); 515151497Sru if (SliceFront < 0) { 516151497Sru // Negative values (undef or other "sentinel" values) must be equal across 517151497Sru // the entire slice. 518151497Sru if (!all_equal(MaskSlice)) 519151497Sru return false; 520151497Sru ScaledMask.push_back(SliceFront); 521151497Sru } else { 522151497Sru // A positive mask element must be cleanly divisible. 523151497Sru if (SliceFront % Scale != 0) 524151497Sru return false; 525151497Sru // Elements of the slice must be consecutive. 526151497Sru for (int i = 1; i < Scale; ++i) 527151497Sru if (MaskSlice[i] != SliceFront + i) 528151497Sru return false; 529151497Sru ScaledMask.push_back(SliceFront / Scale); 530151497Sru } 531151497Sru Mask = Mask.drop_front(Scale); 532151497Sru } while (!Mask.empty()); 533151497Sru 534151497Sru assert((int)ScaledMask.size() * Scale == NumElts && "Unexpected scaled mask"); 535151497Sru 536151497Sru // All elements of the original mask can be scaled down to map to the elements 537151497Sru // of a mask with wider elements. 538151497Sru return true; 539151497Sru} 540151497Sru 541151497Sruvoid llvm::getShuffleMaskWithWidestElts(ArrayRef<int> Mask, 542151497Sru SmallVectorImpl<int> &ScaledMask) { 543151497Sru std::array<SmallVector<int, 16>, 2> TmpMasks; 544151497Sru SmallVectorImpl<int> *Output = &TmpMasks[0], *Tmp = &TmpMasks[1]; 545151497Sru ArrayRef<int> InputMask = Mask; 546151497Sru for (unsigned Scale = 2; Scale <= InputMask.size(); ++Scale) { 547151497Sru while (widenShuffleMaskElts(Scale, InputMask, *Output)) { 548151497Sru InputMask = *Output; 549151497Sru std::swap(Output, Tmp); 550151497Sru } 551151497Sru } 552151497Sru ScaledMask.assign(InputMask.begin(), InputMask.end()); 553151497Sru} 554151497Sru 555151497Sruvoid llvm::processShuffleMasks( 556151497Sru ArrayRef<int> Mask, unsigned NumOfSrcRegs, unsigned NumOfDestRegs, 557151497Sru unsigned NumOfUsedRegs, function_ref<void()> NoInputAction, 558151497Sru function_ref<void(ArrayRef<int>, unsigned, unsigned)> SingleInputAction, 559151497Sru function_ref<void(ArrayRef<int>, unsigned, unsigned)> ManyInputsAction) { 560151497Sru SmallVector<SmallVector<SmallVector<int>>> Res(NumOfDestRegs); 561151497Sru // Try to perform better estimation of the permutation. 562151497Sru // 1. Split the source/destination vectors into real registers. 563151497Sru // 2. Do the mask analysis to identify which real registers are 564151497Sru // permuted. 565151497Sru int Sz = Mask.size(); 566151497Sru unsigned SzDest = Sz / NumOfDestRegs; 567151497Sru unsigned SzSrc = Sz / NumOfSrcRegs; 568151497Sru for (unsigned I = 0; I < NumOfDestRegs; ++I) { 569151497Sru auto &RegMasks = Res[I]; 570151497Sru RegMasks.assign(NumOfSrcRegs, {}); 571151497Sru // Check that the values in dest registers are in the one src 572151497Sru // register. 573151497Sru for (unsigned K = 0; K < SzDest; ++K) { 574151497Sru int Idx = I * SzDest + K; 575151497Sru if (Idx == Sz) 576151497Sru break; 577151497Sru if (Mask[Idx] >= Sz || Mask[Idx] == UndefMaskElem) 578151497Sru continue; 579151497Sru int SrcRegIdx = Mask[Idx] / SzSrc; 580151497Sru // Add a cost of PermuteTwoSrc for each new source register permute, 581151497Sru // if we have more than one source registers. 582151497Sru if (RegMasks[SrcRegIdx].empty()) 583151497Sru RegMasks[SrcRegIdx].assign(SzDest, UndefMaskElem); 584151497Sru RegMasks[SrcRegIdx][K] = Mask[Idx] % SzSrc; 585151497Sru } 586151497Sru } 587151497Sru // Process split mask. 588151497Sru for (unsigned I = 0; I < NumOfUsedRegs; ++I) { 589151497Sru auto &Dest = Res[I]; 590151497Sru int NumSrcRegs = 591151497Sru count_if(Dest, [](ArrayRef<int> Mask) { return !Mask.empty(); }); 592151497Sru switch (NumSrcRegs) { 593151497Sru case 0: 594151497Sru // No input vectors were used! 595151497Sru NoInputAction(); 596151497Sru break; 597151497Sru case 1: { 598151497Sru // Find the only mask with at least single undef mask elem. 599151497Sru auto *It = 600151497Sru find_if(Dest, [](ArrayRef<int> Mask) { return !Mask.empty(); }); 601151497Sru unsigned SrcReg = std::distance(Dest.begin(), It); 602151497Sru SingleInputAction(*It, SrcReg, I); 603151497Sru break; 604151497Sru } 605151497Sru default: { 606151497Sru // The first mask is a permutation of a single register. Since we have >2 607151497Sru // input registers to shuffle, we merge the masks for 2 first registers 608151497Sru // and generate a shuffle of 2 registers rather than the reordering of the 609151497Sru // first register and then shuffle with the second register. Next, 610151497Sru // generate the shuffles of the resulting register + the remaining 611151497Sru // registers from the list. 612151497Sru auto &&CombineMasks = [](MutableArrayRef<int> FirstMask, 613151497Sru ArrayRef<int> SecondMask) { 614151497Sru for (int Idx = 0, VF = FirstMask.size(); Idx < VF; ++Idx) { 615151497Sru if (SecondMask[Idx] != UndefMaskElem) { 616151497Sru assert(FirstMask[Idx] == UndefMaskElem && 617151497Sru "Expected undefined mask element."); 618151497Sru FirstMask[Idx] = SecondMask[Idx] + VF; 619151497Sru } 620151497Sru } 621151497Sru }; 622151497Sru auto &&NormalizeMask = [](MutableArrayRef<int> Mask) { 623151497Sru for (int Idx = 0, VF = Mask.size(); Idx < VF; ++Idx) { 624151497Sru if (Mask[Idx] != UndefMaskElem) 625151497Sru Mask[Idx] = Idx; 626151497Sru } 627151497Sru }; 628151497Sru int SecondIdx; 629151497Sru do { 630151497Sru int FirstIdx = -1; 631151497Sru SecondIdx = -1; 632151497Sru MutableArrayRef<int> FirstMask, SecondMask; 633151497Sru for (unsigned I = 0; I < NumOfDestRegs; ++I) { 634151497Sru SmallVectorImpl<int> &RegMask = Dest[I]; 635151497Sru if (RegMask.empty()) 636151497Sru continue; 637151497Sru 638151497Sru if (FirstIdx == SecondIdx) { 639151497Sru FirstIdx = I; 640151497Sru FirstMask = RegMask; 641151497Sru continue; 642151497Sru } 643151497Sru SecondIdx = I; 644151497Sru SecondMask = RegMask; 645151497Sru CombineMasks(FirstMask, SecondMask); 646151497Sru ManyInputsAction(FirstMask, FirstIdx, SecondIdx); 647151497Sru NormalizeMask(FirstMask); 648151497Sru RegMask.clear(); 649151497Sru SecondMask = FirstMask; 650151497Sru SecondIdx = FirstIdx; 651151497Sru } 652151497Sru if (FirstIdx != SecondIdx && SecondIdx >= 0) { 653151497Sru CombineMasks(SecondMask, FirstMask); 654151497Sru ManyInputsAction(SecondMask, SecondIdx, FirstIdx); 655151497Sru Dest[FirstIdx].clear(); 656151497Sru NormalizeMask(SecondMask); 657151497Sru } 658151497Sru } while (SecondIdx >= 0); 659151497Sru break; 660151497Sru } 661151497Sru } 662151497Sru } 663151497Sru} 664151497Sru 665151497SruMapVector<Instruction *, uint64_t> 666151497Srullvm::computeMinimumValueSizes(ArrayRef<BasicBlock *> Blocks, DemandedBits &DB, 667151497Sru const TargetTransformInfo *TTI) { 668151497Sru 669151497Sru // DemandedBits will give us every value's live-out bits. But we want 670151497Sru // to ensure no extra casts would need to be inserted, so every DAG 671151497Sru // of connected values must have the same minimum bitwidth. 672151497Sru EquivalenceClasses<Value *> ECs; 673151497Sru SmallVector<Value *, 16> Worklist; 674151497Sru SmallPtrSet<Value *, 4> Roots; 675151497Sru SmallPtrSet<Value *, 16> Visited; 676151497Sru DenseMap<Value *, uint64_t> DBits; 677151497Sru SmallPtrSet<Instruction *, 4> InstructionSet; 678151497Sru MapVector<Instruction *, uint64_t> MinBWs; 679151497Sru 680151497Sru // Determine the roots. We work bottom-up, from truncs or icmps. 681151497Sru bool SeenExtFromIllegalType = false; 682151497Sru for (auto *BB : Blocks) 683151497Sru for (auto &I : *BB) { 684151497Sru InstructionSet.insert(&I); 685151497Sru 686151497Sru if (TTI && (isa<ZExtInst>(&I) || isa<SExtInst>(&I)) && 687151497Sru !TTI->isTypeLegal(I.getOperand(0)->getType())) 688151497Sru SeenExtFromIllegalType = true; 689151497Sru 690151497Sru // Only deal with non-vector integers up to 64-bits wide. 691151497Sru if ((isa<TruncInst>(&I) || isa<ICmpInst>(&I)) && 692151497Sru !I.getType()->isVectorTy() && 693151497Sru I.getOperand(0)->getType()->getScalarSizeInBits() <= 64) { 694151497Sru // Don't make work for ourselves. If we know the loaded type is legal, 695151497Sru // don't add it to the worklist. 696151497Sru if (TTI && isa<TruncInst>(&I) && TTI->isTypeLegal(I.getType())) 697151497Sru continue; 698151497Sru 699151497Sru Worklist.push_back(&I); 700151497Sru Roots.insert(&I); 701151497Sru } 702151497Sru } 703151497Sru // Early exit. 704151497Sru if (Worklist.empty() || (TTI && !SeenExtFromIllegalType)) 705151497Sru return MinBWs; 706151497Sru 707151497Sru // Now proceed breadth-first, unioning values together. 708151497Sru while (!Worklist.empty()) { 709151497Sru Value *Val = Worklist.pop_back_val(); 710151497Sru Value *Leader = ECs.getOrInsertLeaderValue(Val); 711151497Sru 712151497Sru if (!Visited.insert(Val).second) 713151497Sru continue; 714151497Sru 715151497Sru // Non-instructions terminate a chain successfully. 716151497Sru if (!isa<Instruction>(Val)) 717151497Sru continue; 718151497Sru Instruction *I = cast<Instruction>(Val); 719151497Sru 720151497Sru // If we encounter a type that is larger than 64 bits, we can't represent 721151497Sru // it so bail out. 722151497Sru if (DB.getDemandedBits(I).getBitWidth() > 64) 723151497Sru return MapVector<Instruction *, uint64_t>(); 724151497Sru 725151497Sru uint64_t V = DB.getDemandedBits(I).getZExtValue(); 726151497Sru DBits[Leader] |= V; 727151497Sru DBits[I] = V; 728151497Sru 729151497Sru // Casts, loads and instructions outside of our range terminate a chain 730151497Sru // successfully. 731151497Sru if (isa<SExtInst>(I) || isa<ZExtInst>(I) || isa<LoadInst>(I) || 732151497Sru !InstructionSet.count(I)) 733151497Sru continue; 734151497Sru 735151497Sru // Unsafe casts terminate a chain unsuccessfully. We can't do anything 736151497Sru // useful with bitcasts, ptrtoints or inttoptrs and it'd be unsafe to 737151497Sru // transform anything that relies on them. 738151497Sru if (isa<BitCastInst>(I) || isa<PtrToIntInst>(I) || isa<IntToPtrInst>(I) || 739151497Sru !I->getType()->isIntegerTy()) { 740151497Sru DBits[Leader] |= ~0ULL; 741151497Sru continue; 742151497Sru } 743151497Sru 744151497Sru // We don't modify the types of PHIs. Reductions will already have been 745151497Sru // truncated if possible, and inductions' sizes will have been chosen by 746151497Sru // indvars. 747151497Sru if (isa<PHINode>(I)) 748151497Sru continue; 749151497Sru 750151497Sru if (DBits[Leader] == ~0ULL) 751151497Sru // All bits demanded, no point continuing. 752151497Sru continue; 753151497Sru 754151497Sru for (Value *O : cast<User>(I)->operands()) { 755151497Sru ECs.unionSets(Leader, O); 756151497Sru Worklist.push_back(O); 757151497Sru } 758151497Sru } 759151497Sru 760151497Sru // Now we've discovered all values, walk them to see if there are 761151497Sru // any users we didn't see. If there are, we can't optimize that 762151497Sru // chain. 763151497Sru for (auto &I : DBits) 764151497Sru for (auto *U : I.first->users()) 765151497Sru if (U->getType()->isIntegerTy() && DBits.count(U) == 0) 766151497Sru DBits[ECs.getOrInsertLeaderValue(I.first)] |= ~0ULL; 767151497Sru 768151497Sru for (auto I = ECs.begin(), E = ECs.end(); I != E; ++I) { 769151497Sru uint64_t LeaderDemandedBits = 0; 770151497Sru for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end())) 771151497Sru LeaderDemandedBits |= DBits[M]; 772151497Sru 773151497Sru uint64_t MinBW = (sizeof(LeaderDemandedBits) * 8) - 774151497Sru llvm::countLeadingZeros(LeaderDemandedBits); 775151497Sru // Round up to a power of 2 776151497Sru if (!isPowerOf2_64((uint64_t)MinBW)) 777151497Sru MinBW = NextPowerOf2(MinBW); 778151497Sru 779151497Sru // We don't modify the types of PHIs. Reductions will already have been 780151497Sru // truncated if possible, and inductions' sizes will have been chosen by 781151497Sru // indvars. 782151497Sru // If we are required to shrink a PHI, abandon this entire equivalence class. 783151497Sru bool Abort = false; 784151497Sru for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end())) 785151497Sru if (isa<PHINode>(M) && MinBW < M->getType()->getScalarSizeInBits()) { 786151497Sru Abort = true; 787151497Sru break; 788151497Sru } 789151497Sru if (Abort) 790151497Sru continue; 791151497Sru 792151497Sru for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end())) { 793151497Sru if (!isa<Instruction>(M)) 794151497Sru continue; 795151497Sru Type *Ty = M->getType(); 796151497Sru if (Roots.count(M)) 797151497Sru Ty = cast<Instruction>(M)->getOperand(0)->getType(); 798151497Sru if (MinBW < Ty->getScalarSizeInBits()) 799151497Sru MinBWs[cast<Instruction>(M)] = MinBW; 800151497Sru } 801151497Sru } 802151497Sru 803151497Sru return MinBWs; 804151497Sru} 805151497Sru 806151497Sru/// Add all access groups in @p AccGroups to @p List. 807151497Srutemplate <typename ListT> 808151497Srustatic void addToAccessGroupList(ListT &List, MDNode *AccGroups) { 809151497Sru // Interpret an access group as a list containing itself. 810151497Sru if (AccGroups->getNumOperands() == 0) { 811151497Sru assert(isValidAsAccessGroup(AccGroups) && "Node must be an access group"); 812151497Sru List.insert(AccGroups); 813151497Sru return; 814151497Sru } 815151497Sru 816151497Sru for (const auto &AccGroupListOp : AccGroups->operands()) { 817151497Sru auto *Item = cast<MDNode>(AccGroupListOp.get()); 818151497Sru assert(isValidAsAccessGroup(Item) && "List item must be an access group"); 819151497Sru List.insert(Item); 820151497Sru } 821151497Sru} 822151497Sru 823151497SruMDNode *llvm::uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2) { 824151497Sru if (!AccGroups1) 825151497Sru return AccGroups2; 826151497Sru if (!AccGroups2) 827151497Sru return AccGroups1; 828151497Sru if (AccGroups1 == AccGroups2) 829151497Sru return AccGroups1; 830151497Sru 831151497Sru SmallSetVector<Metadata *, 4> Union; 832151497Sru addToAccessGroupList(Union, AccGroups1); 833151497Sru addToAccessGroupList(Union, AccGroups2); 834151497Sru 835151497Sru if (Union.size() == 0) 836151497Sru return nullptr; 837151497Sru if (Union.size() == 1) 838151497Sru return cast<MDNode>(Union.front()); 839151497Sru 840151497Sru LLVMContext &Ctx = AccGroups1->getContext(); 841151497Sru return MDNode::get(Ctx, Union.getArrayRef()); 842151497Sru} 843151497Sru 844151497SruMDNode *llvm::intersectAccessGroups(const Instruction *Inst1, 845151497Sru const Instruction *Inst2) { 846151497Sru bool MayAccessMem1 = Inst1->mayReadOrWriteMemory(); 847151497Sru bool MayAccessMem2 = Inst2->mayReadOrWriteMemory(); 848151497Sru 849151497Sru if (!MayAccessMem1 && !MayAccessMem2) 850151497Sru return nullptr; 851151497Sru if (!MayAccessMem1) 852151497Sru return Inst2->getMetadata(LLVMContext::MD_access_group); 853151497Sru if (!MayAccessMem2) 854151497Sru return Inst1->getMetadata(LLVMContext::MD_access_group); 855151497Sru 856151497Sru MDNode *MD1 = Inst1->getMetadata(LLVMContext::MD_access_group); 857151497Sru MDNode *MD2 = Inst2->getMetadata(LLVMContext::MD_access_group); 858151497Sru if (!MD1 || !MD2) 859151497Sru return nullptr; 860151497Sru if (MD1 == MD2) 861151497Sru return MD1; 862151497Sru 863151497Sru // Use set for scalable 'contains' check. 864151497Sru SmallPtrSet<Metadata *, 4> AccGroupSet2; 865151497Sru addToAccessGroupList(AccGroupSet2, MD2); 866151497Sru 867151497Sru SmallVector<Metadata *, 4> Intersection; 868151497Sru if (MD1->getNumOperands() == 0) { 869151497Sru assert(isValidAsAccessGroup(MD1) && "Node must be an access group"); 870151497Sru if (AccGroupSet2.count(MD1)) 871151497Sru Intersection.push_back(MD1); 872151497Sru } else { 873151497Sru for (const MDOperand &Node : MD1->operands()) { 874151497Sru auto *Item = cast<MDNode>(Node.get()); 875151497Sru assert(isValidAsAccessGroup(Item) && "List item must be an access group"); 876151497Sru if (AccGroupSet2.count(Item)) 877151497Sru Intersection.push_back(Item); 878151497Sru } 879151497Sru } 880151497Sru 881151497Sru if (Intersection.size() == 0) 882151497Sru return nullptr; 883151497Sru if (Intersection.size() == 1) 884151497Sru return cast<MDNode>(Intersection.front()); 885151497Sru 886151497Sru LLVMContext &Ctx = Inst1->getContext(); 887151497Sru return MDNode::get(Ctx, Intersection); 888151497Sru} 889151497Sru 890151497Sru/// \returns \p I after propagating metadata from \p VL. 891151497SruInstruction *llvm::propagateMetadata(Instruction *Inst, ArrayRef<Value *> VL) { 892151497Sru if (VL.empty()) 893151497Sru return Inst; 894151497Sru Instruction *I0 = cast<Instruction>(VL[0]); 895151497Sru SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata; 896151497Sru I0->getAllMetadataOtherThanDebugLoc(Metadata); 897151497Sru 898151497Sru for (auto Kind : {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope, 899151497Sru LLVMContext::MD_noalias, LLVMContext::MD_fpmath, 900151497Sru LLVMContext::MD_nontemporal, LLVMContext::MD_invariant_load, 901151497Sru LLVMContext::MD_access_group}) { 902151497Sru MDNode *MD = I0->getMetadata(Kind); 903151497Sru 904151497Sru for (int J = 1, E = VL.size(); MD && J != E; ++J) { 905151497Sru const Instruction *IJ = cast<Instruction>(VL[J]); 906151497Sru MDNode *IMD = IJ->getMetadata(Kind); 907151497Sru switch (Kind) { 908151497Sru case LLVMContext::MD_tbaa: 909151497Sru MD = MDNode::getMostGenericTBAA(MD, IMD); 910151497Sru break; 911151497Sru case LLVMContext::MD_alias_scope: 912151497Sru MD = MDNode::getMostGenericAliasScope(MD, IMD); 913151497Sru break; 914151497Sru case LLVMContext::MD_fpmath: 915151497Sru MD = MDNode::getMostGenericFPMath(MD, IMD); 916151497Sru break; 917151497Sru case LLVMContext::MD_noalias: 918151497Sru case LLVMContext::MD_nontemporal: 919151497Sru case LLVMContext::MD_invariant_load: 920151497Sru MD = MDNode::intersect(MD, IMD); 921151497Sru break; 922151497Sru case LLVMContext::MD_access_group: 923151497Sru MD = intersectAccessGroups(Inst, IJ); 924151497Sru break; 925151497Sru default: 926151497Sru llvm_unreachable("unhandled metadata"); 927151497Sru } 928151497Sru } 929151497Sru 930151497Sru Inst->setMetadata(Kind, MD); 931151497Sru } 932151497Sru 933151497Sru return Inst; 934151497Sru} 935151497Sru 936151497SruConstant * 937151497Srullvm::createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF, 938151497Sru const InterleaveGroup<Instruction> &Group) { 939151497Sru // All 1's means mask is not needed. 940151497Sru if (Group.getNumMembers() == Group.getFactor()) 941151497Sru return nullptr; 942151497Sru 943151497Sru // TODO: support reversed access. 944151497Sru assert(!Group.isReverse() && "Reversed group not supported."); 945151497Sru 946151497Sru SmallVector<Constant *, 16> Mask; 947151497Sru for (unsigned i = 0; i < VF; i++) 948151497Sru for (unsigned j = 0; j < Group.getFactor(); ++j) { 949151497Sru unsigned HasMember = Group.getMember(j) ? 1 : 0; 950151497Sru Mask.push_back(Builder.getInt1(HasMember)); 951151497Sru } 952151497Sru 953151497Sru return ConstantVector::get(Mask); 954151497Sru} 955151497Sru 956151497Srullvm::SmallVector<int, 16> 957151497Srullvm::createReplicatedMask(unsigned ReplicationFactor, unsigned VF) { 958151497Sru SmallVector<int, 16> MaskVec; 959151497Sru for (unsigned i = 0; i < VF; i++) 960151497Sru for (unsigned j = 0; j < ReplicationFactor; j++) 961151497Sru MaskVec.push_back(i); 962151497Sru 963151497Sru return MaskVec; 964151497Sru} 965151497Sru 966151497Srullvm::SmallVector<int, 16> llvm::createInterleaveMask(unsigned VF, 967151497Sru unsigned NumVecs) { 968151497Sru SmallVector<int, 16> Mask; 969151497Sru for (unsigned i = 0; i < VF; i++) 970151497Sru for (unsigned j = 0; j < NumVecs; j++) 971151497Sru Mask.push_back(j * VF + i); 972151497Sru 973151497Sru return Mask; 974151497Sru} 975151497Sru 976151497Srullvm::SmallVector<int, 16> 977151497Srullvm::createStrideMask(unsigned Start, unsigned Stride, unsigned VF) { 978151497Sru SmallVector<int, 16> Mask; 979151497Sru for (unsigned i = 0; i < VF; i++) 980151497Sru Mask.push_back(Start + i * Stride); 981151497Sru 982151497Sru return Mask; 983151497Sru} 984151497Sru 985151497Srullvm::SmallVector<int, 16> llvm::createSequentialMask(unsigned Start, 986151497Sru unsigned NumInts, 987151497Sru unsigned NumUndefs) { 988151497Sru SmallVector<int, 16> Mask; 989151497Sru for (unsigned i = 0; i < NumInts; i++) 990151497Sru Mask.push_back(Start + i); 991151497Sru 992151497Sru for (unsigned i = 0; i < NumUndefs; i++) 993151497Sru Mask.push_back(-1); 994151497Sru 995151497Sru return Mask; 996151497Sru} 997151497Sru 998151497Srullvm::SmallVector<int, 16> llvm::createUnaryMask(ArrayRef<int> Mask, 999151497Sru unsigned NumElts) { 1000151497Sru // Avoid casts in the loop and make sure we have a reasonable number. 1001151497Sru int NumEltsSigned = NumElts; 1002151497Sru assert(NumEltsSigned > 0 && "Expected smaller or non-zero element count"); 1003151497Sru 1004151497Sru // If the mask chooses an element from operand 1, reduce it to choose from the 1005151497Sru // corresponding element of operand 0. Undef mask elements are unchanged. 1006151497Sru SmallVector<int, 16> UnaryMask; 1007151497Sru for (int MaskElt : Mask) { 1008151497Sru assert((MaskElt < NumEltsSigned * 2) && "Expected valid shuffle mask"); 1009151497Sru int UnaryElt = MaskElt >= NumEltsSigned ? MaskElt - NumEltsSigned : MaskElt; 1010151497Sru UnaryMask.push_back(UnaryElt); 1011151497Sru } 1012151497Sru return UnaryMask; 1013151497Sru} 1014151497Sru 1015151497Sru/// A helper function for concatenating vectors. This function concatenates two 1016151497Sru/// vectors having the same element type. If the second vector has fewer 1017151497Sru/// elements than the first, it is padded with undefs. 1018151497Srustatic Value *concatenateTwoVectors(IRBuilderBase &Builder, Value *V1, 1019151497Sru Value *V2) { 1020151497Sru VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType()); 1021151497Sru VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType()); 1022151497Sru assert(VecTy1 && VecTy2 && 1023151497Sru VecTy1->getScalarType() == VecTy2->getScalarType() && 1024151497Sru "Expect two vectors with the same element type"); 1025151497Sru 1026151497Sru unsigned NumElts1 = cast<FixedVectorType>(VecTy1)->getNumElements(); 1027151497Sru unsigned NumElts2 = cast<FixedVectorType>(VecTy2)->getNumElements(); 1028151497Sru assert(NumElts1 >= NumElts2 && "Unexpect the first vector has less elements"); 1029151497Sru 1030151497Sru if (NumElts1 > NumElts2) { 1031151497Sru // Extend with UNDEFs. 1032151497Sru V2 = Builder.CreateShuffleVector( 1033151497Sru V2, createSequentialMask(0, NumElts2, NumElts1 - NumElts2)); 1034151497Sru } 1035151497Sru 1036151497Sru return Builder.CreateShuffleVector( 1037151497Sru V1, V2, createSequentialMask(0, NumElts1 + NumElts2, 0)); 1038151497Sru} 1039151497Sru 1040151497SruValue *llvm::concatenateVectors(IRBuilderBase &Builder, 1041151497Sru ArrayRef<Value *> Vecs) { 1042151497Sru unsigned NumVecs = Vecs.size(); 1043151497Sru assert(NumVecs > 1 && "Should be at least two vectors"); 1044151497Sru 1045151497Sru SmallVector<Value *, 8> ResList; 1046151497Sru ResList.append(Vecs.begin(), Vecs.end()); 1047151497Sru do { 1048151497Sru SmallVector<Value *, 8> TmpList; 1049151497Sru for (unsigned i = 0; i < NumVecs - 1; i += 2) { 1050151497Sru Value *V0 = ResList[i], *V1 = ResList[i + 1]; 1051151497Sru assert((V0->getType() == V1->getType() || i == NumVecs - 2) && 1052151497Sru "Only the last vector may have a different type"); 1053151497Sru 1054151497Sru TmpList.push_back(concatenateTwoVectors(Builder, V0, V1)); 1055151497Sru } 1056151497Sru 1057151497Sru // Push the last vector if the total number of vectors is odd. 1058151497Sru if (NumVecs % 2 != 0) 1059151497Sru TmpList.push_back(ResList[NumVecs - 1]); 1060151497Sru 1061151497Sru ResList = TmpList; 1062151497Sru NumVecs = ResList.size(); 1063151497Sru } while (NumVecs > 1); 1064151497Sru 1065151497Sru return ResList[0]; 1066151497Sru} 1067151497Sru 1068151497Srubool llvm::maskIsAllZeroOrUndef(Value *Mask) { 1069151497Sru assert(isa<VectorType>(Mask->getType()) && 1070151497Sru isa<IntegerType>(Mask->getType()->getScalarType()) && 1071151497Sru cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() == 1072151497Sru 1 && 1073151497Sru "Mask must be a vector of i1"); 1074151497Sru 1075151497Sru auto *ConstMask = dyn_cast<Constant>(Mask); 1076151497Sru if (!ConstMask) 1077151497Sru return false; 1078151497Sru if (ConstMask->isNullValue() || isa<UndefValue>(ConstMask)) 1079151497Sru return true; 1080151497Sru if (isa<ScalableVectorType>(ConstMask->getType())) 1081151497Sru return false; 1082151497Sru for (unsigned 1083151497Sru I = 0, 1084151497Sru E = cast<FixedVectorType>(ConstMask->getType())->getNumElements(); 1085151497Sru I != E; ++I) { 1086151497Sru if (auto *MaskElt = ConstMask->getAggregateElement(I)) 1087151497Sru if (MaskElt->isNullValue() || isa<UndefValue>(MaskElt)) 1088151497Sru continue; 1089151497Sru return false; 1090151497Sru } 1091151497Sru return true; 1092151497Sru} 1093151497Sru 1094151497Srubool llvm::maskIsAllOneOrUndef(Value *Mask) { 1095151497Sru assert(isa<VectorType>(Mask->getType()) && 1096151497Sru isa<IntegerType>(Mask->getType()->getScalarType()) && 1097151497Sru cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() == 1098151497Sru 1 && 1099151497Sru "Mask must be a vector of i1"); 1100151497Sru 1101151497Sru auto *ConstMask = dyn_cast<Constant>(Mask); 1102151497Sru if (!ConstMask) 1103151497Sru return false; 1104151497Sru if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask)) 1105151497Sru return true; 1106151497Sru if (isa<ScalableVectorType>(ConstMask->getType())) 1107151497Sru return false; 1108151497Sru for (unsigned 1109151497Sru I = 0, 1110151497Sru E = cast<FixedVectorType>(ConstMask->getType())->getNumElements(); 1111151497Sru I != E; ++I) { 1112151497Sru if (auto *MaskElt = ConstMask->getAggregateElement(I)) 1113151497Sru if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt)) 1114151497Sru continue; 1115151497Sru return false; 1116151497Sru } 1117151497Sru return true; 1118151497Sru} 1119151497Sru 1120151497Sru/// TODO: This is a lot like known bits, but for 1121151497Sru/// vectors. Is there something we can common this with? 1122151497SruAPInt llvm::possiblyDemandedEltsInMask(Value *Mask) { 1123151497Sru assert(isa<FixedVectorType>(Mask->getType()) && 1124151497Sru isa<IntegerType>(Mask->getType()->getScalarType()) && 1125151497Sru cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() == 1126151497Sru 1 && 1127151497Sru "Mask must be a fixed width vector of i1"); 1128151497Sru 1129151497Sru const unsigned VWidth = 1130151497Sru cast<FixedVectorType>(Mask->getType())->getNumElements(); 1131151497Sru APInt DemandedElts = APInt::getAllOnes(VWidth); 1132151497Sru if (auto *CV = dyn_cast<ConstantVector>(Mask)) 1133151497Sru for (unsigned i = 0; i < VWidth; i++) 1134151497Sru if (CV->getAggregateElement(i)->isNullValue()) 1135151497Sru DemandedElts.clearBit(i); 1136151497Sru return DemandedElts; 1137151497Sru} 1138151497Sru 1139151497Srubool InterleavedAccessInfo::isStrided(int Stride) { 1140151497Sru unsigned Factor = std::abs(Stride); 1141151497Sru return Factor >= 2 && Factor <= MaxInterleaveGroupFactor; 1142151497Sru} 1143151497Sru 1144151497Sruvoid InterleavedAccessInfo::collectConstStrideAccesses( 1145151497Sru MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo, 1146151497Sru const ValueToValueMap &Strides) { 1147151497Sru auto &DL = TheLoop->getHeader()->getModule()->getDataLayout(); 1148151497Sru 1149151497Sru // Since it's desired that the load/store instructions be maintained in 1150151497Sru // "program order" for the interleaved access analysis, we have to visit the 1151151497Sru // blocks in the loop in reverse postorder (i.e., in a topological order). 1152151497Sru // Such an ordering will ensure that any load/store that may be executed 1153151497Sru // before a second load/store will precede the second load/store in 1154151497Sru // AccessStrideInfo. 1155151497Sru LoopBlocksDFS DFS(TheLoop); 1156151497Sru DFS.perform(LI); 1157151497Sru for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) 1158151497Sru for (auto &I : *BB) { 1159151497Sru Value *Ptr = getLoadStorePointerOperand(&I); 1160151497Sru if (!Ptr) 1161151497Sru continue; 1162151497Sru Type *ElementTy = getLoadStoreType(&I); 1163151497Sru 1164151497Sru // Currently, codegen doesn't support cases where the type size doesn't 1165151497Sru // match the alloc size. Skip them for now. 1166151497Sru uint64_t Size = DL.getTypeAllocSize(ElementTy); 1167151497Sru if (Size * 8 != DL.getTypeSizeInBits(ElementTy)) 1168151497Sru continue; 1169151497Sru 1170151497Sru // We don't check wrapping here because we don't know yet if Ptr will be 1171151497Sru // part of a full group or a group with gaps. Checking wrapping for all 1172151497Sru // pointers (even those that end up in groups with no gaps) will be overly 1173151497Sru // conservative. For full groups, wrapping should be ok since if we would 1174151497Sru // wrap around the address space we would do a memory access at nullptr 1175151497Sru // even without the transformation. The wrapping checks are therefore 1176151497Sru // deferred until after we've formed the interleaved groups. 1177151497Sru int64_t Stride = 1178151497Sru getPtrStride(PSE, ElementTy, Ptr, TheLoop, Strides, 1179151497Sru /*Assume=*/true, /*ShouldCheckWrap=*/false).value_or(0); 1180151497Sru 1181151497Sru const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr); 1182151497Sru AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size, 1183151497Sru getLoadStoreAlignment(&I)); 1184151497Sru } 1185151497Sru} 1186151497Sru 1187151497Sru// Analyze interleaved accesses and collect them into interleaved load and 1188151497Sru// store groups. 1189151497Sru// 1190151497Sru// When generating code for an interleaved load group, we effectively hoist all 1191151497Sru// loads in the group to the location of the first load in program order. When 1192151497Sru// generating code for an interleaved store group, we sink all stores to the 1193151497Sru// location of the last store. This code motion can change the order of load 1194151497Sru// and store instructions and may break dependences. 1195151497Sru// 1196151497Sru// The code generation strategy mentioned above ensures that we won't violate 1197151497Sru// any write-after-read (WAR) dependences. 1198151497Sru// 1199151497Sru// E.g., for the WAR dependence: a = A[i]; // (1) 1200151497Sru// A[i] = b; // (2) 1201151497Sru// 1202151497Sru// The store group of (2) is always inserted at or below (2), and the load 1203151497Sru// group of (1) is always inserted at or above (1). Thus, the instructions will 1204151497Sru// never be reordered. All other dependences are checked to ensure the 1205151497Sru// correctness of the instruction reordering. 1206151497Sru// 1207151497Sru// The algorithm visits all memory accesses in the loop in bottom-up program 1208151497Sru// order. Program order is established by traversing the blocks in the loop in 1209151497Sru// reverse postorder when collecting the accesses. 1210151497Sru// 1211151497Sru// We visit the memory accesses in bottom-up order because it can simplify the 1212151497Sru// construction of store groups in the presence of write-after-write (WAW) 1213151497Sru// dependences. 1214151497Sru// 1215151497Sru// E.g., for the WAW dependence: A[i] = a; // (1) 1216151497Sru// A[i] = b; // (2) 1217151497Sru// A[i + 1] = c; // (3) 1218151497Sru// 1219151497Sru// We will first create a store group with (3) and (2). (1) can't be added to 1220151497Sru// this group because it and (2) are dependent. However, (1) can be grouped 1221151497Sru// with other accesses that may precede it in program order. Note that a 1222151497Sru// bottom-up order does not imply that WAW dependences should not be checked. 1223151497Sruvoid InterleavedAccessInfo::analyzeInterleaving( 1224151497Sru bool EnablePredicatedInterleavedMemAccesses) { 1225151497Sru LLVM_DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n"); 1226151497Sru const ValueToValueMap &Strides = LAI->getSymbolicStrides(); 1227151497Sru 1228151497Sru // Holds all accesses with a constant stride. 1229151497Sru MapVector<Instruction *, StrideDescriptor> AccessStrideInfo; 1230151497Sru collectConstStrideAccesses(AccessStrideInfo, Strides); 1231151497Sru 1232151497Sru if (AccessStrideInfo.empty()) 1233151497Sru return; 1234151497Sru 1235151497Sru // Collect the dependences in the loop. 1236151497Sru collectDependences(); 1237151497Sru 1238151497Sru // Holds all interleaved store groups temporarily. 1239151497Sru SmallSetVector<InterleaveGroup<Instruction> *, 4> StoreGroups; 1240151497Sru // Holds all interleaved load groups temporarily. 1241151497Sru SmallSetVector<InterleaveGroup<Instruction> *, 4> LoadGroups; 1242151497Sru 1243151497Sru // Search in bottom-up program order for pairs of accesses (A and B) that can 1244151497Sru // form interleaved load or store groups. In the algorithm below, access A 1245151497Sru // precedes access B in program order. We initialize a group for B in the 1246151497Sru // outer loop of the algorithm, and then in the inner loop, we attempt to 1247151497Sru // insert each A into B's group if: 1248151497Sru // 1249151497Sru // 1. A and B have the same stride, 1250151497Sru // 2. A and B have the same memory object size, and 1251151497Sru // 3. A belongs in B's group according to its distance from B. 1252151497Sru // 1253151497Sru // Special care is taken to ensure group formation will not break any 1254151497Sru // dependences. 1255151497Sru for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend(); 1256151497Sru BI != E; ++BI) { 1257151497Sru Instruction *B = BI->first; 1258151497Sru StrideDescriptor DesB = BI->second; 1259151497Sru 1260151497Sru // Initialize a group for B if it has an allowable stride. Even if we don't 1261151497Sru // create a group for B, we continue with the bottom-up algorithm to ensure 1262151497Sru // we don't break any of B's dependences. 1263151497Sru InterleaveGroup<Instruction> *Group = nullptr; 1264151497Sru if (isStrided(DesB.Stride) && 1265151497Sru (!isPredicated(B->getParent()) || EnablePredicatedInterleavedMemAccesses)) { 1266151497Sru Group = getInterleaveGroup(B); 1267151497Sru if (!Group) { 1268151497Sru LLVM_DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B 1269151497Sru << '\n'); 1270151497Sru Group = createInterleaveGroup(B, DesB.Stride, DesB.Alignment); 1271151497Sru } 1272151497Sru if (B->mayWriteToMemory()) 1273151497Sru StoreGroups.insert(Group); 1274151497Sru else 1275151497Sru LoadGroups.insert(Group); 1276151497Sru } 1277151497Sru 1278151497Sru for (auto AI = std::next(BI); AI != E; ++AI) { 1279151497Sru Instruction *A = AI->first; 1280151497Sru StrideDescriptor DesA = AI->second; 1281151497Sru 1282151497Sru // Our code motion strategy implies that we can't have dependences 1283151497Sru // between accesses in an interleaved group and other accesses located 1284151497Sru // between the first and last member of the group. Note that this also 1285151497Sru // means that a group can't have more than one member at a given offset. 1286151497Sru // The accesses in a group can have dependences with other accesses, but 1287151497Sru // we must ensure we don't extend the boundaries of the group such that 1288151497Sru // we encompass those dependent accesses. 1289151497Sru // 1290151497Sru // For example, assume we have the sequence of accesses shown below in a 1291151497Sru // stride-2 loop: 1292151497Sru // 1293151497Sru // (1, 2) is a group | A[i] = a; // (1) 1294151497Sru // | A[i-1] = b; // (2) | 1295151497Sru // A[i-3] = c; // (3) 1296151497Sru // A[i] = d; // (4) | (2, 4) is not a group 1297151497Sru // 1298151497Sru // Because accesses (2) and (3) are dependent, we can group (2) with (1) 1299151497Sru // but not with (4). If we did, the dependent access (3) would be within 1300151497Sru // the boundaries of the (2, 4) group. 1301151497Sru if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) { 1302151497Sru // If a dependence exists and A is already in a group, we know that A 1303151497Sru // must be a store since A precedes B and WAR dependences are allowed. 1304151497Sru // Thus, A would be sunk below B. We release A's group to prevent this 1305151497Sru // illegal code motion. A will then be free to form another group with 1306151497Sru // instructions that precede it. 1307151497Sru if (isInterleaved(A)) { 1308151497Sru InterleaveGroup<Instruction> *StoreGroup = getInterleaveGroup(A); 1309151497Sru 1310151497Sru LLVM_DEBUG(dbgs() << "LV: Invalidated store group due to " 1311151497Sru "dependence between " << *A << " and "<< *B << '\n'); 1312151497Sru 1313151497Sru StoreGroups.remove(StoreGroup); 1314151497Sru releaseGroup(StoreGroup); 1315151497Sru } 1316151497Sru 1317151497Sru // If a dependence exists and A is not already in a group (or it was 1318151497Sru // and we just released it), B might be hoisted above A (if B is a 1319151497Sru // load) or another store might be sunk below A (if B is a store). In 1320151497Sru // either case, we can't add additional instructions to B's group. B 1321151497Sru // will only form a group with instructions that it precedes. 1322151497Sru break; 1323151497Sru } 1324151497Sru 1325151497Sru // At this point, we've checked for illegal code motion. If either A or B 1326151497Sru // isn't strided, there's nothing left to do. 1327151497Sru if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride)) 1328151497Sru continue; 1329151497Sru 1330151497Sru // Ignore A if it's already in a group or isn't the same kind of memory 1331151497Sru // operation as B. 1332151497Sru // Note that mayReadFromMemory() isn't mutually exclusive to 1333151497Sru // mayWriteToMemory in the case of atomic loads. We shouldn't see those 1334151497Sru // here, canVectorizeMemory() should have returned false - except for the 1335151497Sru // case we asked for optimization remarks. 1336151497Sru if (isInterleaved(A) || 1337151497Sru (A->mayReadFromMemory() != B->mayReadFromMemory()) || 1338151497Sru (A->mayWriteToMemory() != B->mayWriteToMemory())) 1339151497Sru continue; 1340151497Sru 1341151497Sru // Check rules 1 and 2. Ignore A if its stride or size is different from 1342151497Sru // that of B. 1343151497Sru if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size) 1344151497Sru continue; 1345151497Sru 1346151497Sru // Ignore A if the memory object of A and B don't belong to the same 1347151497Sru // address space 1348151497Sru if (getLoadStoreAddressSpace(A) != getLoadStoreAddressSpace(B)) 1349151497Sru continue; 1350151497Sru 1351151497Sru // Calculate the distance from A to B. 1352151497Sru const SCEVConstant *DistToB = dyn_cast<SCEVConstant>( 1353151497Sru PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev)); 1354151497Sru if (!DistToB) 1355151497Sru continue; 1356151497Sru int64_t DistanceToB = DistToB->getAPInt().getSExtValue(); 1357151497Sru 1358151497Sru // Check rule 3. Ignore A if its distance to B is not a multiple of the 1359151497Sru // size. 1360151497Sru if (DistanceToB % static_cast<int64_t>(DesB.Size)) 1361151497Sru continue; 1362151497Sru 1363151497Sru // All members of a predicated interleave-group must have the same predicate, 1364151497Sru // and currently must reside in the same BB. 1365151497Sru BasicBlock *BlockA = A->getParent(); 1366151497Sru BasicBlock *BlockB = B->getParent(); 1367151497Sru if ((isPredicated(BlockA) || isPredicated(BlockB)) && 1368151497Sru (!EnablePredicatedInterleavedMemAccesses || BlockA != BlockB)) 1369151497Sru continue; 1370151497Sru 1371151497Sru // The index of A is the index of B plus A's distance to B in multiples 1372151497Sru // of the size. 1373151497Sru int IndexA = 1374151497Sru Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size); 1375151497Sru 1376151497Sru // Try to insert A into B's group. 1377151497Sru if (Group->insertMember(A, IndexA, DesA.Alignment)) { 1378151497Sru LLVM_DEBUG(dbgs() << "LV: Inserted:" << *A << '\n' 1379151497Sru << " into the interleave group with" << *B 1380151497Sru << '\n'); 1381151497Sru InterleaveGroupMap[A] = Group; 1382151497Sru 1383151497Sru // Set the first load in program order as the insert position. 1384151497Sru if (A->mayReadFromMemory()) 1385151497Sru Group->setInsertPos(A); 1386151497Sru } 1387151497Sru } // Iteration over A accesses. 1388151497Sru } // Iteration over B accesses. 1389 1390 auto InvalidateGroupIfMemberMayWrap = [&](InterleaveGroup<Instruction> *Group, 1391 int Index, 1392 std::string FirstOrLast) -> bool { 1393 Instruction *Member = Group->getMember(Index); 1394 assert(Member && "Group member does not exist"); 1395 Value *MemberPtr = getLoadStorePointerOperand(Member); 1396 Type *AccessTy = getLoadStoreType(Member); 1397 if (getPtrStride(PSE, AccessTy, MemberPtr, TheLoop, Strides, 1398 /*Assume=*/false, /*ShouldCheckWrap=*/true).value_or(0)) 1399 return false; 1400 LLVM_DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to " 1401 << FirstOrLast 1402 << " group member potentially pointer-wrapping.\n"); 1403 releaseGroup(Group); 1404 return true; 1405 }; 1406 1407 // Remove interleaved groups with gaps whose memory 1408 // accesses may wrap around. We have to revisit the getPtrStride analysis, 1409 // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does 1410 // not check wrapping (see documentation there). 1411 // FORNOW we use Assume=false; 1412 // TODO: Change to Assume=true but making sure we don't exceed the threshold 1413 // of runtime SCEV assumptions checks (thereby potentially failing to 1414 // vectorize altogether). 1415 // Additional optional optimizations: 1416 // TODO: If we are peeling the loop and we know that the first pointer doesn't 1417 // wrap then we can deduce that all pointers in the group don't wrap. 1418 // This means that we can forcefully peel the loop in order to only have to 1419 // check the first pointer for no-wrap. When we'll change to use Assume=true 1420 // we'll only need at most one runtime check per interleaved group. 1421 for (auto *Group : LoadGroups) { 1422 // Case 1: A full group. Can Skip the checks; For full groups, if the wide 1423 // load would wrap around the address space we would do a memory access at 1424 // nullptr even without the transformation. 1425 if (Group->getNumMembers() == Group->getFactor()) 1426 continue; 1427 1428 // Case 2: If first and last members of the group don't wrap this implies 1429 // that all the pointers in the group don't wrap. 1430 // So we check only group member 0 (which is always guaranteed to exist), 1431 // and group member Factor - 1; If the latter doesn't exist we rely on 1432 // peeling (if it is a non-reversed accsess -- see Case 3). 1433 if (InvalidateGroupIfMemberMayWrap(Group, 0, std::string("first"))) 1434 continue; 1435 if (Group->getMember(Group->getFactor() - 1)) 1436 InvalidateGroupIfMemberMayWrap(Group, Group->getFactor() - 1, 1437 std::string("last")); 1438 else { 1439 // Case 3: A non-reversed interleaved load group with gaps: We need 1440 // to execute at least one scalar epilogue iteration. This will ensure 1441 // we don't speculatively access memory out-of-bounds. We only need 1442 // to look for a member at index factor - 1, since every group must have 1443 // a member at index zero. 1444 if (Group->isReverse()) { 1445 LLVM_DEBUG( 1446 dbgs() << "LV: Invalidate candidate interleaved group due to " 1447 "a reverse access with gaps.\n"); 1448 releaseGroup(Group); 1449 continue; 1450 } 1451 LLVM_DEBUG( 1452 dbgs() << "LV: Interleaved group requires epilogue iteration.\n"); 1453 RequiresScalarEpilogue = true; 1454 } 1455 } 1456 1457 for (auto *Group : StoreGroups) { 1458 // Case 1: A full group. Can Skip the checks; For full groups, if the wide 1459 // store would wrap around the address space we would do a memory access at 1460 // nullptr even without the transformation. 1461 if (Group->getNumMembers() == Group->getFactor()) 1462 continue; 1463 1464 // Interleave-store-group with gaps is implemented using masked wide store. 1465 // Remove interleaved store groups with gaps if 1466 // masked-interleaved-accesses are not enabled by the target. 1467 if (!EnablePredicatedInterleavedMemAccesses) { 1468 LLVM_DEBUG( 1469 dbgs() << "LV: Invalidate candidate interleaved store group due " 1470 "to gaps.\n"); 1471 releaseGroup(Group); 1472 continue; 1473 } 1474 1475 // Case 2: If first and last members of the group don't wrap this implies 1476 // that all the pointers in the group don't wrap. 1477 // So we check only group member 0 (which is always guaranteed to exist), 1478 // and the last group member. Case 3 (scalar epilog) is not relevant for 1479 // stores with gaps, which are implemented with masked-store (rather than 1480 // speculative access, as in loads). 1481 if (InvalidateGroupIfMemberMayWrap(Group, 0, std::string("first"))) 1482 continue; 1483 for (int Index = Group->getFactor() - 1; Index > 0; Index--) 1484 if (Group->getMember(Index)) { 1485 InvalidateGroupIfMemberMayWrap(Group, Index, std::string("last")); 1486 break; 1487 } 1488 } 1489} 1490 1491void InterleavedAccessInfo::invalidateGroupsRequiringScalarEpilogue() { 1492 // If no group had triggered the requirement to create an epilogue loop, 1493 // there is nothing to do. 1494 if (!requiresScalarEpilogue()) 1495 return; 1496 1497 bool ReleasedGroup = false; 1498 // Release groups requiring scalar epilogues. Note that this also removes them 1499 // from InterleaveGroups. 1500 for (auto *Group : make_early_inc_range(InterleaveGroups)) { 1501 if (!Group->requiresScalarEpilogue()) 1502 continue; 1503 LLVM_DEBUG( 1504 dbgs() 1505 << "LV: Invalidate candidate interleaved group due to gaps that " 1506 "require a scalar epilogue (not allowed under optsize) and cannot " 1507 "be masked (not enabled). \n"); 1508 releaseGroup(Group); 1509 ReleasedGroup = true; 1510 } 1511 assert(ReleasedGroup && "At least one group must be invalidated, as a " 1512 "scalar epilogue was required"); 1513 (void)ReleasedGroup; 1514 RequiresScalarEpilogue = false; 1515} 1516 1517template <typename InstT> 1518void InterleaveGroup<InstT>::addMetadata(InstT *NewInst) const { 1519 llvm_unreachable("addMetadata can only be used for Instruction"); 1520} 1521 1522namespace llvm { 1523template <> 1524void InterleaveGroup<Instruction>::addMetadata(Instruction *NewInst) const { 1525 SmallVector<Value *, 4> VL; 1526 std::transform(Members.begin(), Members.end(), std::back_inserter(VL), 1527 [](std::pair<int, Instruction *> p) { return p.second; }); 1528 propagateMetadata(NewInst, VL); 1529} 1530} 1531 1532std::string VFABI::mangleTLIVectorName(StringRef VectorName, 1533 StringRef ScalarName, unsigned numArgs, 1534 ElementCount VF) { 1535 SmallString<256> Buffer; 1536 llvm::raw_svector_ostream Out(Buffer); 1537 Out << "_ZGV" << VFABI::_LLVM_ << "N"; 1538 if (VF.isScalable()) 1539 Out << 'x'; 1540 else 1541 Out << VF.getFixedValue(); 1542 for (unsigned I = 0; I < numArgs; ++I) 1543 Out << "v"; 1544 Out << "_" << ScalarName << "(" << VectorName << ")"; 1545 return std::string(Out.str()); 1546} 1547 1548void VFABI::getVectorVariantNames( 1549 const CallInst &CI, SmallVectorImpl<std::string> &VariantMappings) { 1550 const StringRef S = CI.getFnAttr(VFABI::MappingsAttrName).getValueAsString(); 1551 if (S.empty()) 1552 return; 1553 1554 SmallVector<StringRef, 8> ListAttr; 1555 S.split(ListAttr, ","); 1556 1557 for (const auto &S : SetVector<StringRef>(ListAttr.begin(), ListAttr.end())) { 1558#ifndef NDEBUG 1559 LLVM_DEBUG(dbgs() << "VFABI: adding mapping '" << S << "'\n"); 1560 std::optional<VFInfo> Info = 1561 VFABI::tryDemangleForVFABI(S, *(CI.getModule())); 1562 assert(Info && "Invalid name for a VFABI variant."); 1563 assert(CI.getModule()->getFunction(Info->VectorName) && 1564 "Vector function is missing."); 1565#endif 1566 VariantMappings.push_back(std::string(S)); 1567 } 1568} 1569 1570bool VFShape::hasValidParameterList() const { 1571 for (unsigned Pos = 0, NumParams = Parameters.size(); Pos < NumParams; 1572 ++Pos) { 1573 assert(Parameters[Pos].ParamPos == Pos && "Broken parameter list."); 1574 1575 switch (Parameters[Pos].ParamKind) { 1576 default: // Nothing to check. 1577 break; 1578 case VFParamKind::OMP_Linear: 1579 case VFParamKind::OMP_LinearRef: 1580 case VFParamKind::OMP_LinearVal: 1581 case VFParamKind::OMP_LinearUVal: 1582 // Compile time linear steps must be non-zero. 1583 if (Parameters[Pos].LinearStepOrPos == 0) 1584 return false; 1585 break; 1586 case VFParamKind::OMP_LinearPos: 1587 case VFParamKind::OMP_LinearRefPos: 1588 case VFParamKind::OMP_LinearValPos: 1589 case VFParamKind::OMP_LinearUValPos: 1590 // The runtime linear step must be referring to some other 1591 // parameters in the signature. 1592 if (Parameters[Pos].LinearStepOrPos >= int(NumParams)) 1593 return false; 1594 // The linear step parameter must be marked as uniform. 1595 if (Parameters[Parameters[Pos].LinearStepOrPos].ParamKind != 1596 VFParamKind::OMP_Uniform) 1597 return false; 1598 // The linear step parameter can't point at itself. 1599 if (Parameters[Pos].LinearStepOrPos == int(Pos)) 1600 return false; 1601 break; 1602 case VFParamKind::GlobalPredicate: 1603 // The global predicate must be the unique. Can be placed anywhere in the 1604 // signature. 1605 for (unsigned NextPos = Pos + 1; NextPos < NumParams; ++NextPos) 1606 if (Parameters[NextPos].ParamKind == VFParamKind::GlobalPredicate) 1607 return false; 1608 break; 1609 } 1610 } 1611 return true; 1612} 1613