/freebsd-12-stable/contrib/llvm-project/llvm/include/llvm/Analysis/Utils/ |
Local.h
    29 Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP,
    31   GEPOperator *GEPOp = cast<GEPOperator>(GEP);
    32   Type *IntIdxTy = DL.getIndexType(GEP->getType());
    35   // If the GEP is inbounds, we know that none of the addressing operations will
    44   gep_type_iterator GTI = gep_type_begin(GEP);
    45   for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
    60       GEP->getName()+".offs");
    73     Result = Builder->CreateAdd(Result, Scale, GEP->getName()+".offs");
    87   GEP ...
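The helper above reduces a GEP to explicit integer arithmetic: each index is scaled by the size of the type it steps over and the terms are summed into one offset. A minimal plain-C++ sketch of that arithmetic, using a hypothetical struct S (names and layout are assumptions for illustration, not from the source):

    #include <cstddef>
    #include <cstdint>

    // Hypothetical element type; the layout is an assumption for illustration.
    struct S { int64_t A; int32_t B; };

    // Byte offset that a GEP with indices (Idx, 1) into an array of S reduces
    // to: scale the array index by sizeof(S), then add the field offset of B.
    uint64_t gepByteOffset(uint64_t Idx) {
      return Idx * sizeof(S) + offsetof(S, B);
    }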
/freebsd-12-stable/contrib/llvm-project/llvm/lib/Transforms/IPO/ |
GlobalSplit.cpp
     9 // This pass uses inrange annotations on GEP indices to split globals where
    60   auto *GEP = dyn_cast<GEPOperator>(U);
    61   if (!GEP || !GEP->getInRangeIndex() || *GEP->getInRangeIndex() != 1 ||
    62       !isa<ConstantInt>(GEP->getOperand(1)) ||
    63       !cast<ConstantInt>(GEP->getOperand(1))->isZero() ||
    64       !isa<ConstantInt>(GEP->getOperand(2)))
   117   auto *GEP = cast<GEPOperator>(U);
   118   unsigned I = cast<ConstantInt>(GEP ...
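The checks above only admit GEPs of the form (0, inrange-annotated constant, constant), i.e. globals whose members are always addressed independently. A source-level analogy of the split's effect, with hypothetical types (not the pass's IR output):

    // One combined global before the pass: every use indexes either A or B
    // with an in-range constant, never crossing between them.
    struct Combined { int A[2]; int B[2]; };

    // After the split: two independent globals, so each part can be
    // optimized or dropped on its own.
    int SplitA[2];
    int SplitB[2];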
ArgumentPromotion.cpp
    99 /// A vector used to hold the indices of a single GEP instruction
   122 // handle cases where there are both a direct load and GEP accesses.
   127 // what the new GEP/Load instructions we are inserting look like.
   179 // and gep+loads with the GEP indices.
   250 // Loop over the operands, inserting GEP and loads in the caller as
   260 // Emit a GEP and load for each element of the struct.
   289 // This satisfies GEP constraints.
   300 // And create a GEP to extract those indices.
   439 GetElementPtrInst *GEP = cast<GetElementPtrInst>(I->user_back());
   441 Operands.reserve(GEP ...
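The surrounding comments describe the promotion itself: callers are rewritten to emit a GEP and load per struct element and to pass the loaded values directly. A source-level sketch of the before/after shape, with hypothetical names (the conceptual effect, not the pass's IR output):

    struct Pair { int A; int B; };

    // Before promotion: the callee takes a pointer and loads the fields itself.
    static int sumByPtr(const Pair *P) { return P->A + P->B; }

    // After promotion: the callee takes the element values directly.
    static int sumByVal(int A, int B) { return A + B; }

    int caller(const Pair *P) {
      // Old call: return sumByPtr(P);
      return sumByVal(P->A, P->B); // GEP+load per element, hoisted into the caller
    }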
/freebsd-12-stable/contrib/llvm-project/llvm/lib/Transforms/Scalar/ |
NaryReassociate.cpp
   298 static bool isGEPFoldable(GetElementPtrInst *GEP,
   301   for (auto I = GEP->idx_begin(); I != GEP->idx_end(); ++I)
   303   return TTI->getGEPCost(GEP->getSourceElementType(), GEP->getPointerOperand(),
   307 Instruction *NaryReassociatePass::tryReassociateGEP(GetElementPtrInst *GEP) {
   308   // Not worth reassociating GEP if it is foldable.
   309   if (isGEPFoldable(GEP, TTI))
   312   gep_type_iterator GTI = gep_type_begin(*GEP);
   313   for (unsigned I = 1, E = GEP ...
   324 requiresSignExtension(Value *Index, GetElementPtrInst *GEP)
   332 tryReassociateGEPAtIndex(GetElementPtrInst *GEP, unsigned I, Type *IndexedType)
   367 tryReassociateGEPAtIndex(GetElementPtrInst *GEP, unsigned I, Value *LHS, Value *RHS, Type *IndexedType)
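tryReassociateGEPAtIndex looks for an index of the form LHS + RHS so that an already-computed &Base[..][LHS][..] can be reused. A one-line conceptual sketch with hypothetical names:

    // If &A[I] was already computed, &A[I + J] can be rebuilt from it instead
    // of recomputing A + (I + J); the reassociated form exposes the CSE.
    int *reuseBasis(int *PrecomputedAI, long J) {
      return PrecomputedAI + J; // &A[I + J] == &A[I] + J, element-wise
    }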
SeparateConstOffsetFromGEP.cpp
    34 // each GEP, wasting tons of registers. It emits the following PTX for the
    50 // It works by splitting each GEP into a variadic base and a constant offset.
    81 // Another improvement enabled by the LowerGEP flag is to lower a GEP with
   102 // lower a GEP with multiple indices into arithmetic operations:
   124 // If the target uses alias analysis in codegen, this pass will lower a GEP
   148 // LICM (Loop Invariant Code Motion) cannot hoist/sink a GEP of multiple
   149 // indices if one of the indices is variant. If we lower such a GEP into invariant
   152 // target's addressing modes. A GEP with multiple indices may not match and will
   153 // not be sunk. If we lower such a GEP into smaller parts, CGP may sink some of
   202 cl::desc("Do not separate the constant offset from a GEP instruction ...
   736 Extract(Value *Idx, GetElementPtrInst *GEP, User *&UserChainTail, const DominatorTree *DT)
   754 Find(Value *Idx, GetElementPtrInst *GEP, const DominatorTree *DT)
   763 canonicalizeArrayIndicesToPointerSize(GetElementPtrInst *GEP)
   782 accumulateByteOffset(GetElementPtrInst *GEP, bool &NeedsExtraction)
   931 splitGEP(GetElementPtrInst *GEP)
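The comments above describe splitting each GEP into a variadic base plus a constant offset so the constant can fold into the target's addressing mode. A plain-C++ before/after sketch of that split (hypothetical names, int32_t elements assumed):

    #include <cstdint>

    // Before: the constant 5 is folded into the index, so the whole address
    // is recomputed even though only the variadic part changes.
    int32_t loadBefore(const int32_t *P, int64_t I) { return P[I + 5]; }

    // After: Base = P + I is the variadic base; the 20-byte constant offset
    // (5 * sizeof(int32_t)) can be folded into the load's addressing mode.
    int32_t loadAfter(const int32_t *P, int64_t I) {
      const int32_t *Base = P + I;
      return *(Base + 5);
    }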
StraightLineStrengthReduce.cpp
   107 GEP,     // &B[..][i * S][..]
   119 // Note that Index and Stride of a GEP candidate do not necessarily have the
   201 void allocateCandidatesAndFindBasisForGEP(GetElementPtrInst *GEP);
   222                                GetElementPtrInst *GEP);
   225 // GEP and the bump is not divisible by the element size of the GEP, this
   227 // basis using an ugly GEP.
   273 static bool isGEPFoldable(GetElementPtrInst *GEP,
   276   for (auto I = GEP->idx_begin(); I != GEP ...
   302 hasOnlyOneNonZeroIndex(GetElementPtrInst *GEP)
   483 factorArrayIndex(Value *ArrayIdx, const SCEV *Base, uint64_t ElementSize, GetElementPtrInst *GEP)
   518 allocateCandidatesAndFindBasisForGEP(GetElementPtrInst *GEP)
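A candidate of GEP kind has the shape &B[..][i * S][..], and the rewrite replaces the multiply with a bump from an existing basis. A conceptual sketch, names hypothetical:

    // With a basis &B[I * S] in hand, the candidate &B[(I + 1) * S] is the
    // basis bumped by S elements, replacing a multiply with an add.
    int *candidateFromBasis(int *Basis, long S) {
      return Basis + S; // &B[(I + 1) * S] == &B[I * S] + S
    }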
MergeICmps.cpp
    83 BCEAtom(GetElementPtrInst *GEP, LoadInst *LoadI, int BaseId, APInt Offset)
    84     : GEP(GEP), LoadI(LoadI), BaseId(BaseId), Offset(Offset) {}
    93   GEP = that.GEP;
   114 GetElementPtrInst *GEP = nullptr;
   157 auto *const GEP = dyn_cast<GetElementPtrInst>(Addr);
   158 if (!GEP)
   160 LLVM_DEBUG(dbgs() << "GEP\n");
   161 if (GEP ...
/freebsd-12-stable/contrib/llvm-project/llvm/include/llvm/IR/ |
GetElementPtrTypeIterator.h
    94 // the first element this is an unbounded array of the GEP's source element
   127 inline gep_type_iterator gep_type_begin(const User *GEP) {
   128   auto *GEPOp = cast<GEPOperator>(GEP);
   131       GEP->op_begin() + 1);
   134 inline gep_type_iterator gep_type_end(const User *GEP) {
   135   return gep_type_iterator::end(GEP->op_end());
   138 inline gep_type_iterator gep_type_begin(const User &GEP) {
   139   auto &GEPOp = cast<GEPOperator>(GEP);
   142       GEP.op_begin() + 1);
   145 inline gep_type_iterator gep_type_end(const User &GEP) { ...
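A short usage sketch of the iterator this header defines, assuming a GEP instruction is in hand; it mirrors the loops seen elsewhere in this listing by pairing each index with the type it steps through:

    #include "llvm/IR/GetElementPtrTypeIterator.h"
    #include "llvm/IR/Instructions.h"
    #include "llvm/Support/raw_ostream.h"

    // Walk each GEP index together with the type it indexes into.
    void dumpGEPIndices(const llvm::GetElementPtrInst *GEP) {
      for (llvm::gep_type_iterator GTI = llvm::gep_type_begin(GEP),
                                   E = llvm::gep_type_end(GEP);
           GTI != E; ++GTI) {
        if (llvm::StructType *STy = GTI.getStructTypeOrNull())
          llvm::errs() << "struct field index into " << *STy << "\n";
        else
          llvm::errs() << "sequential index over " << *GTI.getIndexedType() << "\n";
      }
    }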
/freebsd-12-stable/contrib/llvm-project/llvm/include/llvm/Transforms/Scalar/ |
NaryReassociate.h
   119 // Reassociate GEP for better CSE.
   120 Instruction *tryReassociateGEP(GetElementPtrInst *GEP);
   122 // Try splitting GEP at the I-th index and see whether either part can be
   125 // \p IndexedType The element type indexed by GEP's I-th index. This is
   127 //   GEP->getIndexedType(GEP->getPointerOperand(), 0-th index,
   129 GetElementPtrInst *tryReassociateGEPAtIndex(GetElementPtrInst *GEP,
   132 // Given GEP's I-th index = LHS + RHS, see whether &Base[..][LHS][..] or
   133 // &Base[..][RHS][..] can be CSE'ed and rewrite GEP accordingly.
   134 GetElementPtrInst *tryReassociateGEPAtIndex(GetElementPtrInst *GEP, ...
/freebsd-12-stable/contrib/llvm-project/llvm/lib/Analysis/ |
PHITransAddr.cpp
   214 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
   217   for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i) {
   218     Value *GEPOp = PHITranslateSubExpr(GEP->getOperand(i), CurBB, PredBB, DT);
   221     AnyChanged |= GEPOp != GEP->getOperand(i);
   226   return GEP;
   228   // Simplify the GEP to handle 'gep x, 0' -> x etc.
   229   if (Value *V = SimplifyGEPInst(GEP->getSourceElementType(),
   237   // Scan to see if we have this GEP available.
   241   if (GEPI->getType() == GEP->getType() &&
   396 if (GetElementPtrInst *GEP ...
TypeMetadataUtils.cpp
    60 } else if (auto GEP = dyn_cast<GetElementPtrInst>(User)) {
    61   // Take into account the GEP offset.
    62   if (VPtr == GEP->getPointerOperand() && GEP->hasAllConstantIndices()) {
    63     SmallVector<Value *, 8> Indices(GEP->op_begin() + 1, GEP->op_end());
    65         GEP->getSourceElementType(), Indices);
MemoryBuiltins.cpp
   590 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V))
   591   return visitGEPOperator(*GEP);
   748 SizeOffsetType ObjectSizeOffsetVisitor::visitGEPOperator(GEPOperator &GEP) {
   749   SizeOffsetType PtrData = compute(GEP.getPointerOperand());
   750   APInt Offset(DL.getIndexTypeSizeInBits(GEP.getPointerOperand()->getType()), 0);
   751   if (!bothKnown(PtrData) || !GEP.accumulateConstantOffset(DL, Offset))
   894 } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
   895   Result = visitGEPOperator(*GEP);
   971 ObjectSizeOffsetEvaluator::visitGEPOperator(GEPOperator &GEP) {
   972   SizeOffsetEvalType PtrData = compute_(GEP ...
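visitGEPOperator combines the size/offset known for the GEP's base with the GEP's accumulated constant offset. A conceptual sketch of that bookkeeping with illustrative types (not the LLVM SizeOffsetType API):

    #include <cstdint>
    #include <utility>

    // Given (Size, Offset) known for the base pointer, a GEP that adds a
    // constant ConstBytes yields the same allocation size at Offset + ConstBytes.
    std::pair<uint64_t, int64_t>
    sizeOffsetOfGEP(std::pair<uint64_t, int64_t> Base, int64_t ConstBytes) {
      return {Base.first, Base.second + ConstBytes};
    }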
InlineCost.cpp
   273 bool isGEPFree(GetElementPtrInst &GEP);
   275 bool accumulateGEPOffset(GEPOperator &GEP, APInt &Offset);
   670 /// Accumulate a constant GEP offset into an APInt if possible.
   674 bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
   675   unsigned IntPtrWidth = DL.getIndexTypeSizeInBits(GEP.getType());
   678   for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
   703 /// Use TTI to check whether a GEP is free.
   706 bool CallAnalyzer::isGEPFree(GetElementPtrInst &GEP) {
   708   Operands.push_back(GEP ...
Loads.cpp
    83 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    84   const Value *Base = GEP->getPointerOperand();
    86   APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
    87   if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
    93   // GEP (== Base + Offset) is dereferenceable for Size bytes. If the base
    95   // then the GEP (== Base + Offset == k_0 * Align + k_1 * Align) is also
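The comment's alignment argument can be checked directly: if Base and Offset are each multiples of Align, their sum is too. A small worked sketch:

    #include <cassert>
    #include <cstdint>

    // If Base == k0 * Align and Offset == k1 * Align, then
    // Base + Offset == (k0 + k1) * Align, so the GEP stays aligned.
    void checkGEPAlignment(uint64_t Base, uint64_t Offset, uint64_t Align) {
      if (Base % Align == 0 && Offset % Align == 0)
        assert((Base + Offset) % Align == 0);
    }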
/freebsd-12-stable/contrib/llvm-project/llvm/lib/Transforms/InstCombine/ |
InstructionCombining.cpp
   161 Value *InstCombiner::EmitGEPOffset(User *GEP) {
   162   return llvm::EmitGEPOffset(&Builder, DL, GEP);
  1121 /// is a sequence of GEP indices into the pointed type that will land us at the
  1182 static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {
  1183   // If this GEP has only 0 indices, it is the same pointer as
  1184   // Src. If Src is not a trivial GEP too, don't combine
  1186   if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
  1653   // At least one GEP must be inbounds.
  1661 Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
  1662   SmallVector<Value*, 8> Ops(GEP ...
  2797   Value *GEP = Builder.CreateInBoundsGEP(L->getType(), ...
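shouldMergeGEPs guards the classic GEP-of-GEP merge performed in visitGetElementPtrInst. In pointer-arithmetic terms the merge is just reassociation; a conceptual sketch with hypothetical indices:

    // (P + A) + B collapses to P + (A + B): one address computation instead
    // of two, which is what merging Src into GEP buys when it is profitable.
    int *mergedGEP(int *P, long A, long B) {
      return P + (A + B);
    }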
InstCombinePHI.cpp
   393 // This is true if all GEP bases are allocas and if all indices into them are
   406 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(PN.getIncomingValue(i));
   407 if (!GEP || !GEP->hasOneUse() || GEP->getType() != FirstInst->getType() ||
   408     GEP->getNumOperands() != FirstInst->getNumOperands())
   411 AllInBounds &= GEP->isInBounds();
   415 (!isa<AllocaInst>(GEP->getOperand(0)) ||
   416  !GEP->hasAllConstantIndices())
   421 if (FirstInst->getOperand(op) == GEP ...
/freebsd-12-stable/contrib/llvm-project/llvm/lib/Transforms/Vectorize/ |
VPlanTransforms.cpp
    69 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
    70   NewRecipe = new VPWidenGEPRecipe(GEP, OrigLoop);
/freebsd-12-stable/contrib/llvm-project/llvm/lib/Target/ARM/ |
MVEGatherScatterLowering.cpp
   120 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
   121 if (!GEP) {
   127 Value *GEPPtr = GEP->getPointerOperand();
   134 if (GEP->getNumOperands() != 2) {
   139 Offsets = GEP->getOperand(1);
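The lowering extracts one base pointer (the GEP's pointer operand) and one vector of offsets (its single index operand). A plain-C++ sketch of the gather semantics being targeted, with a lane count of 4 assumed for illustration:

    // One base pointer plus a vector of per-lane offsets; each lane loads
    // independently.
    void gather4(const int *Base, const int Offsets[4], int Out[4]) {
      for (int Lane = 0; Lane < 4; ++Lane)
        Out[Lane] = Base[Offsets[Lane]];
    }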
/freebsd-12-stable/contrib/llvm-project/llvm/lib/Target/AMDGPU/ |
AMDGPUPerfHintAnalysis.cpp
   169 if (auto GEP = dyn_cast<GetElementPtrInst>(V)) {
   170   auto P = GEP->getPointerOperand();
   172   for (unsigned I = 1, E = GEP->getNumIndices() + 1; I != E; ++I)
   173     WorkSet.insert(GEP->getOperand(I));
   241 } else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
   243   auto *Ptr = GetPointerBaseWithConstantOffset(GEP, AM.BaseOffs, *DL);
   246   if (TLI->isLegalAddressingMode(*DL, AM, GEP->getResultElementType(),
   247                                  GEP->getPointerAddressSpace()))
AMDGPUInstructionSelector.h
    63 const MachineInstr &GEP;
    67 GEPInfo(const MachineInstr &GEP) : GEP(GEP), Imm(0) { }
AMDGPUPromoteAlloca.cpp
   308 GetElementPtrInst *GEP = cast<GetElementPtrInst>(Ptr);
   310 auto I = GEPIdx.find(GEP);
   314 static Value* GEPToVectorIndex(GetElementPtrInst *GEP) {
   316   if (GEP->getNumOperands() != 3)
   319   ConstantInt *I0 = dyn_cast<ConstantInt>(GEP->getOperand(1));
   323   return GEP->getOperand(2);
   333 // Currently only handle the case where the Pointer Operand is a GEP.
   346 // since it should be canonical form, the User should be a GEP.
   387 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(AllocaUser);
   388 if (!GEP) { ...
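GEPToVectorIndex accepts only the three-operand form (pointer, constant 0, index) and returns the last operand as the vector lane. A conceptual sketch of the promoted access, with the array size assumed for illustration:

    // After promotion, an array access through a (ptr, 0, Idx) GEP becomes a
    // per-lane vector access; the GEP's last operand is the lane index.
    int readLane(const int Vec[4], int Idx) {
      return Vec[Idx]; // conceptually: extractelement <4 x i32> %Vec, %Idx
    }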
/freebsd-12-stable/contrib/llvm-project/llvm/lib/Target/NVPTX/ |
NVVMReflect.cpp
   137 const ConstantExpr *GEP = cast<ConstantExpr>(Str);
   139 const Value *Sym = GEP->getOperand(0);
/freebsd-12-stable/contrib/llvm-project/llvm/lib/Transforms/Utils/ |
Evaluator.cpp
    99 // GEP is fine if it is simple + constant offset.
   129 /// globals and GEPs of globals. This should be kept up to date with
   433 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurInst)) {
   434   Constant *P = getVal(GEP->getOperand(0));
   436   for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end();
   440       ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), P, GEPOps,
   441                                      cast<GEPOperator>(GEP)->isInBounds());
   442   LLVM_DEBUG(dbgs() << "Found a GEP! Simplifying: " << *InstResult << "\n");
/freebsd-12-stable/contrib/llvm-project/llvm/lib/CodeGen/ |
InterleavedLoadCombinePass.cpp
   149 // Pa_3 = %IDX/2 + 1       #1 | GEP, step signext to i64
   150 // Pa_4 = (%IDX/2)*16 + 16 #0 | GEP, multiply index by sizeof(4) for floats
   151 // Pa_5 = (%IDX/2)*16 + 16 #0 | GEP, add offset of leading components
   156 // Pb_3 = %IDX/2 + 2       #1 | GEP, step signext to i64
   157 // Pb_4 = (%IDX/2)*16 + 32 #0 | GEP, multiply index by sizeof(4) for floats
   158 // Pb_5 = (%IDX/2)*16 + 16 #0 | GEP, add offset of leading components
   984 GetElementPtrInst &GEP = *cast<GetElementPtrInst>(&Ptr);
   989 if (GEP.accumulateConstantOffset(DL, BaseOffset)) {
   991   BasePtr = GEP.getPointerOperand();
   994 // Otherwise we allow that the last index operand of the GEP is ...
/freebsd-12-stable/contrib/llvm-project/llvm/lib/IR/ |
Value.cpp
   477 if (auto *GEP = dyn_cast<GEPOperator>(V)) {
   483   if (!GEP->hasAllZeroIndices())
   487   if (!GEP->hasAllConstantIndices())
   491   if (!GEP->isInBounds())
   495   V = GEP->getPointerOperand();
   566 if (auto *GEP = dyn_cast<GEPOperator>(V)) {
   568   if (!AllowNonInbounds && !GEP->isInBounds())
   572   // the pointer type of this GEP may be different from the type
   575   // of GEP's pointer type rather than the size of the original
   578   if (!GEP ...
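A simplified sketch of the stripping loop's GEP case, using the same GEPOperator API shown above: all-zero-index GEPs cannot change the pointer value, so the walk can step through them to the underlying object:

    #include "llvm/IR/Operator.h"
    #include "llvm/IR/Value.h"

    // Step through GEPs whose indices are all zero; the stripped result is
    // the pointer operand of the innermost such GEP.
    const llvm::Value *stripZeroIndexGEPs(const llvm::Value *V) {
      while (auto *GEP = llvm::dyn_cast<llvm::GEPOperator>(V)) {
        if (!GEP->hasAllZeroIndices())
          break;
        V = GEP->getPointerOperand();
      }
      return V;
    }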