Lines Matching refs:GEP in /freebsd-13-stable/contrib/llvm-project/llvm/lib/Transforms/InstCombine/

158 Value *InstCombiner::EmitGEPOffset(User *GEP) {
159 return llvm::EmitGEPOffset(&Builder, DL, GEP);
1138 /// is a sequence of GEP indices into the pointed type that will land us at the
1199 static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {
1200 // If this GEP has only 0 indices, it is the same pointer as
1201 // Src. If Src is not a trivial GEP too, don't combine
1203 if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
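The guard quoted at 1199-1203 declines to merge when the outer GEP adds nothing: all of its indices are zero, so it is only a retyped alias of Src, and folding it into a non-trivial Src would not remove any work. A minimal sketch of that condition as a standalone helper (illustrative name, not the in-tree function):

#include "llvm/IR/Operator.h"

using namespace llvm;

// Sketch: an outer GEP whose indices are all zero computes the same address
// as Src, so merging it into a non-trivial Src only renames Src. This mirrors
// the early-exit described at 1200-1203; the real shouldMergeGEPs also weighs
// instruction-count and vector-GEP concerns.
static bool outerGEPIsZeroIndexAlias(GEPOperator &GEP, GEPOperator &Src) {
  return GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices();
}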
1727 // At least one GEP must be inbounds.
1735 /// Thread a GEP operation with constant indices through the constant true/false
1737 static Instruction *foldSelectGEP(GetElementPtrInst &GEP,
1739 if (!GEP.hasAllConstantIndices())
1745 if (!match(GEP.getPointerOperand(), m_Instruction(Sel)) ||
1753 SmallVector<Value *, 4> IndexC(GEP.idx_begin(), GEP.idx_end());
1754 bool IsInBounds = GEP.isInBounds();
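Lines 1735-1754 thread a constant-index GEP through a select of pointers: gep (select %c, %p, %q), idxs becomes select %c, (gep %p, idxs), (gep %q, idxs). Below is a hedged sketch of that rewrite with IRBuilder; the helper name is made up, and unlike the in-tree fold it does not insist that the select arms be constants.

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Sketch only: thread a GEP with constant indices through a select of two
// pointers, producing a select of two GEPs. Returns null when the pattern
// does not apply.
static Value *threadGEPThroughSelect(GetElementPtrInst &GEP,
                                     IRBuilder<> &Builder) {
  if (!GEP.hasAllConstantIndices())
    return nullptr;

  auto *Sel = dyn_cast<SelectInst>(GEP.getPointerOperand());
  if (!Sel)
    return nullptr;

  SmallVector<Value *, 4> IndexC(GEP.idx_begin(), GEP.idx_end());
  bool IsInBounds = GEP.isInBounds();
  Type *Ty = GEP.getSourceElementType();

  // Build one GEP per select arm, then select between the two results.
  Value *TrueGEP =
      IsInBounds ? Builder.CreateInBoundsGEP(Ty, Sel->getTrueValue(), IndexC)
                 : Builder.CreateGEP(Ty, Sel->getTrueValue(), IndexC);
  Value *FalseGEP =
      IsInBounds ? Builder.CreateInBoundsGEP(Ty, Sel->getFalseValue(), IndexC)
                 : Builder.CreateGEP(Ty, Sel->getFalseValue(), IndexC);
  return Builder.CreateSelect(Sel->getCondition(), TrueGEP, FalseGEP);
}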
1762 Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
1763 SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end());
1764 Type *GEPType = GEP.getType();
1765 Type *GEPEltType = GEP.getSourceElementType();
1767 if (Value *V = SimplifyGEPInst(GEPEltType, Ops, SQ.getWithInstruction(&GEP)))
1768 return replaceInstUsesWith(GEP, V);
1771 // Skip if GEP return type is scalable. The number of elements is unknown at
1777 if (Value *V = SimplifyDemandedVectorElts(&GEP, AllOnesEltMask,
1779 if (V != &GEP)
1780 return replaceInstUsesWith(GEP, V);
1781 return &GEP;
1789 Value *PtrOp = GEP.getOperand(0);
1798 DL.getIndexType(GEP.getPointerOperandType()->getScalarType());
1800 gep_type_iterator GTI = gep_type_begin(GEP);
1801 for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
1832 return &GEP;
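The walk at 1798-1832 canonicalizes the GEP's indices to the pointer's index type: indices into sequential types (the initial pointer step, arrays, vectors) are sign-extended or truncated to that width, while struct field indices keep their type. A rough sketch of that loop, assuming the builder is positioned at the GEP (hypothetical helper):

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Sketch: widen or truncate every sequential-step index to the pointer index
// type so later folds see uniform index widths. Struct indices are skipped.
static bool canonicalizeIndexWidths(GetElementPtrInst &GEP,
                                    const DataLayout &DL,
                                    IRBuilder<> &Builder) {
  Type *IndexTy =
      DL.getIndexType(GEP.getPointerOperandType()->getScalarType());
  bool Changed = false;
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (auto I = GEP.op_begin() + 1, E = GEP.op_end(); I != E; ++I, ++GTI) {
    if (GTI.isStruct())              // struct field indices must stay as-is
      continue;
    if ((*I)->getType() == IndexTy)  // already the index type
      continue;
    *I = Builder.CreateIntCast(*I, IndexTy, /*isSigned=*/true);
    Changed = true;
  }
  return Changed;
}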
1840 // Don't fold a GEP into itself through a PHI node. This can only happen
1841 // through the back-edge of a loop. Folding a GEP into itself means that
1844 // actually achieving anything (the GEP still needs to be executed once per
1846 if (Op1 == &GEP)
1856 // As for Op1 above, don't try to fold a GEP into itself.
1857 if (Op2 == &GEP)
1860 // Keep track of the type as we walk the GEP.
1873 // The first two arguments can vary for any GEP, the rest have to be
1883 // The GEP is different by more than one input. While this could be
1914 // BB so that it can be merged with the current GEP.
1916 // All the GEPs feeding the PHI differ at a single offset. Clone a GEP
1934 GEP.getParent()->getInstList().insert(
1935 GEP.getParent()->getFirstInsertionPt(), NewGEP);
1936 replaceOperand(GEP, 0, NewGEP);
1944 if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
1947 // Try to reassociate loop invariant GEP chains to enable LICM.
1948 if (LI && Src->getNumOperands() == 2 && GEP.getNumOperands() == 2 &&
1950 if (Loop *L = LI->getLoopFor(GEP.getParent())) {
1951 Value *GO1 = GEP.getOperand(1);
1979 GEP.setOperand(1, SO1);
1980 return &GEP;
1990 NewGEP->setIsInBounds(GEP.isInBounds());
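Lines 1947-1990 reassociate a two-GEP chain when exactly one of the two single indices is invariant in the enclosing loop, so that the invariant half becomes its own GEP that LICM can hoist. A hedged sketch of the shape of that rewrite (illustrative helper; the real code also checks element types, updates the instruction in place, and propagates the inbounds flag):

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Sketch: given GEP = gep Src, GO1 where Src = gep Base, SO1 and exactly one
// of SO1/GO1 is invariant in loop L, rebuild the chain so the invariant
// single-index GEP comes first and can be hoisted out of the loop.
// Assumes both GEPs have two operands and compatible index/element types.
static Value *reassociateForLICM(GetElementPtrInst &GEP,
                                 GetElementPtrInst &Src, Loop *L,
                                 IRBuilder<> &Builder) {
  Value *SO1 = Src.getOperand(1);
  Value *GO1 = GEP.getOperand(1);
  bool SO1Inv = L->isLoopInvariant(SO1);
  bool GO1Inv = L->isLoopInvariant(GO1);
  if (SO1Inv == GO1Inv) // nothing to gain unless exactly one is invariant
    return nullptr;

  Value *InvIdx = SO1Inv ? SO1 : GO1;
  Value *VarIdx = SO1Inv ? GO1 : SO1;

  // Invariant part first (hoistable), variant part second.
  Value *Hoistable = Builder.CreateGEP(Src.getSourceElementType(),
                                       Src.getPointerOperand(), InvIdx);
  return Builder.CreateGEP(GEP.getSourceElementType(), Hoistable, VarIdx);
}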
2006 // Find out whether the last index in the source GEP is a sequential idx.
2017 Value *GO1 = GEP.getOperand(1);
2027 SimplifyAddInst(GO1, SO1, false, false, SQ.getWithInstruction(&GEP));
2033 // Update the GEP in place if possible.
2035 GEP.setIsInBounds(isMergedGEPInBounds(*Src, *cast<GEPOperator>(&GEP)));
2036 replaceOperand(GEP, 0, Src->getOperand(0));
2037 replaceOperand(GEP, 1, Sum);
2038 return &GEP;
2042 Indices.append(GEP.op_begin()+2, GEP.op_end());
2043 } else if (isa<Constant>(*GEP.idx_begin()) &&
2044 cast<Constant>(*GEP.idx_begin())->isNullValue() &&
2046 // Otherwise we can do the fold if the first index of the GEP is a zero
2048 Indices.append(GEP.idx_begin()+1, GEP.idx_end());
2052 return isMergedGEPInBounds(*Src, *cast<GEPOperator>(&GEP))
2055 GEP.getName())
2058 GEP.getName());
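Lines 2006-2058 handle the case where the source GEP's last index is a sequential index of the same type as the outer GEP's first index: the two indices are added and a single merged GEP is built from Src's operands plus the outer GEP's remaining indices. A simplified sketch of that add-and-rebuild step (no type, overflow, or inbounds bookkeeping, which the real code performs via SimplifyAddInst and isMergedGEPInBounds):

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Sketch: fold gep (gep Base, ...front, SO1), GO1, ...rest into
// gep Base, ...front, SO1+GO1, ...rest.
static Value *mergeSequentialIndices(GetElementPtrInst &GEP,
                                     GetElementPtrInst &Src,
                                     IRBuilder<> &Builder) {
  Value *SO1 = Src.getOperand(Src.getNumOperands() - 1);
  Value *GO1 = GEP.getOperand(1);
  Value *Sum = Builder.CreateAdd(SO1, GO1);

  SmallVector<Value *, 8> Indices(Src.idx_begin(), Src.idx_end());
  Indices.back() = Sum;                               // replace Src's last index
  Indices.append(GEP.idx_begin() + 1, GEP.idx_end()); // keep GEP's trailing idxs

  return Builder.CreateGEP(Src.getSourceElementType(), Src.getPointerOperand(),
                           Indices, GEP.getName());
}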
2061 // Skip if GEP source element type is scalable. The type alloc size is unknown
2063 if (GEP.getNumIndices() == 1 && !IsGEPSrcEleScalable) {
2064 unsigned AS = GEP.getPointerAddressSpace();
2065 if (GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
2073 V = GEP.getOperand(1);
2075 } else if (match(GEP.getOperand(1),
2079 } else if (match(GEP.getOperand(1),
2088 // The GEP pattern is emitted by the SCEV expander for certain kinds of
2100 m_PtrToInt(m_Specific(GEP.getOperand(0))))))
2118 if (auto *C = dyn_cast<ConstantInt>(GEP.getOperand(1)))
2121 // Transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ...
2122 // into : GEP [10 x i8]* X, i32 0, ...
2124 // Likewise, transform: GEP (bitcast i8* X to [0 x i8]*), i32 0, ...
2125 // into : GEP i8* X, ...
2130 // GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ?
2132 // -> GEP i8* X, ...
2133 SmallVector<Value*, 8> Idx(GEP.idx_begin()+1, GEP.idx_end());
2135 StrippedPtrEltTy, StrippedPtr, Idx, GEP.getName());
2136 Res->setIsInBounds(GEP.isInBounds());
2137 if (StrippedPtrTy->getAddressSpace() == GEP.getAddressSpace())
2141 // GEP (addrspacecast i8 addrspace(1)* X to [0 x i8]*), i32 0, ...
2143 // %0 = GEP i8 addrspace(1)* X, ...
2149 // GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ?
2151 // -> GEP [10 x i8]* X, i32 0, ...
2155 // is a leading zero) we can fold the cast into this GEP.
2156 if (StrippedPtrTy->getAddressSpace() == GEP.getAddressSpace()) {
2157 GEP.setSourceElementType(XATy);
2158 return replaceOperand(GEP, 0, StrippedPtr);
2161 // address space is different. Instead, create a new GEP followed by
2164 // GEP (addrspacecast [10 x i8] addrspace(1)* X to [0 x i8]*),
2167 // %0 = GEP [10 x i8] addrspace(1)* X, ...
2169 SmallVector<Value*, 8> Idx(GEP.idx_begin(), GEP.idx_end());
2171 GEP.isInBounds()
2173 Idx, GEP.getName())
2175 GEP.getName());
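For the bitcast case spelled out in the comments at 2121-2158 (GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... becoming GEP [10 x i8]* X, i32 0, ...), the same-address-space branch simply retargets the GEP at the original array type and pointer. A minimal sketch of that branch (illustrative helper; XATy stands for X's array type, as in the surrounding code):

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Sketch of the same-address-space branch at 2156-2158: when the GEP indexes
// a [0 x T] view of an object X whose real type is [N x T], point the GEP at
// X's type and X itself, making the intervening bitcast dead.
static bool retargetGEPAtOriginalArray(GetElementPtrInst &GEP,
                                       Value *StrippedPtr, ArrayType *XATy) {
  if (StrippedPtr->getType()->getPointerAddressSpace() !=
      GEP.getAddressSpace())
    return false;
  GEP.setSourceElementType(XATy);
  GEP.setOperand(0, StrippedPtr);
  return true;
}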
2180 } else if (GEP.getNumOperands() == 2 && !IsGEPSrcEleScalable) {
2181 // Skip if GEP source element type is scalable. The type alloc size is
2190 Value *Idx[2] = { Constant::getNullValue(IdxType), GEP.getOperand(1) };
2192 GEP.isInBounds()
2194 GEP.getName())
2196 GEP.getName());
2198 // V and GEP are both pointer types --> BitCast
2212 Value *Idx = GEP.getOperand(1);
2224 // Successfully decomposed Idx as NewIdx * Scale, form a new GEP.
2226 // GEP may not be "inbounds".
2228 GEP.isInBounds() && NSW
2230 NewIdx, GEP.getName())
2232 GEP.getName());
2254 Value *Idx = GEP.getOperand(1);
2266 // Successfully decomposed Idx as NewIdx * Scale, form a new GEP.
2268 // GEP may not be "inbounds".
2273 GEP.isInBounds() && NSW
2275 Off, GEP.getName())
2277 GEP.getName());
2288 // addrspacecast. To take advantage of the below bitcast + struct GEP, look
2295 // Into an addrspacecasted GEP of the struct.
2305 // GEP directly using the source operand if this GEP is accessing an element
2316 if (GEP.getNumOperands() == 3 &&
2322 // Create a new GEP here, as using `setOperand()` followed by
2324 // existing GEP Value. Causing issues if this Value is accessed when
2327 GEP.isInBounds()
2330 NGEP->takeName(&GEP);
2332 // Preserve GEP address space to satisfy users
2333 if (NGEP->getType()->getPointerAddressSpace() != GEP.getAddressSpace())
2336 return replaceInstUsesWith(GEP, NGEP);
2346 if (!isa<BitCastInst>(SrcOp) && GEP.accumulateConstantOffset(DL, Offset)) {
2347 // If this GEP instruction doesn't move the pointer, just replace the GEP
2353 // See if the bitcast simplifies, if so, don't nuke this GEP yet.
2360 return &GEP;
2364 if (SrcType->getPointerAddressSpace() != GEP.getAddressSpace())
2371 // GEP.
2375 GEP.isInBounds()
2380 return replaceInstUsesWith(GEP, NGEP);
2381 NGEP->takeName(&GEP);
2383 if (NGEP->getType()->getPointerAddressSpace() != GEP.getAddressSpace())
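Around 2346-2383, a GEP whose pointer operand comes from a bitcast is examined: if its accumulated constant offset is zero it does not move the pointer, so it can be replaced by a plain pointer cast of the pre-bitcast value; otherwise the code builds an i8 GEP plus a cast (the NGEP lines above). A hedged sketch of just the zero-offset branch:

#include "llvm/ADT/APInt.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Sketch: if GEP's pointer operand is `bitcast SrcOp` and the GEP adds a
// constant offset of zero, the whole GEP is just a type change of SrcOp and
// can become a single pointer cast.
static Instruction *rewriteZeroOffsetGEPOfBitCast(GetElementPtrInst &GEP,
                                                  const DataLayout &DL) {
  auto *BCI = dyn_cast<BitCastInst>(GEP.getPointerOperand());
  if (!BCI)
    return nullptr;

  Value *SrcOp = BCI->getOperand(0);
  unsigned IndexWidth = DL.getIndexTypeSizeInBits(GEP.getType());
  APInt Offset(IndexWidth, 0);
  if (!GEP.accumulateConstantOffset(DL, Offset) || Offset != 0)
    return nullptr;

  // Zero offset: the GEP is a no-op address computation over SrcOp.
  return CastInst::CreatePointerBitCastOrAddrSpaceCast(SrcOp, GEP.getType());
}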
2390 if (!GEP.isInBounds()) {
2398 if (GEP.accumulateConstantOffset(DL, BasePtrOffset) &&
2405 GEP.getSourceElementType(), PtrOp, makeArrayRef(Ops).slice(1),
2406 GEP.getName());
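Lines 2390-2406 upgrade a non-inbounds GEP: when its accumulated constant offset is non-negative and does not exceed the allocation size of the underlying object, an equivalent inbounds GEP is emitted instead. A sketch of that check for the simple alloca case (illustrative helper; the real code strips in-bounds constant offsets from the base before testing):

#include "llvm/ADT/APInt.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Sketch: a GEP whose base is an alloca and whose constant offset is
// non-negative and within the alloca's allocation size can safely be marked
// inbounds (simplified from the check at 2390-2406).
static bool canMarkGEPInBounds(GetElementPtrInst &GEP, const DataLayout &DL) {
  auto *AI = dyn_cast<AllocaInst>(GEP.getPointerOperand());
  if (!AI)
    return false;

  unsigned IdxWidth = DL.getIndexTypeSizeInBits(GEP.getType());
  APInt Offset(IdxWidth, 0);
  if (!GEP.accumulateConstantOffset(DL, Offset) || Offset.isNegative())
    return false;

  APInt AllocSize(IdxWidth,
                  DL.getTypeAllocSize(AI->getAllocatedType()).getFixedSize());
  return Offset.ule(AllocSize);
}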
2412 if (Instruction *R = foldSelectGEP(GEP, Builder))
2539 // use a bitcast/GEP of the alloca we are removing.
2569 // Casts, GEP, or anything else: we're about to delete this instruction,
2939 // load from a GEP. This reduces the size of the load. If a load is used
2955 Value *GEP = Builder.CreateInBoundsGEP(L->getType(),
2957 Instruction *NL = Builder.CreateLoad(EV.getType(), GEP);
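The last hits, 2939-2957, shrink a load that only feeds an extractvalue: instead of loading the whole aggregate, an inbounds GEP to the extracted field is built and only that field is loaded. A hedged sketch of the rewrite (hypothetical helper; the real code also re-checks profitability and alignment):

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Sketch: turn `extractvalue (load %p), i0, i1, ...` into a load of just the
// addressed field: `load (gep inbounds %p, 0, i0, i1, ...)`.
static Value *narrowLoadForExtractValue(ExtractValueInst &EV,
                                        IRBuilder<> &Builder) {
  auto *L = dyn_cast<LoadInst>(EV.getAggregateOperand());
  if (!L || !L->isSimple() || !L->hasOneUse())
    return nullptr;

  Builder.SetInsertPoint(L); // the narrow load goes where the wide one was

  // GEP indices: a leading zero, then the extractvalue path as i32 constants.
  SmallVector<Value *, 4> Indices;
  Indices.push_back(Builder.getInt32(0));
  for (unsigned Idx : EV.indices())
    Indices.push_back(Builder.getInt32(Idx));

  Value *GEP = Builder.CreateInBoundsGEP(L->getType(), L->getPointerOperand(),
                                         Indices, "narrow.gep");
  return Builder.CreateLoad(EV.getType(), GEP, "narrow.load");
}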