/freebsd-13-stable/contrib/llvm-project/llvm/include/llvm/MC/
  LaneBitmask.h
    16    /// Lane masks for sub-register indices are similar to register units for
    84    static constexpr LaneBitmask getLane(unsigned Lane) {
    85    return LaneBitmask(Type(1) << Lane);
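The getLane() hit above builds a mask with a single bit set for the given sub-register lane. Below is a minimal standalone sketch of that idea, assuming a plain 64-bit mask type; SimpleLaneMask and its members are illustrative names, not the real LaneBitmask API.

#include <cassert>
#include <cstdint>

struct SimpleLaneMask {
  using Type = uint64_t;
  Type Mask = 0;

  // Mirrors the getLane() hit above: a mask with only bit `Lane` set.
  static constexpr SimpleLaneMask getLane(unsigned Lane) {
    return SimpleLaneMask{Type(1) << Lane};
  }
  constexpr bool overlaps(SimpleLaneMask Other) const {
    return (Mask & Other.Mask) != 0;
  }
};

int main() {
  SimpleLaneMask L0 = SimpleLaneMask::getLane(0);
  SimpleLaneMask L3 = SimpleLaneMask::getLane(3);
  assert(!L0.overlaps(L3));            // distinct lanes do not conflict
  assert((L0.Mask | L3.Mask) == 0x9);  // bits 0 and 3 set
  return 0;
}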
/freebsd-13-stable/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/
  AArch64PostLegalizerCombiner.cpp
    275   static bool matchDupFromInsertVectorElt(int Lane, MachineInstr &MI,
    278   if (Lane != 0)
    315   static bool matchDupFromBuildVector(int Lane, MachineInstr &MI,
    318   assert(Lane >= 0 && "Expected positive lane?");
    325   Register Reg = BuildVecMI->getOperand(Lane + 1).getReg();
    337   int Lane = *MaybeLane;
    339   if (Lane < 0)
    340   Lane = 0;
    341   if (matchDupFromInsertVectorElt(Lane, MI, MRI, MatchInfo))
    343   if (matchDupFromBuildVector(Lane, M [all...]
/freebsd-13-stable/contrib/llvm-project/llvm/lib/Target/ARM/
  A15SDOptimizer.cpp
    68    const DebugLoc &DL, unsigned Reg, unsigned Lane,
    74    unsigned Lane, const TargetRegisterClass *TRC);
    88    unsigned Lane, unsigned ToInsert);
    416   createDupLane(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const DebugLoc &DL, unsigned Reg, unsigned Lane, bool QPR)
    419   unsigned Lane, bool QPR) {
    425   .addImm(Lane)
    432   createExtractSubreg(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const DebugLoc &DL, unsigned DReg, unsigned Lane, const TargetRegisterClass *TRC)
    434   const DebugLoc &DL, unsigned DReg, unsigned Lane,
    441   .addReg(DReg, 0, Lane);
    477   createInsertSubreg(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const DebugLoc &DL, unsigned DReg, unsigned Lane, unsigned ToInsert)
    479   const DebugLoc &DL, unsigned DReg, unsigned Lane, unsigned ToInsert) {
    487   .addImm(Lane);
    544   unsigned Lane;
    [all...]
  ARMBaseInstrInfo.cpp
    4928  getCorrespondingDRegAndLane(const TargetRegisterInfo *TRI, unsigned SReg, unsigned &Lane)
    4929  unsigned SReg, unsigned &Lane) {
    4931  Lane = 0;
    4936  Lane = 1;
    4951  /// an SPR to a DPR[Lane]. A use of the DPR is being added, which may conflict
    4952  /// with an earlier def of an SPR corresponding to DPR[Lane^1] (i.e. the other
    4958  getImplicitSPRUseForDPRUse(const TargetRegisterInfo *TRI, MachineInstr &MI, unsigned DReg, unsigned Lane, unsigned &ImplicitSReg)
    4960  unsigned Lane, unsigned &ImplicitSReg) {
    4970  (Lane & 1) ? ARM::ssub_0 : ARM::ssub_1);
    4988  unsigned Lane;
    5031  DReg = getCorrespondingDRegAndLane(TRI, SrcReg, Lane);
    5033  // Convert to %RDst = VGETLNi32 %DSrc, Lane, 1
    [all...]
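The getCorrespondingDRegAndLane hits reflect the ARM register layout in which S(2k) and S(2k+1) are the two 32-bit lanes of D(k). A hedged standalone sketch of just that index arithmetic, with plain integers standing in for the real register enums:

#include <cassert>
#include <utility>

// Map an S-register index to {D-register index, lane}: even S regs are lane 0
// of the containing D reg, odd S regs are lane 1.
std::pair<unsigned, unsigned> correspondingDRegAndLane(unsigned SRegIdx) {
  return {SRegIdx / 2, SRegIdx % 2};
}

int main() {
  assert(correspondingDRegAndLane(4) == std::make_pair(2u, 0u));  // S4 -> D2[0]
  assert(correspondingDRegAndLane(5) == std::make_pair(2u, 1u));  // S5 -> D2[1]
  return 0;
}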
  ARMExpandPseudoInsts.cpp
    703   unsigned Lane = MI.getOperand(MI.getDesc().getNumOperands() - 3).getImm();
    707   if (RegSpc == EvenDblSpc && Lane >= RegElts) {
    709   Lane -= RegElts;
    711   assert(Lane < RegElts && "out of range lane for VLD/VST-lane");
    756   MIB.addImm(Lane);
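The hit above renumbers a lane when a 128-bit Q register is addressed through its two 64-bit D-register halves: a lane in the upper half belongs to the odd D register and has RegElts subtracted. A small illustrative sketch of that renumbering; the helper name is an assumption, not LLVM's:

#include <cassert>
#include <utility>

// Returns {UseOddDReg, LaneWithinDReg} for a lane of a Q register split across
// an even/odd D-register pair holding RegElts elements each.
std::pair<bool, unsigned> qLaneToDLane(unsigned Lane, unsigned RegElts) {
  if (Lane >= RegElts)
    return {true, Lane - RegElts};   // upper half: odd D reg, renumbered lane
  return {false, Lane};              // lower half: even D reg, lane unchanged
}

int main() {
  // v8i16: each D register holds 4 elements, so RegElts == 4.
  assert(qLaneToDLane(1, 4) == std::make_pair(false, 1u));
  assert(qLaneToDLane(6, 4) == std::make_pair(true, 2u));
  return 0;
}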
  ARMISelLowering.cpp
    8266   int Lane = SVN->getSplatIndex();
    8268   if (Lane == -1) Lane = 0;
    8271   if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    8277   if (Lane == 0 && V1.getOpcode() == ISD::BUILD_VECTOR &&
    8289   DAG.getConstant(Lane, dl, MVT::i32));
    8455   unsigned Lane = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
    8458   unsigned Mask = ((1 << LaneWidth) - 1) << Lane * LaneWidth;
    8469   SDValue Lane = Op.getOperand(2);
    8470   if (!isa<ConstantSDNode>(Lane))
    8518   unsigned Lane = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    8529   SDValue Lane = Op.getOperand(1);
    15265  SDValue Lane = N0.getOperand(1);
    [all...]
/freebsd-13-stable/contrib/llvm-project/llvm/lib/Transforms/Vectorize/
  VPlanSLP.cpp
    316   for (unsigned Lane = 1, E = MultiNodeOps[0].second.size(); Lane < E; ++Lane) {
    317   LLVM_DEBUG(dbgs() << " Finding best value for lane " << Lane << "\n");
    322   dbgs() << *cast<VPInstruction>(Ops.second[Lane])->getUnderlyingInstr()
    324   Candidates.insert(Ops.second[Lane]);
    333   VPValue *Last = FinalOrder[Op].second[Lane - 1];
  SLPVectorizer.cpp
    775   /// \returns the operand data at \p OpIdx and \p Lane.
    776   OperandData &getData(unsigned OpIdx, unsigned Lane) {
    777   return OpsVec[OpIdx][Lane];
    780   /// \returns the operand data at \p OpIdx and \p Lane. Const version.
    781   const OperandData &getData(unsigned OpIdx, unsigned Lane) const {
    782   return OpsVec[OpIdx][Lane];
    789   for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
    790   ++Lane)
    791   OpsVec[OpIdx][Lane]
    795   swap(unsigned OpIdx1, unsigned OpIdx2, unsigned Lane)
    1023  getBestOperand(unsigned OpIdx, int Lane, int LastLane, ArrayRef<ReorderingMode> ReorderingModes)
    1181  shouldBroadcast(Value *Op, unsigned OpIdx, unsigned Lane)
    1291  int Lane = FirstLane + Direction * Distance;
    1669  unsigned Lane = 0;
    1737  int Lane;        // member of struct llvm::slpvectorizer::ExternalUser
    1933  int Lane = -1;   // member of struct llvm::slpvectorizer::ScheduleData
    2031  int Lane = BundleMember->Lane;
    [all...]
  VPlan.cpp
    203   !(State->Instance->Part == 0 && State->Instance->Lane == 0);
    303   for (unsigned Lane = 0, VF = State->VF; Lane < VF; ++Lane) {
    304   State->Instance->Lane = Lane;
    838   for (unsigned Lane = 0; Lane < VF; ++Lane)
    839   Indices.push_back(ConstantInt::get(STy, Part * VF + Lane));
    [all...]
  LoopVectorize.cpp
    605   /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified,
    620   unsigned Lane = UINT_MAX);
    1793  Value *VectorLoopVal, unsigned Part, unsigned Lane) {
    1813  if (Lane < UINT_MAX)
    1814  VectorLoopValueMap.setScalarValue(CastInst, {Part, Lane}, VectorLoopVal);
    2013  for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
    2014  auto *StartIdx = getSignedIntOrFpConstant(ScalarIVTy, VF * Part + Lane);
    2017  VectorLoopValueMap.setScalarValue(EntryVal, {Part, Lane}, Ad [all...]
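The VPlan.cpp and LoopVectorize.cpp hits above both compute a scalar instance's iteration offset as VF * Part + Lane: with vector width VF, unroll part Part covers lanes Part*VF through Part*VF + VF - 1. A standalone illustration of that indexing; the function and variable names are illustrative only:

#include <cassert>

// Flat iteration offset of scalar instance (Part, Lane) for vector width VF.
unsigned scalarInstanceOffset(unsigned Part, unsigned Lane, unsigned VF) {
  return Part * VF + Lane;
}

int main() {
  const unsigned VF = 4;                         // four lanes per vector part
  assert(scalarInstanceOffset(0, 3, VF) == 3);   // part 0 covers offsets 0..3
  assert(scalarInstanceOffset(1, 0, VF) == 4);   // part 1 starts at offset 4
  assert(scalarInstanceOffset(2, 2, VF) == 10);
  return 0;
}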
  VPlan.h
    88    unsigned Lane;   // member of struct llvm::VPIteration
    153   assert(Instance.Lane < VF && "Queried Scalar Lane is too large.");
    160   return Entry[Instance.Part][Instance.Lane] != nullptr;
    174   return ScalarMapStorage[Key][Instance.Part][Instance.Lane];
    200   ScalarMapStorage[Key][Instance.Part][Instance.Lane] = Scalar;
    212   /// Reset the scalar value associated with \p Key for \p Part and \p Lane.
    220   ScalarMapStorage[Key][Instance.Part][Instance.Lane] = Scalar;
    273   /// Get the generated Value for a given VPValue and given Part and Lane.
    284   Builder.getInt32(Instance.Lane));
    [all...]
/freebsd-13-stable/contrib/llvm-project/llvm/lib/CodeGen/
  InterleavedAccessPass.cpp
    220   // Lane computes x's position in the Mask
    221   unsigned Lane = J * Factor + I;
    222   unsigned NextLane = Lane + Factor;
    223   int LaneValue = Mask[Lane];
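The hit above numbers interleaved lanes as Lane = J * Factor + I, so member I of group J sits Factor lanes after the same member of the previous group. A small standalone sketch of that numbering; the RGB example is an assumption for illustration:

#include <cassert>

// Position of member I of group J in a vector interleaved with this factor.
unsigned interleavedLane(unsigned J, unsigned I, unsigned Factor) {
  return J * Factor + I;
}

int main() {
  const unsigned Factor = 3;  // e.g. pixels stored as R,G,B,R,G,B,...
  assert(interleavedLane(1, 1, Factor) == 4);   // second pixel's G component
  // The same member of consecutive groups is always Factor lanes apart,
  // matching the NextLane = Lane + Factor step in the hit above.
  assert(interleavedLane(2, 0, Factor) - interleavedLane(1, 0, Factor) == Factor);
  return 0;
}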
/freebsd-13-stable/contrib/llvm-project/llvm/lib/Target/WebAssembly/
  WebAssemblyISelLowering.cpp
    1450  auto GetSwizzleSrcs = [](size_t I, const SDValue &Lane) {
    1452  if (Lane->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
    1454  const SDValue &SwizzleSrc = Lane->getOperand(0);
    1455  const SDValue &IndexExt = Lane->getOperand(1);
    1498  const SDValue &Lane = Op->getOperand(I);
    1499  if (Lane.isUndef())
    1502  AddCount(SplatValueCounts, Lane);
    1504  if (IsConstant(Lane)) {
    1507  auto SwizzleSrcs = GetSwizzleSrcs(I, Lane);
    1535  IsLaneConstructed = [&, Swizzled](size_t I, const SDValue &Lane) {
    1575  const SDValue &Lane = Op->getOperand(I);
    [all...]
/freebsd-13-stable/contrib/llvm-project/llvm/lib/Target/AMDGPU/
  SIMachineFunctionInfo.h
    438   int Lane = -1;   // member of struct SpilledReg
    441   SpilledReg(Register R, int L) : VGPR (R), Lane (L) {}
    443   bool hasLane() { return Lane != -1;}
    527   MCPhysReg getVGPRToAGPRSpill(int FrameIndex, unsigned Lane) const {
    530   : I->second.Lanes[Lane];
  SIFrameLowering.cpp
    98    << printReg(Spill.VGPR, TRI) << ':' << Spill.Lane
    122   << printReg(Spill.VGPR, TRI) << ':' << Spill.Lane
    836   .addImm(Spill[0].Lane)
    855   .addImm(Spill[0].Lane)
    1007  .addImm(Spill[0].Lane);
    1033  .addImm(Spill[0].Lane);
  SIRegisterInfo.cpp
    653   spillVGPRtoAGPR(const GCNSubtarget &ST, MachineBasicBlock::iterator MI, int Index, unsigned Lane, unsigned ValueReg, bool IsKill)
    656   unsigned Lane,
    664   MCPhysReg Reg = MFI->getVGPRToAGPRSpill(Index, Lane);
    1036  .addImm(Spill.Lane)
    1137  .addImm(Spill.Lane);
  SIISelLowering.cpp
    10660  unsigned Lane = 0;
    10695  // Lane means which subreg of %vgpra_vgprb_vgprc_vgprd is used.
    10696  // Note that subregs are packed, i.e. Lane==0 is the first bit set
    10697  // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit
    10699  Lane = SubIdx2Lane(I->getConstantOperandVal(1));
    10702  if (UsesTFC && Lane == TFCLane) {
    10703  Users[Lane] = *I;
    10707  for (unsigned i = 0, Dmask = OldDmask; (i <= Lane) && (Dmask != 0); i++) {
    10713  if (Users[Lane])
    10716  Users[Lane] [all...]
/freebsd-13-stable/contrib/llvm-project/llvm/lib/Target/X86/
  X86InterleavedAccess.cpp
    433   // {0, Stride%(VF/Lane), (2*Stride%(VF/Lane))...(VF*Stride/Lane)%(VF/Lane),
    434   //  (VF/Lane), (VF/Lane)+Stride%(VF/Lane),...,
    435   //  (VF/Lane)+(VF*Stride/Lane)%(VF/Lane)}
    621   int Lane = (VectorWidth / 128 > 0) ? VectorWidth / 128 : 1;
    [all...]
  X86ISelLowering.cpp
    6719  for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
    6722  Mask.push_back(Elt + (Lane * NumEltsPerLane));
    6724  Mask.push_back(Elt + (Lane * NumEltsPerLane) + Offset);
    6742  for (int Lane = 0; Lane != NumLanes; ++Lane) {
    6744  int OuterIdx = (Lane * NumEltsPerLane) + Elt;
    6745  int InnerIdx = (Lane * NumInnerEltsPerLan [all...]
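The X86ISelLowering hits above build shuffle masks one 128-bit lane at a time: an element's flat index is its index within the lane plus Lane * NumEltsPerLane. A hedged standalone sketch of that mask construction; the identity-within-lane pattern and the 2x4 example are assumptions for illustration:

#include <cassert>
#include <vector>

// Build a mask that keeps every element in place, lane by lane, using the
// Elt + Lane * NumEltsPerLane indexing from the hits above.
std::vector<int> buildPerLaneIdentityMask(unsigned NumLanes, unsigned NumEltsPerLane) {
  std::vector<int> Mask;
  for (unsigned Lane = 0; Lane != NumLanes; ++Lane)
    for (unsigned Elt = 0; Elt != NumEltsPerLane; ++Elt)
      Mask.push_back(Elt + Lane * NumEltsPerLane);
  return Mask;
}

int main() {
  // A 256-bit vector of i32 has 2 lanes of 4 elements each.
  std::vector<int> Mask = buildPerLaneIdentityMask(2, 4);
  assert(Mask.size() == 8);
  assert(Mask[5] == 5);   // element 1 of lane 1 -> flat index 1 + 1 * 4
  return 0;
}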
/freebsd-13-stable/contrib/llvm-project/llvm/lib/Transforms/InstCombine/
  InstCombineSimplifyDemanded.cpp
    1778  for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
    1779  unsigned LaneIdx = Lane * VWidthPerLane;
    1783  OpDemandedElts.setBit((Lane * InnerVWidthPerLane) + Elt);
    1793  for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
    1794  APInt LaneElts = OpUndefElts.lshr(InnerVWidthPerLane * Lane);
    1796  LaneElts <<= InnerVWidthPerLane * (2 * Lane [all...]
  InstCombineCalls.cpp
    667   for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
    669   PackMask.push_back(Elt + (Lane * NumSrcEltsPerLane));
    671   PackMask.push_back(Elt + (Lane * NumSrcEltsPerLane) + NumSrcElts);
/freebsd-13-stable/contrib/llvm-project/llvm/lib/Target/Mips/
  MipsSEISelLowering.cpp
    3176  unsigned Lane = MI.getOperand(2).getImm();
    3178  if (Lane == 0) {
    3194  BuildMI(*BB, MI, DL, TII->get(Mips::SPLATI_W), Wt).addReg(Ws).addImm(Lane);
    3221  unsigned Lane = MI.getOperand(2).getImm() * 2;
    3224  if (Lane == 0)
    3251  unsigned Lane = MI.getOperand(2).getImm();
    3263  .addImm(Lane)
    3287  unsigned Lane = MI.getOperand(2).getImm();
    3297  .addImm(Lane)
/freebsd-13-stable/contrib/llvm-project/llvm/lib/Target/AArch64/
  AArch64ISelLowering.cpp
    7539   SDValue Lane = DAG.getConstant(OpNum - OP_VDUP0, dl, MVT::i64);
    7540   return DAG.getNode(Opcode, dl, VT, OpLHS, Lane);
    7662   int Lane = SVN->getSplatIndex();
    7664   if (Lane == -1)
    7665   Lane = 0;
    7667   if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR)
    7673   !isa<ConstantSDNode>(V1.getOperand(Lane)))
    7674   return DAG.getNode(AArch64ISD::DUP, dl, VT, V1.getOperand(Lane));
    7711   if (getScaledOffsetDup(V1, Lane, CastVT)) {
    7716   Lane
    8544   SDValue Lane = Value.getOperand(1);
    11309  SDValue Lane = Op1.getOperand(1);
    [all...]
/freebsd-13-stable/contrib/llvm-project/llvm/lib/Analysis/
  ConstantFolding.cpp
    2719  SmallVector<Constant *, 4> Lane(Operands.size());
    2766  Lane[J] = Operands[J];
    2774  Lane[J] = Agg;
    2779  ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, Call);
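The ConstantFolding hits above gather lane J of every operand into a scratch Lane vector and fold the scalar call once per lane. A standalone sketch of that per-lane folding pattern, with plain ints standing in for LLVM's Constant values; all names here are illustrative:

#include <cassert>
#include <cstddef>
#include <functional>
#include <vector>

// Fold a scalar operation across all lanes of equally sized vector operands.
std::vector<int>
foldVectorCall(const std::vector<std::vector<int>> &Operands,
               const std::function<int(const std::vector<int> &)> &ScalarOp) {
  const std::size_t NumLanes = Operands.front().size();
  std::vector<int> Result(NumLanes);
  std::vector<int> Lane(Operands.size());
  for (std::size_t I = 0; I != NumLanes; ++I) {
    for (std::size_t J = 0; J != Operands.size(); ++J)
      Lane[J] = Operands[J][I];     // gather lane I of every operand
    Result[I] = ScalarOp(Lane);     // one scalar fold per lane
  }
  return Result;
}

int main() {
  auto Add = [](const std::vector<int> &L) { return L[0] + L[1]; };
  std::vector<int> R = foldVectorCall({{1, 2, 3}, {10, 20, 30}}, Add);
  assert((R == std::vector<int>{11, 22, 33}));
  return 0;
}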
/freebsd-13-stable/contrib/llvm-project/llvm/lib/Target/AMDGPU/AsmParser/
  AMDGPUAsmParser.cpp
    5651  int64_t Lane[LANE_NUM];
    5652  if (parseSwizzleOperands(LANE_NUM, Lane, 0, LANE_MAX,
    5656  Imm |= Lane[I] << (LANE_SHIFT * I);
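The hit above packs each parsed swizzle lane into its own bit field of the immediate with Imm |= Lane[I] << (LANE_SHIFT * I). A minimal sketch of that packing; the LANE_NUM and LANE_SHIFT values below are assumptions for illustration, not the target's real encoding constants:

#include <cassert>
#include <cstdint>

constexpr unsigned LANE_NUM = 4;    // assumed: four lanes per swizzle group
constexpr unsigned LANE_SHIFT = 2;  // assumed: two bits per lane selector

uint32_t packSwizzleLanes(const int64_t (&Lane)[LANE_NUM]) {
  uint32_t Imm = 0;
  for (unsigned I = 0; I < LANE_NUM; ++I)
    Imm |= uint32_t(Lane[I]) << (LANE_SHIFT * I);   // lane I -> its bit field
  return Imm;
}

int main() {
  int64_t Lanes[LANE_NUM] = {0, 1, 2, 3};
  assert(packSwizzleLanes(Lanes) == 0xE4);   // identity swizzle, 0b11100100
  return 0;
}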