Searched defs:DefRC (Results 1 - 10 of 10) sorted by relevance

/freebsd-11-stable/contrib/llvm-project/llvm/lib/CodeGen/
DetectDeadLanes.cpp 373 const TargetRegisterClass *DefRC = MRI->getRegClass(Reg); local
PeepholeOptimizer.cpp 669 const TargetRegisterClass *DefRC = MRI->getRegClass(Reg); local
1232 const TargetRegisterClass *DefRC = MRI->getRegClass(Def.Reg); local
RegisterCoalescer.cpp 1284 const TargetRegisterClass *DefRC = TII->getRegClass(MCID, 0, TRI, *MF); local
/freebsd-11-stable/contrib/llvm-project/llvm/lib/Target/X86/
X86RegisterInfo.cpp 219 bool X86RegisterInfo::shouldRewriteCopySrc(const TargetRegisterClass *DefRC, argument
X86SpeculativeLoadHardening.cpp 2336 auto *DefRC = MRI->getRegClass(OldDefReg); local
/freebsd-11-stable/contrib/llvm-project/llvm/lib/Target/Mips/
MipsInstructionSelector.cpp 394 const TargetRegisterClass *DefRC = nullptr; local
/freebsd-11-stable/contrib/llvm-project/llvm/lib/Target/AMDGPU/
SIRegisterInfo.cpp 1295 bool SIRegisterInfo::shouldRewriteCopySrc( const TargetRegisterClass *DefRC, argument
AMDGPUInstructionSelector.cpp 182 const TargetRegisterClass *DefRC local
/freebsd-11-stable/contrib/llvm-project/llvm/lib/Target/Hexagon/
HexagonConstPropagation.cpp 1954 const TargetRegisterClass &DefRC = *MRI->getRegClass(DefR.Reg); local
/freebsd-11-stable/contrib/llvm-project/llvm/lib/Target/AArch64/
AArch64InstructionSelector.cpp 1435 const TargetRegisterClass *DefRC local

Completed in 213 milliseconds
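
Note on the hits: almost every result resolves a def operand's register class ("DefRC") in one of two ways: via MachineRegisterInfo::getRegClass for a virtual register (DetectDeadLanes.cpp, PeepholeOptimizer.cpp, X86SpeculativeLoadHardening.cpp, HexagonConstPropagation.cpp), or via TargetInstrInfo::getRegClass for the operand constraint recorded in the instruction description (RegisterCoalescer.cpp). The sketch below combines both lookups; it is not code from this tree, getDefRC is a hypothetical helper, and LLVM 10-era headers and signatures are assumed.

    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/MachineRegisterInfo.h"
    #include "llvm/CodeGen/TargetInstrInfo.h"
    #include "llvm/CodeGen/TargetRegisterInfo.h"

    using namespace llvm;

    // Hypothetical helper (not in the tree): resolve the register class of
    // MI's def operand 0, mirroring the two patterns in the results above.
    static const TargetRegisterClass *getDefRC(const MachineInstr &MI,
                                               const MachineRegisterInfo &MRI,
                                               const TargetInstrInfo &TII,
                                               const TargetRegisterInfo &TRI) {
      Register Reg = MI.getOperand(0).getReg();
      if (Reg.isVirtual())
        return MRI.getRegClass(Reg); // as in DetectDeadLanes.cpp:373 et al.
      // Physical def: fall back to the operand constraint from the MCInstrDesc,
      // as in RegisterCoalescer.cpp:1284. May return nullptr if unconstrained.
      return TII.getRegClass(MI.getDesc(), 0, &TRI, *MI.getMF());
    }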