Lines matching refs:AMDGPU in /freebsd-13-stable/contrib/llvm-project/llvm/lib/Target/AMDGPU/

The excerpts below are from SIRegisterInfo.cpp (see, e.g., SIRegisterInfo::getBaseRegister at source line 149); the number at the start of each line is its line number in that file.

45     : AMDGPUGenRegisterInfo(AMDGPU::PC_REG, ST.getAMDGPUDwarfFlavour()), ST(ST),
48 assert(getSubRegIndexLaneMask(AMDGPU::sub0).getAsInteger() == 3 &&
49 getSubRegIndexLaneMask(AMDGPU::sub31).getAsInteger() == (3ULL << 62) &&
50 (getSubRegIndexLaneMask(AMDGPU::lo16) |
51 getSubRegIndexLaneMask(AMDGPU::hi16)).getAsInteger() ==
52 getSubRegIndexLaneMask(AMDGPU::sub0).getAsInteger() &&
56 RegPressureIgnoredUnits.set(*MCRegUnitIterator(AMDGPU::M0, this));
57 for (auto Reg : AMDGPU::VGPR_HI16RegClass)
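The three asserts above (source lines 48-52) pin down the lane-mask layout: two mask bits per 32-bit sub-register, with lo16 and hi16 splitting sub0's pair. A minimal standalone restatement of those invariants, using plain integers rather than LLVM's LaneBitmask type:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t Sub0  = 3ULL << 0;   // lanes of sub0: the two lowest bits
      const uint64_t Sub31 = 3ULL << 62;  // lanes of sub31: the two highest bits
      const uint64_t Lo16  = 1ULL << 0;   // low 16-bit half of sub0
      const uint64_t Hi16  = 1ULL << 1;   // high 16-bit half of sub0
      assert(Sub0 == 3 && Sub31 == (3ULL << 62));
      assert((Lo16 | Hi16) == Sub0 && "halves together cover the 32-bit lane");
      return 0;
    }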
104 static const MCPhysReg NoCalleeSavedReg = AMDGPU::NoRegister;
149 Register SIRegisterInfo::getBaseRegister() const { return AMDGPU::SGPR34; }
166 {AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
167 AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
168 AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
169 AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15,
170 AMDGPU::sub16, AMDGPU::sub17, AMDGPU::sub18, AMDGPU::sub19,
171 AMDGPU::sub20, AMDGPU::sub21, AMDGPU::sub22, AMDGPU::sub23,
172 AMDGPU::sub24, AMDGPU::sub25, AMDGPU::sub26, AMDGPU::sub27,
173 AMDGPU::sub28, AMDGPU::sub29, AMDGPU::sub30, AMDGPU::sub31},
174 {AMDGPU::sub0_sub1, AMDGPU::sub1_sub2, AMDGPU::sub2_sub3,
175 AMDGPU::sub3_sub4, AMDGPU::sub4_sub5, AMDGPU::sub5_sub6,
176 AMDGPU::sub6_sub7, AMDGPU::sub7_sub8, AMDGPU::sub8_sub9,
177 AMDGPU::sub9_sub10, AMDGPU::sub10_sub11, AMDGPU::sub11_sub12,
178 AMDGPU::sub12_sub13, AMDGPU::sub13_sub14, AMDGPU::sub14_sub15,
179 AMDGPU::sub15_sub16, AMDGPU::sub16_sub17, AMDGPU::sub17_sub18,
180 AMDGPU::sub18_sub19, AMDGPU::sub19_sub20, AMDGPU::sub20_sub21,
181 AMDGPU::sub21_sub22, AMDGPU::sub22_sub23, AMDGPU::sub23_sub24,
182 AMDGPU::sub24_sub25, AMDGPU::sub25_sub26, AMDGPU::sub26_sub27,
183 AMDGPU::sub27_sub28, AMDGPU::sub28_sub29, AMDGPU::sub29_sub30,
184 AMDGPU::sub30_sub31, AMDGPU::NoSubRegister},
185 {AMDGPU::sub0_sub1_sub2, AMDGPU::sub1_sub2_sub3,
186 AMDGPU::sub2_sub3_sub4, AMDGPU::sub3_sub4_sub5,
187 AMDGPU::sub4_sub5_sub6, AMDGPU::sub5_sub6_sub7,
188 AMDGPU::sub6_sub7_sub8, AMDGPU::sub7_sub8_sub9,
189 AMDGPU::sub8_sub9_sub10, AMDGPU::sub9_sub10_sub11,
190 AMDGPU::sub10_sub11_sub12, AMDGPU::sub11_sub12_sub13,
191 AMDGPU::sub12_sub13_sub14, AMDGPU::sub13_sub14_sub15,
192 AMDGPU::sub14_sub15_sub16, AMDGPU::sub15_sub16_sub17,
193 AMDGPU::sub16_sub17_sub18, AMDGPU::sub17_sub18_sub19,
194 AMDGPU::sub18_sub19_sub20, AMDGPU::sub19_sub20_sub21,
195 AMDGPU::sub20_sub21_sub22, AMDGPU::sub21_sub22_sub23,
196 AMDGPU::sub22_sub23_sub24, AMDGPU::sub23_sub24_sub25,
197 AMDGPU::sub24_sub25_sub26, AMDGPU::sub25_sub26_sub27,
198 AMDGPU::sub26_sub27_sub28, AMDGPU::sub27_sub28_sub29,
199 AMDGPU::sub28_sub29_sub30, AMDGPU::sub29_sub30_sub31,
200 AMDGPU::NoSubRegister, AMDGPU::NoSubRegister},
201 {AMDGPU::sub0_sub1_sub2_sub3, AMDGPU::sub1_sub2_sub3_sub4,
202 AMDGPU::sub2_sub3_sub4_sub5, AMDGPU::sub3_sub4_sub5_sub6,
203 AMDGPU::sub4_sub5_sub6_sub7, AMDGPU::sub5_sub6_sub7_sub8,
204 AMDGPU::sub6_sub7_sub8_sub9, AMDGPU::sub7_sub8_sub9_sub10,
205 AMDGPU::sub8_sub9_sub10_sub11, AMDGPU::sub9_sub10_sub11_sub12,
206 AMDGPU::sub10_sub11_sub12_sub13, AMDGPU::sub11_sub12_sub13_sub14,
207 AMDGPU::sub12_sub13_sub14_sub15, AMDGPU::sub13_sub14_sub15_sub16,
208 AMDGPU::sub14_sub15_sub16_sub17, AMDGPU::sub15_sub16_sub17_sub18,
209 AMDGPU::sub16_sub17_sub18_sub19, AMDGPU::sub17_sub18_sub19_sub20,
210 AMDGPU::sub18_sub19_sub20_sub21, AMDGPU::sub19_sub20_sub21_sub22,
211 AMDGPU::sub20_sub21_sub22_sub23, AMDGPU::sub21_sub22_sub23_sub24,
212 AMDGPU::sub22_sub23_sub24_sub25, AMDGPU::sub23_sub24_sub25_sub26,
213 AMDGPU::sub24_sub25_sub26_sub27, AMDGPU::sub25_sub26_sub27_sub28,
214 AMDGPU::sub26_sub27_sub28_sub29, AMDGPU::sub27_sub28_sub29_sub30,
215 AMDGPU::sub28_sub29_sub30_sub31, AMDGPU::NoSubRegister,
216 AMDGPU::NoSubRegister, AMDGPU::NoSubRegister}};
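Source lines 166-216 are one static table: row k lists the k-sub-register-wide tuples by starting channel, padded with AMDGPU::NoSubRegister so every row has 32 columns. A standalone toy of the lookup shape (the names and the 4-channel truncation are assumptions, not LLVM's code):

    #include <array>
    #include <cstdio>

    // Two rows of such a table, truncated to 4 channels for brevity.
    static const std::array<std::array<const char *, 4>, 2> SplitParts = {{
        {"sub0", "sub1", "sub2", "sub3"},                         // width 1
        {"sub0_sub1", "sub1_sub2", "sub2_sub3", "NoSubRegister"}, // width 2
    }};

    int main() {
      std::printf("%s\n", SplitParts[2 - 1][1]); // row = width-1 -> "sub1_sub2"
      return 0;
    }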
229 MCRegister BaseReg(AMDGPU::SGPR_32RegClass.getRegister(BaseIdx));
230 return getMatchingSuperReg(BaseReg, AMDGPU::sub0, &AMDGPU::SGPR_128RegClass);
235 Reserved.set(AMDGPU::MODE);
239 reserveRegisterTuples(Reserved, AMDGPU::EXEC);
240 reserveRegisterTuples(Reserved, AMDGPU::FLAT_SCR);
243 reserveRegisterTuples(Reserved, AMDGPU::M0);
246 reserveRegisterTuples(Reserved, AMDGPU::SRC_VCCZ);
247 reserveRegisterTuples(Reserved, AMDGPU::SRC_EXECZ);
248 reserveRegisterTuples(Reserved, AMDGPU::SRC_SCC);
251 reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_BASE);
252 reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_LIMIT);
253 reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_BASE);
254 reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_LIMIT);
257 reserveRegisterTuples(Reserved, AMDGPU::SRC_POPS_EXITING_WAVE_ID);
260 reserveRegisterTuples(Reserved, AMDGPU::XNACK_MASK);
263 reserveRegisterTuples(Reserved, AMDGPU::LDS_DIRECT);
266 reserveRegisterTuples(Reserved, AMDGPU::TBA);
267 reserveRegisterTuples(Reserved, AMDGPU::TMA);
268 reserveRegisterTuples(Reserved, AMDGPU::TTMP0_TTMP1);
269 reserveRegisterTuples(Reserved, AMDGPU::TTMP2_TTMP3);
270 reserveRegisterTuples(Reserved, AMDGPU::TTMP4_TTMP5);
271 reserveRegisterTuples(Reserved, AMDGPU::TTMP6_TTMP7);
272 reserveRegisterTuples(Reserved, AMDGPU::TTMP8_TTMP9);
273 reserveRegisterTuples(Reserved, AMDGPU::TTMP10_TTMP11);
274 reserveRegisterTuples(Reserved, AMDGPU::TTMP12_TTMP13);
275 reserveRegisterTuples(Reserved, AMDGPU::TTMP14_TTMP15);
278 reserveRegisterTuples(Reserved, AMDGPU::SGPR_NULL);
283 Reserved.set(AMDGPU::VCC);
284 Reserved.set(AMDGPU::VCC_HI);
288 unsigned TotalNumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
290 unsigned Reg = AMDGPU::SGPR_32RegClass.getRegister(i);
295 unsigned TotalNumVGPRs = AMDGPU::VGPR_32RegClass.getNumRegs();
297 unsigned Reg = AMDGPU::VGPR_32RegClass.getRegister(i);
299 Reg = AMDGPU::AGPR_32RegClass.getRegister(i);
303 for (auto Reg : AMDGPU::SReg_32RegClass) {
304 Reserved.set(getSubReg(Reg, AMDGPU::hi16));
305 Register Low = getSubReg(Reg, AMDGPU::lo16);
307 if (!AMDGPU::SGPR_LO16RegClass.contains(Low))
311 for (auto Reg : AMDGPU::AGPR_32RegClass) {
312 Reserved.set(getSubReg(Reg, AMDGPU::hi16));
318 unsigned Reg = AMDGPU::AGPR_32RegClass.getRegister(i);
326 if (ScratchRSrcReg != AMDGPU::NoRegister) {
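reserveRegisterTuples is called throughout source lines 239-276, but its body contains no "AMDGPU" token, so the search skips it. A hedged sketch of what such a helper plausibly does (LLVM-flavored, assumes LLVM headers; not the verbatim body): reserving a tuple such as TTMP0_TTMP1 must also reserve every unit aliasing it.

    // Plausible shape, assuming LLVM's MCRegAliasIterator:
    void SIRegisterInfo::reserveRegisterTuples(BitVector &Reserved,
                                               MCRegister Reg) const {
      // Visit Reg itself plus everything overlapping it (TTMP0, TTMP1, ...).
      for (MCRegAliasIterator R(Reg, this, /*IncludeSelf=*/true); R.isValid(); ++R)
        Reserved.set(*R);
    }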
421 int OffIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
422 AMDGPU::OpName::offset);
431 assert(Idx == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
432 AMDGPU::OpName::vaddr) &&
461 BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::V_MOV_B32_e32), BaseReg)
467 Register OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
469 Register FIReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
471 BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
473 BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::V_MOV_B32_e32), FIReg)
499 MachineOperand *FIOp = TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);
506 assert(TII->getNamedOperand(MI, AMDGPU::OpName::soffset)->getReg() ==
510 MachineOperand *OffsetOp = TII->getNamedOperand(MI, AMDGPU::OpName::offset);
534 return &AMDGPU::VGPR_32RegClass;
540 case AMDGPU::SI_SPILL_S1024_SAVE:
541 case AMDGPU::SI_SPILL_S1024_RESTORE:
542 case AMDGPU::SI_SPILL_V1024_SAVE:
543 case AMDGPU::SI_SPILL_V1024_RESTORE:
544 case AMDGPU::SI_SPILL_A1024_SAVE:
545 case AMDGPU::SI_SPILL_A1024_RESTORE:
547 case AMDGPU::SI_SPILL_S512_SAVE:
548 case AMDGPU::SI_SPILL_S512_RESTORE:
549 case AMDGPU::SI_SPILL_V512_SAVE:
550 case AMDGPU::SI_SPILL_V512_RESTORE:
551 case AMDGPU::SI_SPILL_A512_SAVE:
552 case AMDGPU::SI_SPILL_A512_RESTORE:
554 case AMDGPU::SI_SPILL_S256_SAVE:
555 case AMDGPU::SI_SPILL_S256_RESTORE:
556 case AMDGPU::SI_SPILL_V256_SAVE:
557 case AMDGPU::SI_SPILL_V256_RESTORE:
559 case AMDGPU::SI_SPILL_S192_SAVE:
560 case AMDGPU::SI_SPILL_S192_RESTORE:
561 case AMDGPU::SI_SPILL_V192_SAVE:
562 case AMDGPU::SI_SPILL_V192_RESTORE:
564 case AMDGPU::SI_SPILL_S160_SAVE:
565 case AMDGPU::SI_SPILL_S160_RESTORE:
566 case AMDGPU::SI_SPILL_V160_SAVE:
567 case AMDGPU::SI_SPILL_V160_RESTORE:
569 case AMDGPU::SI_SPILL_S128_SAVE:
570 case AMDGPU::SI_SPILL_S128_RESTORE:
571 case AMDGPU::SI_SPILL_V128_SAVE:
572 case AMDGPU::SI_SPILL_V128_RESTORE:
573 case AMDGPU::SI_SPILL_A128_SAVE:
574 case AMDGPU::SI_SPILL_A128_RESTORE:
576 case AMDGPU::SI_SPILL_S96_SAVE:
577 case AMDGPU::SI_SPILL_S96_RESTORE:
578 case AMDGPU::SI_SPILL_V96_SAVE:
579 case AMDGPU::SI_SPILL_V96_RESTORE:
581 case AMDGPU::SI_SPILL_S64_SAVE:
582 case AMDGPU::SI_SPILL_S64_RESTORE:
583 case AMDGPU::SI_SPILL_V64_SAVE:
584 case AMDGPU::SI_SPILL_V64_RESTORE:
585 case AMDGPU::SI_SPILL_A64_SAVE:
586 case AMDGPU::SI_SPILL_A64_RESTORE:
588 case AMDGPU::SI_SPILL_S32_SAVE:
589 case AMDGPU::SI_SPILL_S32_RESTORE:
590 case AMDGPU::SI_SPILL_V32_SAVE:
591 case AMDGPU::SI_SPILL_V32_RESTORE:
592 case AMDGPU::SI_SPILL_A32_SAVE:
593 case AMDGPU::SI_SPILL_A32_RESTORE:
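The return statements for the switch at source lines 540-593 fall on unmatched lines, but the case grouping (1024, 512, 256, 192, 160, 128, 96, 64, 32) implies one 32-bit sub-register per dword of the spilled tuple. The presumed mapping, as standalone arithmetic:

    // Presumed: spill width in bits -> number of 32-bit parts to move.
    unsigned numSubRegsForSpillOp(unsigned BitWidth) {
      return BitWidth / 32; // e.g. the SI_SPILL_S256_* pseudos cover 8 parts
    }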
601 case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
602 return AMDGPU::BUFFER_STORE_DWORD_OFFSET;
603 case AMDGPU::BUFFER_STORE_BYTE_OFFEN:
604 return AMDGPU::BUFFER_STORE_BYTE_OFFSET;
605 case AMDGPU::BUFFER_STORE_SHORT_OFFEN:
606 return AMDGPU::BUFFER_STORE_SHORT_OFFSET;
607 case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN:
608 return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET;
609 case AMDGPU::BUFFER_STORE_DWORDX4_OFFEN:
610 return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET;
611 case AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFEN:
612 return AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFSET;
613 case AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFEN:
614 return AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFSET;
622 case AMDGPU::BUFFER_LOAD_DWORD_OFFEN:
623 return AMDGPU::BUFFER_LOAD_DWORD_OFFSET;
624 case AMDGPU::BUFFER_LOAD_UBYTE_OFFEN:
625 return AMDGPU::BUFFER_LOAD_UBYTE_OFFSET;
626 case AMDGPU::BUFFER_LOAD_SBYTE_OFFEN:
627 return AMDGPU::BUFFER_LOAD_SBYTE_OFFSET;
628 case AMDGPU::BUFFER_LOAD_USHORT_OFFEN:
629 return AMDGPU::BUFFER_LOAD_USHORT_OFFSET;
630 case AMDGPU::BUFFER_LOAD_SSHORT_OFFEN:
631 return AMDGPU::BUFFER_LOAD_SSHORT_OFFSET;
632 case AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN:
633 return AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET;
634 case AMDGPU::BUFFER_LOAD_DWORDX4_OFFEN:
635 return AMDGPU::BUFFER_LOAD_DWORDX4_OFFSET;
636 case AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFEN:
637 return AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFSET;
638 case AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFEN:
639 return AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFSET;
640 case AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFEN:
641 return AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFSET;
642 case AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFEN:
643 return AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFSET;
644 case AMDGPU::BUFFER_LOAD_SHORT_D16_OFFEN:
645 return AMDGPU::BUFFER_LOAD_SHORT_D16_OFFSET;
646 case AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFEN:
647 return AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFSET;
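Both mapping switches (source lines 601-647) follow a single naming rule: each _OFFEN (VGPR-addressed) buffer opcode maps to its _OFFSET (immediate-addressed) twin, used once the frame index folds into a constant and vaddr can be dropped. The rule as a standalone toy over names instead of opcode enums:

    #include <optional>
    #include <string>

    std::optional<std::string> offenToOffset(const std::string &Opc) {
      const std::string Suffix = "_OFFEN";
      if (Opc.size() > Suffix.size() &&
          Opc.compare(Opc.size() - Suffix.size(), Suffix.size(), Suffix) == 0)
        return Opc.substr(0, Opc.size() - Suffix.size()) + "_OFFSET";
      return std::nullopt; // no immediate-offset twin; keep the _OFFEN form
    }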
666 if (Reg == AMDGPU::NoRegister)
675 unsigned Opc = (IsStore ^ TRI->isVGPR(MRI, Reg)) ? AMDGPU::V_ACCVGPR_WRITE_B32
676 : AMDGPU::V_ACCVGPR_READ_B32;
700 const MachineOperand *Reg = TII->getNamedOperand(*MI, AMDGPU::OpName::vdata);
707 .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc))
708 .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::soffset))
718 AMDGPU::OpName::vdata_in);
749 unsigned NumSubRegs = AMDGPU::getRegBitWidth(RC->getID()) / (EltSize * CHAR_BIT);
758 hasAGPRs(RC) ? TII->getNamedOperand(*MI, AMDGPU::OpName::tmp)->getReg()
774 SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0, false);
795 if (ScratchOffsetReg == AMDGPU::NoRegister) {
796 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), SOffset)
799 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), SOffset)
824 if (TmpReg != AMDGPU::NoRegister) {
826 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_ACCVGPR_READ_B32), TmpReg)
840 if (SOffset == AMDGPU::NoRegister) {
853 if (!IsStore && TmpReg != AMDGPU::NoRegister)
854 MIB = BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_ACCVGPR_WRITE_B32),
865 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), SOffset)
897 assert(SuperReg != AMDGPU::EXEC_LO && SuperReg != AMDGPU::EXEC_HI &&
898 SuperReg != AMDGPU::EXEC && "exec should never spill");
902 bool OnlyExecLo = isWave32 || NumSubRegs == 1 || SuperReg == AMDGPU::EXEC_HI;
904 unsigned ExecMovOpc = OnlyExecLo ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
905 Register ExecReg = OnlyExecLo ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
917 getSubReg(SuperReg, SplitParts[FirstPart + ExecLane]), AMDGPU::sub0,
918 &AMDGPU::SReg_64_XEXECRegClass);
945 buildSpillLoadStore(MI, AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
952 buildSpillLoadStore(MI, AMDGPU::BUFFER_STORE_DWORD_OFFSET, Index, VGPR,
969 BuildMI(*MBB, MI, DL, TII->getMCOpcodeFromPseudo(AMDGPU::V_READLANE_B32),
974 BuildMI(*MBB, MI, DL, TII->getMCOpcodeFromPseudo(AMDGPU::V_READLANE_B32),
1007 assert(SuperReg != AMDGPU::M0 && "m0 should never spill");
1008 assert(SuperReg != AMDGPU::EXEC_LO && SuperReg != AMDGPU::EXEC_HI &&
1009 SuperReg != AMDGPU::EXEC && "exec should never spill");
1033 TII->getMCOpcodeFromPseudo(AMDGPU::V_WRITELANE_B32),
1046 Register TmpVGPR = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
1068 TII->getMCOpcodeFromPseudo(AMDGPU::V_WRITELANE_B32),
1116 assert(SuperReg != AMDGPU::M0 && "m0 should never spill");
1117 assert(SuperReg != AMDGPU::EXEC_LO && SuperReg != AMDGPU::EXEC_HI &&
1118 SuperReg != AMDGPU::EXEC && "exec should never spill");
1134 BuildMI(*MBB, MI, DL, TII->getMCOpcodeFromPseudo(AMDGPU::V_READLANE_B32),
1142 Register TmpVGPR = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
1164 TII->getMCOpcodeFromPseudo(AMDGPU::V_READLANE_B32), SubReg)
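Source lines 1007-1164 show the SGPR spill idiom: each 32-bit sub-register is parked in one lane of a VGPR with V_WRITELANE_B32 and recovered with V_READLANE_B32. A standalone model of the two primitives (a VGPR as one value per lane; wave64 assumed):

    #include <array>
    #include <cstdint>

    struct VGPR { std::array<uint32_t, 64> Lane{}; }; // wave64: 64 lanes

    // v_writelane_b32: store a scalar into one chosen lane.
    void writelane(VGPR &V, uint32_t SVal, unsigned L) { V.Lane[L] = SVal; }
    // v_readlane_b32: read one lane back out into a scalar.
    uint32_t readlane(const VGPR &V, unsigned L) { return V.Lane[L]; }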
1185 case AMDGPU::SI_SPILL_S1024_SAVE:
1186 case AMDGPU::SI_SPILL_S512_SAVE:
1187 case AMDGPU::SI_SPILL_S256_SAVE:
1188 case AMDGPU::SI_SPILL_S192_SAVE:
1189 case AMDGPU::SI_SPILL_S160_SAVE:
1190 case AMDGPU::SI_SPILL_S128_SAVE:
1191 case AMDGPU::SI_SPILL_S96_SAVE:
1192 case AMDGPU::SI_SPILL_S64_SAVE:
1193 case AMDGPU::SI_SPILL_S32_SAVE:
1195 case AMDGPU::SI_SPILL_S1024_RESTORE:
1196 case AMDGPU::SI_SPILL_S512_RESTORE:
1197 case AMDGPU::SI_SPILL_S256_RESTORE:
1198 case AMDGPU::SI_SPILL_S192_RESTORE:
1199 case AMDGPU::SI_SPILL_S160_RESTORE:
1200 case AMDGPU::SI_SPILL_S128_RESTORE:
1201 case AMDGPU::SI_SPILL_S96_RESTORE:
1202 case AMDGPU::SI_SPILL_S64_RESTORE:
1203 case AMDGPU::SI_SPILL_S32_RESTORE:
1231 case AMDGPU::SI_SPILL_S1024_SAVE:
1232 case AMDGPU::SI_SPILL_S512_SAVE:
1233 case AMDGPU::SI_SPILL_S256_SAVE:
1234 case AMDGPU::SI_SPILL_S192_SAVE:
1235 case AMDGPU::SI_SPILL_S160_SAVE:
1236 case AMDGPU::SI_SPILL_S128_SAVE:
1237 case AMDGPU::SI_SPILL_S96_SAVE:
1238 case AMDGPU::SI_SPILL_S64_SAVE:
1239 case AMDGPU::SI_SPILL_S32_SAVE: {
1245 case AMDGPU::SI_SPILL_S1024_RESTORE:
1246 case AMDGPU::SI_SPILL_S512_RESTORE:
1247 case AMDGPU::SI_SPILL_S256_RESTORE:
1248 case AMDGPU::SI_SPILL_S192_RESTORE:
1249 case AMDGPU::SI_SPILL_S160_RESTORE:
1250 case AMDGPU::SI_SPILL_S128_RESTORE:
1251 case AMDGPU::SI_SPILL_S96_RESTORE:
1252 case AMDGPU::SI_SPILL_S64_RESTORE:
1253 case AMDGPU::SI_SPILL_S32_RESTORE: {
1259 case AMDGPU::SI_SPILL_V1024_SAVE:
1260 case AMDGPU::SI_SPILL_V512_SAVE:
1261 case AMDGPU::SI_SPILL_V256_SAVE:
1262 case AMDGPU::SI_SPILL_V160_SAVE:
1263 case AMDGPU::SI_SPILL_V128_SAVE:
1264 case AMDGPU::SI_SPILL_V96_SAVE:
1265 case AMDGPU::SI_SPILL_V64_SAVE:
1266 case AMDGPU::SI_SPILL_V32_SAVE:
1267 case AMDGPU::SI_SPILL_A1024_SAVE:
1268 case AMDGPU::SI_SPILL_A512_SAVE:
1269 case AMDGPU::SI_SPILL_A128_SAVE:
1270 case AMDGPU::SI_SPILL_A64_SAVE:
1271 case AMDGPU::SI_SPILL_A32_SAVE: {
1273 AMDGPU::OpName::vdata);
1274 assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() ==
1277 buildSpillLoadStore(MI, AMDGPU::BUFFER_STORE_DWORD_OFFSET,
1280 TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc)->getReg(),
1282 TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
1289 case AMDGPU::SI_SPILL_V32_RESTORE:
1290 case AMDGPU::SI_SPILL_V64_RESTORE:
1291 case AMDGPU::SI_SPILL_V96_RESTORE:
1292 case AMDGPU::SI_SPILL_V128_RESTORE:
1293 case AMDGPU::SI_SPILL_V160_RESTORE:
1294 case AMDGPU::SI_SPILL_V256_RESTORE:
1295 case AMDGPU::SI_SPILL_V512_RESTORE:
1296 case AMDGPU::SI_SPILL_V1024_RESTORE:
1297 case AMDGPU::SI_SPILL_A32_RESTORE:
1298 case AMDGPU::SI_SPILL_A64_RESTORE:
1299 case AMDGPU::SI_SPILL_A128_RESTORE:
1300 case AMDGPU::SI_SPILL_A512_RESTORE:
1301 case AMDGPU::SI_SPILL_A1024_RESTORE: {
1303 AMDGPU::OpName::vdata);
1304 assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() ==
1307 buildSpillLoadStore(MI, AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
1310 TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc)->getReg(),
1312 TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
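For the vector spill pseudos above (source lines 1259-1312), frame-index elimination reads the named vdata/srsrc/soffset/offset operands and hands them to buildSpillLoadStore with a single-dword buffer opcode, so a multi-dword tuple is presumably walked one 32-bit part at a time. A standalone model of that split (the 4-byte stride is an inference from the single-dword opcode):

    #include <cstdio>

    void expandSpill(unsigned NumDwords, int BaseOffset, bool IsStore) {
      for (unsigned I = 0; I != NumDwords; ++I)
        std::printf("%s dword %u at offset %d\n",
                    IsStore ? "buffer_store" : "buffer_load",
                    I, BaseOffset + 4 * static_cast<int>(I));
    }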
1328 bool IsCopy = MI->getOpcode() == AMDGPU::V_MOV_B32_e32;
1331 : RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
1336 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64), ResultReg)
1344 BuildMI(*MBB, *MIB, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64),
1349 const bool IsVOP2 = MIB->getOpcode() == AMDGPU::V_ADD_U32_e32;
1352 if (IsVOP2 || AMDGPU::isInlinableLiteral32(Offset, ST.hasInv2PiInlineImm())) {
1359 assert(MIB->getOpcode() == AMDGPU::V_ADD_I32_e64 &&
1365 ConstOffsetReg = getSubReg(MIB.getReg(1), AMDGPU::sub0);
1369 BuildMI(*MBB, *MIB, DL, TII->get(AMDGPU::S_MOV_B32), ConstOffsetReg)
1383 RS->scavengeRegister(&AMDGPU::SReg_32_XM0RegClass, MI, 0, false);
1386 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_LSHR_B32), ScaledReg)
1389 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), ScaledReg)
1392 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), ResultReg)
1397 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), ScaledReg)
1400 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_LSHL_B32), ScaledReg)
1418 AMDGPU::getNamedOperandIdx(MI->getOpcode(),
1419 AMDGPU::OpName::vaddr));
1421 auto &SOffset = *TII->getNamedOperand(*MI, AMDGPU::OpName::soffset);
1426 if (FrameReg == AMDGPU::NoRegister) {
1435 = TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm();
1451 Register TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
1452 BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
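The shifts at source lines 1336-1400 convert between the swizzled frame-register value and a per-lane byte offset; the shift amount sits on unmatched lines, but it is plausibly log2 of the wavefront size. The arithmetic, standalone (the function name and wave-size assumption are mine):

    #include <cstdint>

    // Assumed: the frame register holds a wave-scaled value; shifting right by
    // log2(wave size) yields a per-lane byte offset, and S_LSHL_B32 undoes it.
    uint32_t perLaneOffset(uint32_t FrameRegVal, unsigned WaveSizeLog2) {
      return FrameRegVal >> WaveSizeLog2; // wave64 -> shift by 6
    }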
1467 return &AMDGPU::VReg_1RegClass;
1469 return &AMDGPU::VGPR_LO16RegClass;
1471 return &AMDGPU::VGPR_32RegClass;
1473 return &AMDGPU::VReg_64RegClass;
1475 return &AMDGPU::VReg_96RegClass;
1477 return &AMDGPU::VReg_128RegClass;
1479 return &AMDGPU::VReg_160RegClass;
1481 return &AMDGPU::VReg_192RegClass;
1483 return &AMDGPU::VReg_256RegClass;
1485 return &AMDGPU::VReg_512RegClass;
1487 return &AMDGPU::VReg_1024RegClass;
1495 return &AMDGPU::AGPR_LO16RegClass;
1497 return &AMDGPU::AGPR_32RegClass;
1499 return &AMDGPU::AReg_64RegClass;
1501 return &AMDGPU::AReg_96RegClass;
1503 return &AMDGPU::AReg_128RegClass;
1505 return &AMDGPU::AReg_160RegClass;
1507 return &AMDGPU::AReg_192RegClass;
1509 return &AMDGPU::AReg_256RegClass;
1511 return &AMDGPU::AReg_512RegClass;
1513 return &AMDGPU::AReg_1024RegClass;
1521 return &AMDGPU::SGPR_LO16RegClass;
1523 return &AMDGPU::SReg_32RegClass;
1525 return &AMDGPU::SReg_64RegClass;
1527 return &AMDGPU::SGPR_96RegClass;
1529 return &AMDGPU::SGPR_128RegClass;
1531 return &AMDGPU::SGPR_160RegClass;
1533 return &AMDGPU::SGPR_192RegClass;
1535 return &AMDGPU::SGPR_256RegClass;
1537 return &AMDGPU::SGPR_512RegClass;
1539 return &AMDGPU::SGPR_1024RegClass;
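The conditions guarding the returns at source lines 1467-1539 are elided by the search, but the three ladders clearly map a bit width onto the narrowest wide-enough VGPR/AGPR/SGPR class. One plausible standalone shape of the VGPR ladder (string names stand in for the RegClass pointers; the <= comparisons are an assumption):

    const char *vgprClassForBitWidth(unsigned BitWidth) {
      if (BitWidth == 1)    return "VReg_1";
      if (BitWidth <= 16)   return "VGPR_LO16";
      if (BitWidth <= 32)   return "VGPR_32";
      if (BitWidth <= 64)   return "VReg_64";
      if (BitWidth <= 96)   return "VReg_96";
      if (BitWidth <= 128)  return "VReg_128";
      if (BitWidth <= 160)  return "VReg_160";
      if (BitWidth <= 192)  return "VReg_192";
      if (BitWidth <= 256)  return "VReg_256";
      if (BitWidth <= 512)  return "VReg_512";
      if (BitWidth <= 1024) return "VReg_1024";
      return nullptr; // no VGPR class that wide
    }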
1549 &AMDGPU::VGPR_LO16RegClass,
1550 &AMDGPU::VGPR_HI16RegClass,
1551 &AMDGPU::SReg_LO16RegClass,
1552 &AMDGPU::AGPR_LO16RegClass,
1553 &AMDGPU::VGPR_32RegClass,
1554 &AMDGPU::SReg_32RegClass,
1555 &AMDGPU::AGPR_32RegClass,
1556 &AMDGPU::VReg_64RegClass,
1557 &AMDGPU::SReg_64RegClass,
1558 &AMDGPU::AReg_64RegClass,
1559 &AMDGPU::VReg_96RegClass,
1560 &AMDGPU::SReg_96RegClass,
1561 &AMDGPU::AReg_96RegClass,
1562 &AMDGPU::VReg_128RegClass,
1563 &AMDGPU::SReg_128RegClass,
1564 &AMDGPU::AReg_128RegClass,
1565 &AMDGPU::VReg_160RegClass,
1566 &AMDGPU::SReg_160RegClass,
1567 &AMDGPU::AReg_160RegClass,
1568 &AMDGPU::VReg_192RegClass,
1569 &AMDGPU::SReg_192RegClass,
1570 &AMDGPU::AReg_192RegClass,
1571 &AMDGPU::VReg_256RegClass,
1572 &AMDGPU::SReg_256RegClass,
1573 &AMDGPU::AReg_256RegClass,
1574 &AMDGPU::VReg_512RegClass,
1575 &AMDGPU::SReg_512RegClass,
1576 &AMDGPU::AReg_512RegClass,
1577 &AMDGPU::SReg_1024RegClass,
1578 &AMDGPU::VReg_1024RegClass,
1579 &AMDGPU::AReg_1024RegClass,
1580 &AMDGPU::SCC_CLASSRegClass,
1581 &AMDGPU::Pseudo_SReg_32RegClass,
1582 &AMDGPU::Pseudo_SReg_128RegClass,
1598 return getCommonSubClass(&AMDGPU::VGPR_LO16RegClass, RC) != nullptr ||
1599 getCommonSubClass(&AMDGPU::VGPR_HI16RegClass, RC) != nullptr;
1641 return &AMDGPU::SGPR_32RegClass;
1649 if (SubIdx == AMDGPU::NoSubRegister)
1656 RC = &AMDGPU::SGPR_32RegClass;
1669 if (OpType >= AMDGPU::OPERAND_REG_INLINE_AC_FIRST &&
1670 OpType <= AMDGPU::OPERAND_REG_INLINE_AC_LAST)
1673 return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
1674 OpType <= AMDGPU::OPERAND_SRC_LAST;
1703 /// AMDGPU::NoRegister. If \p ReserveHighestVGPR = true, then return
1723 const unsigned RegBitWidth = AMDGPU::getRegBitWidth(*RC->MC);
1787 case AMDGPU::VGPR_32RegClassID:
1788 case AMDGPU::VGPR_LO16RegClassID:
1789 case AMDGPU::VGPR_HI16RegClassID:
1791 case AMDGPU::SGPR_32RegClassID:
1792 case AMDGPU::SGPR_LO16RegClassID:
1799 if (Idx == AMDGPU::RegisterPressureSets::VGPR_32 ||
1800 Idx == AMDGPU::RegisterPressureSets::AGPR_32)
1801 return getRegPressureLimit(&AMDGPU::VGPR_32RegClass,
1804 if (Idx == AMDGPU::RegisterPressureSets::SReg_32)
1805 return getRegPressureLimit(&AMDGPU::SGPR_32RegClass,
1822 return AMDGPU::SGPR30_SGPR31;
1830 case AMDGPU::VGPRRegBankID:
1832 case AMDGPU::VCCRegBankID:
1834 return isWave32 ? &AMDGPU::SReg_32_XM0_XEXECRegClass
1835 : &AMDGPU::SReg_64_XEXECRegClass;
1836 case AMDGPU::SGPRRegBankID:
1838 case AMDGPU::AGPRRegBankID:
1857 return isWave32 ? AMDGPU::VCC_LO : AMDGPU::VCC;
1863 case AMDGPU::SReg_1RegClassID:
1865 case AMDGPU::SReg_1_XEXECRegClassID:
1866 return isWave32 ? &AMDGPU::SReg_32_XM0_XEXECRegClass
1867 : &AMDGPU::SReg_64_XEXECRegClass;
1932 for (const TargetRegisterClass &RC : { AMDGPU::VGPR_32RegClass,
1933 AMDGPU::SReg_32RegClass,
1934 AMDGPU::AGPR_32RegClass } ) {
1935 if (MCPhysReg Super = getMatchingSuperReg(Reg, AMDGPU::lo16, &RC))
1938 if (MCPhysReg Super = getMatchingSuperReg(Reg, AMDGPU::hi16,
1939 &AMDGPU::VGPR_32RegClass)) {
1943 return AMDGPU::NoRegister;
1948 case AMDGPU::SGPR_NULL:
1949 case AMDGPU::SRC_SHARED_BASE:
1950 case AMDGPU::SRC_PRIVATE_BASE:
1951 case AMDGPU::SRC_SHARED_LIMIT:
1952 case AMDGPU::SRC_PRIVATE_LIMIT:
1961 return makeArrayRef(AMDGPU::SGPR_128RegClass.begin(),
1967 return makeArrayRef(AMDGPU::SGPR_32RegClass.begin(), ST.getMaxNumSGPRs(MF));
1972 return makeArrayRef(AMDGPU::VGPR_32RegClass.begin(), ST.getMaxNumVGPRs(MF));