Lines Matching refs:TII, restricted to /freebsd-13-stable/contrib/llvm-project/llvm/lib/Target/AMDGPU/

89   const SIInstrInfo *TII;
134 static bool isInlineConstantIfFolded(const SIInstrInfo *TII,
138 if (TII->isInlineConstant(UseMI, OpNo, OpToFold))
159 const MCInstrDesc &MadDesc = TII->get(Opc);
160 return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType);
171 static bool frameIndexMayFold(const SIInstrInfo *TII,
176 (TII->isMUBUF(UseMI) || TII->isFLATScratch(UseMI)) &&
185 const SIInstrInfo &TII,
215 switch (TII.get(Opcode).OpInfo[OpNo].OperandType) {
259 MachineInstr *Inst32 = TII.buildShrunkInst(*MI, Op32);
262 BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), Dst1.getReg())
275 MI->setDesc(TII.get(AMDGPU::IMPLICIT_DEF));
278 TII.commuteInstruction(*Inst32, false);
334 const SIInstrInfo *TII) {
335 if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {
351 MI->setDesc(TII->get(NewOpc));
352 bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
357 MI->setDesc(TII->get(Opc));
362 MI->setDesc(TII->get(AMDGPU::S_SETREG_IMM32_B32));
379 bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);
398 !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
401 if (!TII->isOperandLegal(*MI, CommuteOpNo, OpToFold)) {
413 !TII->getRegisterInfo().isVGPR(MRI, OtherOp.getReg()))
426 TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1);
436 if (TII->isSALU(MI->getOpcode())) {
439 const SIRegisterInfo &SRI = TII->getRegisterInfo();
444 !TII->isInlineConstant(*OpToFold, OpInfo)) {
449 TII->isLiteralConstantLike(Op, OpInfo)) {
463 static bool isUseSafeToFold(const SIInstrInfo *TII,
466 return !UseMO.isUndef() && !TII->isSDWA(MI);
476 const SIInstrInfo *TII, const MachineRegisterInfo &MRI) {
487 TII->isFoldableCopy(*SubDef);
491 if (TII->isInlineConstant(*Op, OpTy))
506 static bool tryToFoldACImm(const SIInstrInfo *TII,
521 if (OpToFold.isImm() && TII->isInlineConstant(OpToFold, OpTy) &&
522 TII->isOperandLegal(*UseMI, UseOpIdx, &OpToFold)) {
540 if (!getRegSeqInit(Defs, UseReg, OpTy, TII, MRI))
552 if (!TII->isInlineConstant(*Op, OpTy) ||
553 !TII->isOperandLegal(*UseMI, UseOpIdx, Op))
574 if (!isUseSafeToFold(TII, *UseMI, UseOp))
598 if (tryToFoldACImm(TII, UseMI->getOperand(0), RSUseMI,
612 if (tryToFoldACImm(TII, OpToFold, UseMI, UseOpIdx, FoldList))
615 if (frameIndexMayFold(TII, *UseMI, UseOpIdx, OpToFold)) {
619 if (TII->getNamedOperand(*UseMI, AMDGPU::OpName::srsrc)->getReg() !=
625 *TII->getNamedOperand(*UseMI, AMDGPU::OpName::soffset);
677 TII->isInlineConstant(OpToFold, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
678 UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32));
687 unsigned MovOp = TII->getMovOpcode(DestRC);
691 UseMI->setDesc(TII->get(MovOp));
706 unsigned Size = TII->getOpSize(*UseMI, 1);
720 getRegSeqInit(Defs, UseReg, AMDGPU::OPERAND_REG_INLINE_C_INT32, TII,
725 UseMI->setDesc(TII->get(AMDGPU::REG_SEQUENCE));
736 TII->isInlineConstant(*Def, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
741 TII->get(AMDGPU::V_ACCVGPR_WRITE_B32), Tmp).addImm(Imm);
767 BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Tmp).add(*Def);
778 BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Vgpr).add(*Def);
783 TII->get(AMDGPU::V_ACCVGPR_WRITE_B32), Tmp).addReg(Vgpr);
797 UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32));
800 UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_READ_B32));
820 UseMI->setDesc(TII->get(AMDGPU::S_MOV_B32));
843 UseMI->setDesc(TII->get(AMDGPU::COPY));
863 tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
893 tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
899 tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
1013 const SIInstrInfo *TII,
1020 mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
1045 bool UseCopy = TII->getNamedOperand(*MI, AMDGPU::OpName::src2)->isReg();
1049 MI->setDesc(TII->get(UseCopy ? AMDGPU::COPY : AMDGPU::V_MOV_B32_e32));
1062 const SIRegisterInfo &TRI = TII->getRegisterInfo();
1069 mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
1088 mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
1092 mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
1105 mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
1109 mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
1123 mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
1132 static bool tryFoldInst(const SIInstrInfo *TII,
1139 const MachineOperand *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
1140 const MachineOperand *Src1 = TII->getNamedOperand(*MI, AMDGPU::OpName::src1);
1148 TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY : getMovOpc(false));
1198 if (OpToFold.isImm() && tryConstantFoldOp(*MRI, TII, UseMI, &OpToFold)) {
1225 if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
1227 } else if (frameIndexMayFold(TII, *UseMI, OpNo, OpToFold)) {
1272 if (updateOperand(Fold, *TII, *TRI, *ST)) {
1284 tryFoldInst(TII, Fold.UseMI);
1287 TII->commuteInstruction(*Fold.UseMI, false);
1301 if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
1305 const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
1306 const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
1314 if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
1318 = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm();
1320 = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm();
1357 if (TII->getClampMask(*Def) != TII->getClampMask(MI))
1360 MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp);
1421 const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
1422 const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
1434 TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
1435 TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
1436 TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
1437 TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
1450 const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
1451 const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
1455 !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
1456 !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
1457 !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
1458 !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
1479 MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
1485 if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
1502 TII = ST->getInstrInfo();
1503 TRI = &TII->getRegisterInfo();
1521 tryFoldInst(TII, &MI);
1523 if (!TII->isFoldableCopy(MI)) {
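
Every match above goes through TII, the SIInstrInfo instance for the current subtarget (obtained via ST->getInstrInfo() at line 1502). The surrounding code appears to be the AMDGPU operand-folding pass, and the uses fall into a few recurring patterns: looking up an instruction descriptor with TII->get(Opcode) and installing it with MI->setDesc(...), asking whether an operand is encodable as an inline constant with TII->isInlineConstant(...), checking operand legality with TII->isOperandLegal(...), building new instructions with BuildMI(..., TII->get(...), ...), and fetching named operands with TII->getNamedOperand(...). The sketch below is not taken from the file; it is a minimal, hypothetical helper (the name foldImmIntoMove and the fold-to-V_MOV_B32_e32 policy are assumptions for illustration) that strings these calls together to show how they fit, assuming it is compiled in-tree inside the AMDGPU target directory so the target-internal headers and generated opcode enums are available.

// Illustrative sketch only; not part of the pass shown above.
#include "SIInstrInfo.h"                          // AMDGPU target-internal header (assumes in-tree build)
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"      // provides the AMDGPU:: opcode enum
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"

using namespace llvm;

// Hypothetical helper: if Src is an immediate the target can encode as an
// inline constant for operand OpNo of MI, and the operand is legal there,
// rewrite MI in place into a V_MOV_B32_e32 of that immediate.
static bool foldImmIntoMove(const SIInstrInfo *TII, MachineInstr &MI,
                            unsigned OpNo, const MachineOperand &Src) {
  // The operand-type check mirrors calls like
  // TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType) above.
  const MCOperandInfo &OpInfo = MI.getDesc().OpInfo[OpNo];
  if (!Src.isImm() || !TII->isInlineConstant(Src, OpInfo.OperandType))
    return false;

  // TII->isOperandLegal(...) guards the rewrite, as in the tryAddToFoldList
  // and tryToFoldACImm matches above.
  if (!TII->isOperandLegal(MI, OpNo, &Src))
    return false;

  // Same rewrite shape as the many MI->setDesc(TII->get(...)) matches:
  // swap the descriptor, then turn the operand into the folded immediate.
  MI.setDesc(TII->get(AMDGPU::V_MOV_B32_e32));
  MI.getOperand(OpNo).ChangeToImmediate(Src.getImm());
  return true;
}

In the pass itself these checks are threaded through helpers such as tryAddToFoldList and tryToFoldACImm rather than applied in one place, but the division of labor is the same: TII answers "is this operand encodable and legal here?" and the caller performs the in-place rewrite.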