Search restricted to /freebsd-13-stable/contrib/llvm-project/llvm/lib/Target/AMDGPU/

Lines Matching refs:TII

71 static bool foldImmediates(MachineInstr &MI, const SIInstrInfo *TII,
73 assert(TII->isVOP1(MI) || TII->isVOP2(MI) || TII->isVOPC(MI));
116 if (TII->commuteInstruction(MI)) {
117 if (foldImmediates(MI, TII, MRI, false))
121 TII->commuteInstruction(MI);
128 static bool isKImmOperand(const SIInstrInfo *TII, const MachineOperand &Src) {
130 !TII->isInlineConstant(*Src.getParent(),
134 static bool isKUImmOperand(const SIInstrInfo *TII, const MachineOperand &Src) {
136 !TII->isInlineConstant(*Src.getParent(),
140 static bool isKImmOrKUImmOperand(const SIInstrInfo *TII,
145 return !TII->isInlineConstant(Src);
150 return !TII->isInlineConstant(Src);
158 static bool isReverseInlineImm(const SIInstrInfo *TII,
161 if (!isInt<32>(Src.getImm()) || TII->isInlineConstant(Src))
182 static void shrinkScalarCompare(const SIInstrInfo *TII, MachineInstr &MI) {
186 TII->commuteInstruction(MI, false, 0, 1);
205 if (isKImmOrKUImmOperand(TII, Src1, HasUImm)) {
211 MI.setDesc(TII->get(SOPKOpc));
217 const MCInstrDesc &NewDesc = TII->get(SOPKOpc);
219 if ((TII->sopkIsZext(SOPKOpc) && isKUImmOperand(TII, Src1)) ||
220 (!TII->sopkIsZext(SOPKOpc) && isKImmOperand(TII, Src1))) {
233 const SIInstrInfo *TII = ST.getInstrInfo();
234 const SIRegisterInfo &TRI = TII->getRegisterInfo();
300 MI.setDesc(TII->get(NewOpcode));
322 const SIInstrInfo *TII,
365 if (!TII->commuteInstruction(MI, false, 1, 2))
377 MI.setDesc(TII->get(Opc));
461 const SIInstrInfo *TII) {
474 unsigned Size = TII->getOpSize(MovT, 0) / 4;
476 const SIRegisterInfo &TRI = TII->getRegisterInfo();
538 TII->get(AMDGPU::V_SWAP_B32))
564 const SIInstrInfo *TII = ST.getInstrInfo();
591 if (isReverseInlineImm(TII, Src, ReverseImm)) {
592 MI.setDesc(TII->get(AMDGPU::V_BFREV_B32_e32));
601 if (auto *NextMI = matchSwap(MI, MRI, TII)) {
648 if (TII->commuteInstruction(MI, false, 1, 2))
662 if (Src1->isImm() && isKImmOperand(TII, *Src1)) {
666 MI.setDesc(TII->get(Opc));
673 if (MI.isCompare() && TII->isSOPC(MI)) {
674 shrinkScalarCompare(TII, MI);
685 if (isKImmOperand(TII, Src))
686 MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));
687 else if (isReverseInlineImm(TII, Src, ReverseImm)) {
688 MI.setDesc(TII->get(AMDGPU::S_BREV_B32));
700 if (shrinkScalarLogicOp(ST, MRI, TII, MI))
704 if (TII->isMIMG(MI.getOpcode()) &&
712 if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
715 if (!TII->canShrink(MI, MRI)) {
718 if (!MI.isCommutable() || !TII->commuteInstruction(MI) ||
719 !TII->canShrink(MI, MRI))
725 if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
730 if (TII->isVOPC(Op32)) {
752 TII->getNamedOperand(MI, AMDGPU::OpName::src2);
765 const MachineOperand *SDst = TII->getNamedOperand(MI,
769 const MachineOperand *Src2 = TII->getNamedOperand(MI,
796 MachineInstr *Inst32 = TII->buildShrunkInst(MI, Op32);
803 foldImmediates(*Inst32, TII, MRI);
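
Every match above follows the same pattern: the pass obtains the SIInstrInfo from the GCN subtarget (lines 233 and 564) and routes all instruction queries and rewrites through that pointer, e.g. commuteInstruction, isInlineConstant, setDesc(TII->get(NewOpcode)), and buildShrunkInst. Below is a minimal sketch of that pattern, not the pass itself; the helper name shrinkScalarMovSketch and the exact target-private header names are illustrative assumptions, not taken from the file.

    // Illustrative only: fetch TII from the subtarget and use it to swap an
    // S_MOV_B32 for the shorter SOPK encoding, mirroring lines 685-686 above.
    #include "llvm/CodeGen/MachineFunction.h"
    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/Support/MathExtras.h"
    // Target-private headers; exact names vary across LLVM releases.
    #include "GCNSubtarget.h"
    #include "SIInstrInfo.h"

    using namespace llvm;

    static void shrinkScalarMovSketch(MachineFunction &MF) {
      const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
      const SIInstrInfo *TII = ST.getInstrInfo();

      for (MachineBasicBlock &MBB : MF) {
        for (MachineInstr &MI : MBB) {
          if (MI.getOpcode() != AMDGPU::S_MOV_B32)
            continue;

          const MachineOperand &Src = MI.getOperand(1);
          // A literal that fits in 16 bits but is not already a free inline
          // constant can use S_MOVK_I32 instead of carrying a separate
          // 32-bit literal dword.
          if (Src.isImm() && isInt<16>(Src.getImm()) &&
              !TII->isInlineConstant(Src))
            MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));
        }
      }
    }

The real pass covers far more cases through the same TII pointer (SOPK compares in shrinkScalarCompare, V_SWAP_B32 formation in matchSwap, VOP shrinking via canShrink and buildShrunkInst), which is why nearly every line of it references TII.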